GNU Linux-libre 6.9.2-gnu
[releases.git] / net / bluetooth / hci_event.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI event handling. */
27
28 #include <asm/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "hci_debugfs.h"
38 #include "hci_codec.h"
39 #include "smp.h"
40 #include "msft.h"
41 #include "eir.h"
42
43 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
44                  "\x00\x00\x00\x00\x00\x00\x00\x00"
45
46 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
47
48 /* Handle HCI Event packets */
49
50 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
51                              u8 ev, size_t len)
52 {
53         void *data;
54
55         data = skb_pull_data(skb, len);
56         if (!data)
57                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
58
59         return data;
60 }
61
62 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
63                              u16 op, size_t len)
64 {
65         void *data;
66
67         data = skb_pull_data(skb, len);
68         if (!data)
69                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
70
71         return data;
72 }
73
74 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
75                                 u8 ev, size_t len)
76 {
77         void *data;
78
79         data = skb_pull_data(skb, len);
80         if (!data)
81                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
82
83         return data;
84 }
85
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * Clears the local HCI_INQUIRY state, wakes anyone waiting on that bit,
 * and moves discovery to DISCOVERY_STOPPED unless an LE active scan is
 * still in progress.  Returns the (possibly rewritten) command status.
 */
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
                                struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        /* It is possible that we receive Inquiry Complete event right
         * before we receive Inquiry Cancel Command Complete event, in
         * which case the latter event should have status of Command
         * Disallowed. This should not be treated as error, since
         * we actually achieve what Inquiry Cancel wants to achieve,
         * which is to end the last Inquiry session.
         */
        if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
                bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
                rp->status = 0x00;
        }

        if (rp->status)
                return rp->status;

        /* Clear the bit first, then make it visible before waking any
         * waiters sleeping on HCI_INQUIRY.
         */
        clear_bit(HCI_INQUIRY, &hdev->flags);
        smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
        wake_up_bit(&hdev->flags, HCI_INQUIRY);

        hci_dev_lock(hdev);
        /* Set discovery state to stopped if we're not doing LE active
         * scanning.
         */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
            hdev->le_scan_type != LE_SCAN_ACTIVE)
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);

        return rp->status;
}
123
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
125                               struct sk_buff *skb)
126 {
127         struct hci_ev_status *rp = data;
128
129         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
130
131         if (rp->status)
132                 return rp->status;
133
134         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
135
136         return rp->status;
137 }
138
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
140                                    struct sk_buff *skb)
141 {
142         struct hci_ev_status *rp = data;
143
144         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
145
146         if (rp->status)
147                 return rp->status;
148
149         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
150
151         return rp->status;
152 }
153
154 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
155                                         struct sk_buff *skb)
156 {
157         struct hci_ev_status *rp = data;
158
159         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
160
161         return rp->status;
162 }
163
164 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
165                                 struct sk_buff *skb)
166 {
167         struct hci_rp_role_discovery *rp = data;
168         struct hci_conn *conn;
169
170         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
171
172         if (rp->status)
173                 return rp->status;
174
175         hci_dev_lock(hdev);
176
177         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
178         if (conn)
179                 conn->role = rp->role;
180
181         hci_dev_unlock(hdev);
182
183         return rp->status;
184 }
185
186 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
187                                   struct sk_buff *skb)
188 {
189         struct hci_rp_read_link_policy *rp = data;
190         struct hci_conn *conn;
191
192         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
193
194         if (rp->status)
195                 return rp->status;
196
197         hci_dev_lock(hdev);
198
199         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
200         if (conn)
201                 conn->link_policy = __le16_to_cpu(rp->policy);
202
203         hci_dev_unlock(hdev);
204
205         return rp->status;
206 }
207
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * On success, mirrors the policy value that was sent in the command back
 * into the matching hci_conn.
 */
static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_rp_write_link_policy *rp = data;
        struct hci_conn *conn;
        void *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* Recover the parameters of the command this event completes */
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (conn)
                /* Command parameters: 2-byte handle followed by the
                 * 2-byte policy; skip the handle (hence sent + 2).
                 */
                conn->link_policy = get_unaligned_le16(sent + 2);

        hci_dev_unlock(hdev);

        return rp->status;
}
234
235 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
236                                       struct sk_buff *skb)
237 {
238         struct hci_rp_read_def_link_policy *rp = data;
239
240         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
241
242         if (rp->status)
243                 return rp->status;
244
245         hdev->link_policy = __le16_to_cpu(rp->policy);
246
247         return rp->status;
248 }
249
250 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
251                                        struct sk_buff *skb)
252 {
253         struct hci_ev_status *rp = data;
254         void *sent;
255
256         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
257
258         if (rp->status)
259                 return rp->status;
260
261         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
262         if (!sent)
263                 return rp->status;
264
265         hdev->link_policy = get_unaligned_le16(sent);
266
267         return rp->status;
268 }
269
/* Command Complete handler for HCI_OP_RESET.
 *
 * Always drops the HCI_RESET flag (even on failure, so a new reset can
 * be issued), then on success returns the device state to its
 * post-reset defaults: volatile flags, discovery state, TX power,
 * advertising/scan-response data, scan type and the LE accept/resolving
 * lists.
 */
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        /* Cleared unconditionally so a failed reset does not wedge the
         * reset state machine.
         */
        clear_bit(HCI_RESET, &hdev->flags);

        if (rp->status)
                return rp->status;

        /* Reset all non-persistent flags */
        hci_dev_clear_volatile_flags(hdev);

        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;

        memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
        hdev->adv_data_len = 0;

        memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
        hdev->scan_rsp_data_len = 0;

        hdev->le_scan_type = LE_SCAN_PASSIVE;

        hdev->ssp_debug_mode = 0;

        /* The controller forgot these lists on reset; drop our copies */
        hci_bdaddr_list_clear(&hdev->le_accept_list);
        hci_bdaddr_list_clear(&hdev->le_resolv_list);

        return rp->status;
}
304
305 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
306                                       struct sk_buff *skb)
307 {
308         struct hci_rp_read_stored_link_key *rp = data;
309         struct hci_cp_read_stored_link_key *sent;
310
311         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
312
313         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
314         if (!sent)
315                 return rp->status;
316
317         if (!rp->status && sent->read_all == 0x01) {
318                 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
319                 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
320         }
321
322         return rp->status;
323 }
324
325 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
326                                         struct sk_buff *skb)
327 {
328         struct hci_rp_delete_stored_link_key *rp = data;
329         u16 num_keys;
330
331         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
332
333         if (rp->status)
334                 return rp->status;
335
336         num_keys = le16_to_cpu(rp->num_keys);
337
338         if (num_keys <= hdev->stored_num_keys)
339                 hdev->stored_num_keys -= num_keys;
340         else
341                 hdev->stored_num_keys = 0;
342
343         return rp->status;
344 }
345
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * Notifies the management interface of the result, or, when mgmt is not
 * in use, caches the new name locally on success.
 */
static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
                                  struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        void *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        /* The name that was written is taken from the sent command */
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        /* mgmt is told about both success and failure; without mgmt the
         * name is only cached on success.
         */
        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_set_local_name_complete(hdev, sent, rp->status);
        else if (!rp->status)
                memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

        hci_dev_unlock(hdev);

        return rp->status;
}
369
370 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
371                                  struct sk_buff *skb)
372 {
373         struct hci_rp_read_local_name *rp = data;
374
375         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
376
377         if (rp->status)
378                 return rp->status;
379
380         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
381             hci_dev_test_flag(hdev, HCI_CONFIG))
382                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
383
384         return rp->status;
385 }
386
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success mirrors the requested authentication setting into the
 * HCI_AUTH flag; the management interface is notified of the outcome in
 * either case.
 */
static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        void *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        /* The requested setting is taken from the sent command */
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        if (!rp->status) {
                /* First (and only) command parameter: Authentication_Enable */
                __u8 param = *((__u8 *) sent);

                if (param == AUTH_ENABLED)
                        set_bit(HCI_AUTH, &hdev->flags);
                else
                        clear_bit(HCI_AUTH, &hdev->flags);
        }

        /* mgmt is informed of both success and failure */
        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_auth_enable_complete(hdev, rp->status);

        hci_dev_unlock(hdev);

        return rp->status;
}
417
418 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
419                                     struct sk_buff *skb)
420 {
421         struct hci_ev_status *rp = data;
422         __u8 param;
423         void *sent;
424
425         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
426
427         if (rp->status)
428                 return rp->status;
429
430         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
431         if (!sent)
432                 return rp->status;
433
434         param = *((__u8 *) sent);
435
436         if (param)
437                 set_bit(HCI_ENCRYPT, &hdev->flags);
438         else
439                 clear_bit(HCI_ENCRYPT, &hdev->flags);
440
441         return rp->status;
442 }
443
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * On success mirrors the requested scan mode into the HCI_ISCAN and
 * HCI_PSCAN flags; on failure the discoverable timeout is reset so a
 * failed "make discoverable" does not leave a stale timeout armed.
 */
static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        __u8 param;
        void *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        /* The requested scan mode is taken from the sent command */
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
        if (!sent)
                return rp->status;

        param = *((__u8 *) sent);

        hci_dev_lock(hdev);

        if (rp->status) {
                /* Command failed: drop any pending discoverable timeout */
                hdev->discov_timeout = 0;
                goto done;
        }

        if (param & SCAN_INQUIRY)
                set_bit(HCI_ISCAN, &hdev->flags);
        else
                clear_bit(HCI_ISCAN, &hdev->flags);

        if (param & SCAN_PAGE)
                set_bit(HCI_PSCAN, &hdev->flags);
        else
                clear_bit(HCI_PSCAN, &hdev->flags);

done:
        hci_dev_unlock(hdev);

        return rp->status;
}
481
482 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
483                                   struct sk_buff *skb)
484 {
485         struct hci_ev_status *rp = data;
486         struct hci_cp_set_event_filter *cp;
487         void *sent;
488
489         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
490
491         if (rp->status)
492                 return rp->status;
493
494         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
495         if (!sent)
496                 return rp->status;
497
498         cp = (struct hci_cp_set_event_filter *)sent;
499
500         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
501                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
502         else
503                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
504
505         return rp->status;
506 }
507
508 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
509                                    struct sk_buff *skb)
510 {
511         struct hci_rp_read_class_of_dev *rp = data;
512
513         if (WARN_ON(!hdev))
514                 return HCI_ERROR_UNSPECIFIED;
515
516         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
517
518         if (rp->status)
519                 return rp->status;
520
521         memcpy(hdev->dev_class, rp->dev_class, 3);
522
523         bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
524                    hdev->dev_class[1], hdev->dev_class[0]);
525
526         return rp->status;
527 }
528
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * On success caches the device class that was sent; the management
 * interface is notified of the outcome either way.
 */
static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        void *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        /* The written class is taken from the sent command parameters */
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        if (!rp->status)
                memcpy(hdev->dev_class, sent, 3);

        /* mgmt is informed of both success and failure */
        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

        hci_dev_unlock(hdev);

        return rp->status;
}
553
554 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
555                                     struct sk_buff *skb)
556 {
557         struct hci_rp_read_voice_setting *rp = data;
558         __u16 setting;
559
560         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
561
562         if (rp->status)
563                 return rp->status;
564
565         setting = __le16_to_cpu(rp->voice_setting);
566
567         if (hdev->voice_setting == setting)
568                 return rp->status;
569
570         hdev->voice_setting = setting;
571
572         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
573
574         if (hdev->notify)
575                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
576
577         return rp->status;
578 }
579
580 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
581                                      struct sk_buff *skb)
582 {
583         struct hci_ev_status *rp = data;
584         __u16 setting;
585         void *sent;
586
587         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
588
589         if (rp->status)
590                 return rp->status;
591
592         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
593         if (!sent)
594                 return rp->status;
595
596         setting = get_unaligned_le16(sent);
597
598         if (hdev->voice_setting == setting)
599                 return rp->status;
600
601         hdev->voice_setting = setting;
602
603         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
604
605         if (hdev->notify)
606                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
607
608         return rp->status;
609 }
610
611 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
612                                         struct sk_buff *skb)
613 {
614         struct hci_rp_read_num_supported_iac *rp = data;
615
616         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
617
618         if (rp->status)
619                 return rp->status;
620
621         hdev->num_iac = rp->num_iac;
622
623         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
624
625         return rp->status;
626 }
627
628 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
629                                 struct sk_buff *skb)
630 {
631         struct hci_ev_status *rp = data;
632         struct hci_cp_write_ssp_mode *sent;
633
634         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
635
636         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
637         if (!sent)
638                 return rp->status;
639
640         hci_dev_lock(hdev);
641
642         if (!rp->status) {
643                 if (sent->mode)
644                         hdev->features[1][0] |= LMP_HOST_SSP;
645                 else
646                         hdev->features[1][0] &= ~LMP_HOST_SSP;
647         }
648
649         if (!rp->status) {
650                 if (sent->mode)
651                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
652                 else
653                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
654         }
655
656         hci_dev_unlock(hdev);
657
658         return rp->status;
659 }
660
/* Command Complete handler for HCI_OP_WRITE_SC_SUPPORT.
 *
 * On success mirrors the requested Secure Connections support into the
 * host feature bits.  The HCI_SC_ENABLED flag is only updated here when
 * mgmt is NOT in use - with mgmt, that flag is owned by the management
 * interface (note the extra !HCI_MGMT condition on the second block).
 */
static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
                                  struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct hci_cp_write_sc_support *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        /* The requested support value is taken from the sent command */
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        if (!rp->status) {
                if (sent->support)
                        hdev->features[1][0] |= LMP_HOST_SC;
                else
                        hdev->features[1][0] &= ~LMP_HOST_SC;
        }

        /* Only touch HCI_SC_ENABLED when mgmt is not managing it */
        if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
                if (sent->support)
                        hci_dev_set_flag(hdev, HCI_SC_ENABLED);
                else
                        hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
693
694 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
695                                     struct sk_buff *skb)
696 {
697         struct hci_rp_read_local_version *rp = data;
698
699         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
700
701         if (rp->status)
702                 return rp->status;
703
704         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
705             hci_dev_test_flag(hdev, HCI_CONFIG)) {
706                 hdev->hci_ver = rp->hci_ver;
707                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
708                 hdev->lmp_ver = rp->lmp_ver;
709                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
710                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
711         }
712
713         return rp->status;
714 }
715
/* Command Complete handler for HCI_OP_READ_ENC_KEY_SIZE.
 *
 * Records the negotiated encryption key size on the connection and
 * confirms encryption via hci_encrypt_cfm().  A failed read, or a key
 * shorter than hdev->min_enc_key_size, is treated as an authentication
 * failure so the link gets torn down rather than left weakly encrypted.
 */
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_rp_read_enc_key_size *rp = data;
        struct hci_conn *conn;
        u16 handle;
        u8 status = rp->status;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        handle = le16_to_cpu(rp->handle);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, handle);
        if (!conn) {
                /* Connection already gone: report an unspecified error */
                status = 0xFF;
                goto done;
        }

        /* While unexpected, the read_enc_key_size command may fail. The most
         * secure approach is to then assume the key size is 0 to force a
         * disconnection.
         */
        if (status) {
                bt_dev_err(hdev, "failed to read key size for handle %u",
                           handle);
                conn->enc_key_size = 0;
        } else {
                conn->enc_key_size = rp->key_size;
                status = 0;

                if (conn->enc_key_size < hdev->min_enc_key_size) {
                        /* As slave role, the conn->state has been set to
                         * BT_CONNECTED and l2cap conn req might not be received
                         * yet, at this moment the l2cap layer almost does
                         * nothing with the non-zero status.
                         * So we also clear encrypt related bits, and then the
                         * handler of l2cap conn req will get the right secure
                         * state at a later time.
                         */
                        status = HCI_ERROR_AUTH_FAILURE;
                        clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
                        clear_bit(HCI_CONN_AES_CCM, &conn->flags);
                }
        }

        /* Propagate the (possibly rewritten) status to upper layers */
        hci_encrypt_cfm(conn, status);

done:
        hci_dev_unlock(hdev);

        return status;
}
770
771 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
772                                      struct sk_buff *skb)
773 {
774         struct hci_rp_read_local_commands *rp = data;
775
776         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
777
778         if (rp->status)
779                 return rp->status;
780
781         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
782             hci_dev_test_flag(hdev, HCI_CONFIG))
783                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
784
785         return rp->status;
786 }
787
788 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
789                                            struct sk_buff *skb)
790 {
791         struct hci_rp_read_auth_payload_to *rp = data;
792         struct hci_conn *conn;
793
794         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
795
796         if (rp->status)
797                 return rp->status;
798
799         hci_dev_lock(hdev);
800
801         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
802         if (conn)
803                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
804
805         hci_dev_unlock(hdev);
806
807         return rp->status;
808 }
809
/* Command Complete handler for HCI_OP_WRITE_AUTH_PAYLOAD_TO.
 *
 * On success mirrors the timeout that was sent into the matching
 * hci_conn.  If the connection no longer exists, the status is
 * rewritten to 0xff (unspecified error) before being returned.
 */
static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
                                            struct sk_buff *skb)
{
        struct hci_rp_write_auth_payload_to *rp = data;
        struct hci_conn *conn;
        void *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        /* Recover the parameters of the command this event completes */
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (!conn) {
                /* Connection is gone: report an unspecified error */
                rp->status = 0xff;
                goto unlock;
        }

        if (!rp->status)
                /* Command layout: 2-byte handle then the 2-byte timeout;
                 * skip the handle (hence sent + 2).
                 */
                conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
        hci_dev_unlock(hdev);

        return rp->status;
}
839
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches the LMP feature page 0 and derives the usable ACL packet types
 * and (e)SCO link types from the individual feature bits.
 */
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_rp_read_local_features *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* Feature page 0 is 8 bytes */
        memcpy(hdev->features, rp->features, 8);

        /* Adjust default settings according to features
         * supported by device. */

        /* Multi-slot ACL packet types */
        if (hdev->features[0][0] & LMP_3SLOT)
                hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

        if (hdev->features[0][0] & LMP_5SLOT)
                hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

        /* Legacy SCO packet types (HV2/HV3) */
        if (hdev->features[0][1] & LMP_HV2) {
                hdev->pkt_type  |= (HCI_HV2);
                hdev->esco_type |= (ESCO_HV2);
        }

        if (hdev->features[0][1] & LMP_HV3) {
                hdev->pkt_type  |= (HCI_HV3);
                hdev->esco_type |= (ESCO_HV3);
        }

        /* eSCO packet types */
        if (lmp_esco_capable(hdev))
                hdev->esco_type |= (ESCO_EV3);

        if (hdev->features[0][4] & LMP_EV4)
                hdev->esco_type |= (ESCO_EV4);

        if (hdev->features[0][4] & LMP_EV5)
                hdev->esco_type |= (ESCO_EV5);

        /* EDR eSCO rates (2/3 Mbps, 3-slot) */
        if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
                hdev->esco_type |= (ESCO_2EV3);

        if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
                hdev->esco_type |= (ESCO_3EV3);

        if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
                hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

        return rp->status;
}
891
892 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
893                                          struct sk_buff *skb)
894 {
895         struct hci_rp_read_local_ext_features *rp = data;
896
897         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
898
899         if (rp->status)
900                 return rp->status;
901
902         if (hdev->max_page < rp->max_page) {
903                 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
904                              &hdev->quirks))
905                         bt_dev_warn(hdev, "broken local ext features page 2");
906                 else
907                         hdev->max_page = rp->max_page;
908         }
909
910         if (rp->page < HCI_MAX_PAGES)
911                 memcpy(hdev->features[rp->page], rp->features, 8);
912
913         return rp->status;
914 }
915
916 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
917                                         struct sk_buff *skb)
918 {
919         struct hci_rp_read_flow_control_mode *rp = data;
920
921         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
922
923         if (rp->status)
924                 return rp->status;
925
926         hdev->flow_ctl_mode = rp->mode;
927
928         return rp->status;
929 }
930
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Caches the controller's ACL/SCO MTUs and packet counts, applies the
 * fixup quirk for controllers that report bogus SCO values, initializes
 * the outstanding-packet counters, and rejects a controller that
 * reports no usable ACL buffers at all.
 */
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
                                  struct sk_buff *skb)
{
        struct hci_rp_read_buffer_size *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
        hdev->sco_mtu  = rp->sco_mtu;
        hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
        hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

        /* Some controllers report unusable SCO buffer sizes; override
         * with known-good defaults when the quirk is set.
         */
        if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
                hdev->sco_mtu  = 64;
                hdev->sco_pkts = 8;
        }

        /* All buffers start out free */
        hdev->acl_cnt = hdev->acl_pkts;
        hdev->sco_cnt = hdev->sco_pkts;

        BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
               hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

        /* A controller without ACL buffers cannot carry any traffic */
        if (!hdev->acl_mtu || !hdev->acl_pkts)
                return HCI_ERROR_INVALID_PARAMETERS;

        return rp->status;
}
962
963 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
964                               struct sk_buff *skb)
965 {
966         struct hci_rp_read_bd_addr *rp = data;
967
968         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
969
970         if (rp->status)
971                 return rp->status;
972
973         if (test_bit(HCI_INIT, &hdev->flags))
974                 bacpy(&hdev->bdaddr, &rp->bdaddr);
975
976         if (hci_dev_test_flag(hdev, HCI_SETUP))
977                 bacpy(&hdev->setup_addr, &rp->bdaddr);
978
979         return rp->status;
980 }
981
/* Command Complete handler for HCI_OP_READ_LOCAL_PAIRING_OPTS.
 *
 * Caches the controller's pairing options and maximum encryption key
 * size, but only while the device is in the SETUP or CONFIG phase.
 */
static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}
1000
/* Command Complete handler for HCI_OP_READ_PAGE_SCAN_ACTIVITY.
 *
 * Caches the controller's page-scan interval/window, but only during
 * initialization; at runtime the cached values are maintained by the
 * write handler instead.
 */
static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}
1018
/* Command Complete handler for HCI_OP_WRITE_PAGE_SCAN_ACTIVITY.
 *
 * On success, mirrors the interval/window values from the command we
 * sent (retrieved via hci_sent_cmd_data()) into hdev.
 */
static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}
1039
/* Command Complete handler for HCI_OP_READ_PAGE_SCAN_TYPE.
 *
 * Caches the controller's page-scan type, but only during init; at
 * runtime the write handler keeps the cached value current.
 */
static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}
1055
/* Command Complete handler for HCI_OP_WRITE_PAGE_SCAN_TYPE.
 *
 * On success, mirrors the page-scan type from the command we sent
 * into hdev->page_scan_type.
 */
static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}
1073
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE.
 *
 * Caches the block-based flow-control parameters (max ACL length, block
 * length and total block count) and seeds the free-block counter.
 */
static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All blocks start out free */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	return rp->status;
}
1095
/* Command Complete handler for HCI_OP_READ_CLOCK.
 *
 * The sent command's "which" field selects the target: 0x00 means the
 * local clock (stored in hdev->clock); otherwise the reply carries the
 * piconet clock and accuracy for the connection identified by the reply
 * handle, stored on the matching hci_conn.
 */
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		/* Local clock requested */
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1129
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * Caches all reported AMP controller capabilities (bandwidth, latency,
 * PDU size, PAL capabilities, flush timeouts, ...) on hdev.
 */
static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	return rp->status;
}
1153
/* Command Complete handler for HCI_OP_READ_INQ_RSP_TX_POWER: cache the
 * inquiry-response TX power level.
 */
static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}
1168
/* Command Complete handler for HCI_OP_READ_DEF_ERR_DATA_REPORTING:
 * cache the default erroneous-data-reporting setting.
 */
static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}
1183
/* Command Complete handler for HCI_OP_WRITE_DEF_ERR_DATA_REPORTING.
 *
 * On success, mirrors the value from the command we sent into
 * hdev->err_data_reporting.
 */
static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}
1203
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Always notifies mgmt of the outcome (even on failure).  On success,
 * records the PIN length from the sent command on the matching ACL
 * connection so it is available when the link key is created.
 */
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1233
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY: forward the
 * outcome to mgmt (regardless of status) when mgmt is in use.
 */
static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1251
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE.
 *
 * Caches the LE ACL MTU and packet count and seeds the LE free-packet
 * counter.  Only a non-zero MTU is validated against HCI_MIN_LE_MTU —
 * presumably because an LE MTU of 0 means LE traffic shares the BR/EDR
 * buffers (per the Core spec for LE Read Buffer Size); verify against
 * the TX scheduler.
 */
static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}
1274
1275 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1276                                         struct sk_buff *skb)
1277 {
1278         struct hci_rp_le_read_local_features *rp = data;
1279
1280         BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1281
1282         if (rp->status)
1283                 return rp->status;
1284
1285         memcpy(hdev->le_features, rp->features, 8);
1286
1287         return rp->status;
1288 }
1289
/* Command Complete handler for HCI_OP_LE_READ_ADV_TX_POWER: cache the
 * advertising TX power level.
 */
static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}
1304
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY: forward the
 * outcome (including failures) to mgmt when mgmt is in use.
 */
static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1322
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY: forward
 * the outcome (including failures) to mgmt when mgmt is in use.
 */
static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1340
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY: forward the
 * outcome (including failures) to mgmt when mgmt is in use.
 */
static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1358
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY: forward
 * the outcome (including failures) to mgmt when mgmt is in use.
 */
static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1376
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 *
 * Only logs the status here; the reply payload is consumed by the
 * command's response callback, not by this handler.
 */
static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1386
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Only logs the status here; the reply payload is consumed by the
 * command's response callback, not by this handler.
 */
static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1396
/* Command Complete handler for HCI_OP_LE_SET_RANDOM_ADDR.
 *
 * On success, records the random address that was programmed.  If that
 * address is the current RPA, clears HCI_RPA_EXPIRED and re-arms the
 * RPA expiry timer for hdev->rpa_timeout seconds.
 */
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1426
/* Command Complete handler for HCI_OP_LE_SET_DEFAULT_PHY.
 *
 * On success, mirrors the TX/RX PHY preferences from the command we
 * sent into hdev.
 */
static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}
1451
/* Command Complete handler for HCI_OP_LE_SET_ADV_SET_RAND_ADDR.
 *
 * On success, records the random address on the advertising instance
 * the command targeted, and re-arms that instance's RPA expiry timer
 * when the address is the current RPA.
 */
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Only update a real adv instance (handle != 0x00): handle 0x00
	 * shall use HCI_OP_LE_SET_RANDOM_ADDR instead, since that command
	 * covers both extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1489
/* Command Complete handler for HCI_OP_LE_REMOVE_ADV_SET.
 *
 * On success, removes the matching local advertising instance and, if
 * the removal succeeded, notifies mgmt (attributing the change to the
 * socket that issued the pending command).
 */
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}
1517
/* Command Complete handler for HCI_OP_LE_CLEAR_ADV_SETS.
 *
 * On success, removes every local advertising instance, notifying mgmt
 * for each one that is successfully removed.  The _safe iterator is
 * required because hci_remove_adv_instance() deletes the list entry.
 */
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1548
/* Command Complete handler for HCI_OP_LE_READ_TRANSMIT_POWER: cache the
 * controller's minimum and maximum LE TX power levels.
 */
static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}
1564
/* Command Complete handler for HCI_OP_LE_SET_PRIVACY_MODE.
 *
 * On success, mirrors the privacy mode from the command we sent onto
 * the matching connection parameters.  WRITE_ONCE() is used for the
 * store — presumably paired with lockless READ_ONCE() readers of
 * privacy_mode elsewhere; confirm at the read sites.
 */
static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}
1591
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE (legacy
 * advertising).
 *
 * Tracks the HCI_LE_ADV flag to match the enable value we sent.  When
 * enabling while a peripheral-side connection attempt is in progress,
 * arms the LE connection timeout so a failed attempt cannot hang.
 */
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1630
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_ENABLE (extended
 * advertising).
 *
 * Updates per-instance enabled state and the global HCI_LE_ADV flag:
 * - Enable: set HCI_LE_ADV, mark the targeted instance enabled (unless
 *   it is a periodic one), and arm the LE connection timeout if a
 *   peripheral-side connection attempt is in progress.
 * - Disable of specific set(s): mark the instance disabled, but keep
 *   HCI_LE_ADV if any other instance is still enabled.
 * - Disable-all (num_of_sets == 0): mark every instance disabled and
 *   clear HCI_LE_ADV.
 */
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	/* The first per-set entry follows the fixed header */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv && !adv->periodic)
			adv->enabled = true;

		/* Arm a timeout for peripheral-side connection initiation */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1695
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM (legacy
 * scanning): on success, mirror the scan type (active/passive) we sent
 * into hdev->le_scan_type.
 */
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1719
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_PARAMS (extended
 * scanning).
 *
 * Mirrors the scan type from the first per-PHY parameter entry of the
 * command we sent — note only the first entry is inspected; presumably
 * all PHYs are configured with the same type by the sender.
 */
static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	/* First per-PHY parameter block follows the fixed header */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1746
1747 static bool has_pending_adv_report(struct hci_dev *hdev)
1748 {
1749         struct discovery_state *d = &hdev->discovery;
1750
1751         return bacmp(&d->last_adv_addr, BDADDR_ANY);
1752 }
1753
1754 static void clear_pending_adv_report(struct hci_dev *hdev)
1755 {
1756         struct discovery_state *d = &hdev->discovery;
1757
1758         bacpy(&d->last_adv_addr, BDADDR_ANY);
1759         d->last_adv_data_len = 0;
1760 }
1761
/* Stash an advertising report in the discovery state so it can be
 * merged with a following scan-response (or flushed when scanning
 * stops).  Reports longer than the controller's maximum advertising
 * data length are silently dropped to avoid overflowing last_adv_data.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1778
/* Common completion logic for legacy and extended LE scan enable
 * commands.
 *
 * LE_SCAN_ENABLE: set HCI_LE_SCAN, drop any stale pending advertising
 * report when actively scanning, and (for mesh) move discovery to
 * FINDING.
 *
 * LE_SCAN_DISABLE: flush the pending advertising report to mgmt, cancel
 * the scan-disable timer, clear HCI_LE_SCAN, and either mark discovery
 * stopped (if scanning was interrupted by a connect) or kick the
 * re-enable-advertising work.
 *
 * Any other value is a reserved parameter and is only logged.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		if (hci_dev_test_flag(hdev, HCI_MESH))
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1834
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE (legacy):
 * delegate state updates to le_set_scan_enable_complete() with the
 * enable value from the command we sent.
 */
static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}
1854
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE
 * (extended): delegate state updates to le_set_scan_enable_complete()
 * with the enable value from the command we sent.
 */
static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}
1874
/* Command Complete handler for HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
 * cache the number of advertising sets the controller supports.
 */
static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}
1890
/* Command Complete handler for HCI_OP_LE_READ_ACCEPT_LIST_SIZE: cache
 * the controller's filter accept list capacity.
 */
static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}
1905
/* Command Complete handler for HCI_OP_LE_CLEAR_ACCEPT_LIST: on success,
 * empty the host-side mirror of the controller's accept list.
 */
static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
1922
/* Command Complete handler for HCI_OP_LE_ADD_TO_ACCEPT_LIST: on
 * success, add the address from the command we sent to the host-side
 * mirror of the controller's accept list.
 */
static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1945
1946 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1947                                          struct sk_buff *skb)
1948 {
1949         struct hci_cp_le_del_from_accept_list *sent;
1950         struct hci_ev_status *rp = data;
1951
1952         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1953
1954         if (rp->status)
1955                 return rp->status;
1956
1957         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1958         if (!sent)
1959                 return rp->status;
1960
1961         hci_dev_lock(hdev);
1962         hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1963                             sent->bdaddr_type);
1964         hci_dev_unlock(hdev);
1965
1966         return rp->status;
1967 }
1968
1969 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1970                                           struct sk_buff *skb)
1971 {
1972         struct hci_rp_le_read_supported_states *rp = data;
1973
1974         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1975
1976         if (rp->status)
1977                 return rp->status;
1978
1979         memcpy(hdev->le_states, rp->le_states, 8);
1980
1981         return rp->status;
1982 }
1983
1984 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1985                                       struct sk_buff *skb)
1986 {
1987         struct hci_rp_le_read_def_data_len *rp = data;
1988
1989         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1990
1991         if (rp->status)
1992                 return rp->status;
1993
1994         hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1995         hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1996
1997         return rp->status;
1998 }
1999
2000 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
2001                                        struct sk_buff *skb)
2002 {
2003         struct hci_cp_le_write_def_data_len *sent;
2004         struct hci_ev_status *rp = data;
2005
2006         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2007
2008         if (rp->status)
2009                 return rp->status;
2010
2011         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2012         if (!sent)
2013                 return rp->status;
2014
2015         hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2016         hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2017
2018         return rp->status;
2019 }
2020
2021 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2022                                        struct sk_buff *skb)
2023 {
2024         struct hci_cp_le_add_to_resolv_list *sent;
2025         struct hci_ev_status *rp = data;
2026
2027         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2028
2029         if (rp->status)
2030                 return rp->status;
2031
2032         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2033         if (!sent)
2034                 return rp->status;
2035
2036         hci_dev_lock(hdev);
2037         hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2038                                 sent->bdaddr_type, sent->peer_irk,
2039                                 sent->local_irk);
2040         hci_dev_unlock(hdev);
2041
2042         return rp->status;
2043 }
2044
2045 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2046                                          struct sk_buff *skb)
2047 {
2048         struct hci_cp_le_del_from_resolv_list *sent;
2049         struct hci_ev_status *rp = data;
2050
2051         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2052
2053         if (rp->status)
2054                 return rp->status;
2055
2056         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2057         if (!sent)
2058                 return rp->status;
2059
2060         hci_dev_lock(hdev);
2061         hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2062                             sent->bdaddr_type);
2063         hci_dev_unlock(hdev);
2064
2065         return rp->status;
2066 }
2067
2068 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2069                                       struct sk_buff *skb)
2070 {
2071         struct hci_ev_status *rp = data;
2072
2073         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2074
2075         if (rp->status)
2076                 return rp->status;
2077
2078         hci_dev_lock(hdev);
2079         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2080         hci_dev_unlock(hdev);
2081
2082         return rp->status;
2083 }
2084
2085 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2086                                           struct sk_buff *skb)
2087 {
2088         struct hci_rp_le_read_resolv_list_size *rp = data;
2089
2090         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2091
2092         if (rp->status)
2093                 return rp->status;
2094
2095         hdev->le_resolv_list_size = rp->size;
2096
2097         return rp->status;
2098 }
2099
2100 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2101                                                struct sk_buff *skb)
2102 {
2103         struct hci_ev_status *rp = data;
2104         __u8 *sent;
2105
2106         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2107
2108         if (rp->status)
2109                 return rp->status;
2110
2111         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2112         if (!sent)
2113                 return rp->status;
2114
2115         hci_dev_lock(hdev);
2116
2117         if (*sent)
2118                 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2119         else
2120                 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2121
2122         hci_dev_unlock(hdev);
2123
2124         return rp->status;
2125 }
2126
2127 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2128                                       struct sk_buff *skb)
2129 {
2130         struct hci_rp_le_read_max_data_len *rp = data;
2131
2132         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2133
2134         if (rp->status)
2135                 return rp->status;
2136
2137         hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2138         hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2139         hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2140         hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2141
2142         return rp->status;
2143 }
2144
/* Handle the Command Complete event for HCI Write LE Host Supported:
 * mirror the host feature bits that were just written into
 * hdev->features[1][0] and the HCI_LE_ENABLED/HCI_ADVERTISING flags.
 */
static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The event carries only a status; the written values come from
	 * the command we sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* LE disabled: clear the advertising flag as well */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}
2180
2181 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2182                                struct sk_buff *skb)
2183 {
2184         struct hci_cp_le_set_adv_param *cp;
2185         struct hci_ev_status *rp = data;
2186
2187         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2188
2189         if (rp->status)
2190                 return rp->status;
2191
2192         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2193         if (!cp)
2194                 return rp->status;
2195
2196         hci_dev_lock(hdev);
2197         hdev->adv_addr_type = cp->own_address_type;
2198         hci_dev_unlock(hdev);
2199
2200         return rp->status;
2201 }
2202
/* Handle the Command Complete event for HCI LE Set Extended Advertising
 * Parameters: cache the own address type and the controller-selected TX
 * power for the instance, then refresh the advertising data (the TX
 * power may be part of it).
 */
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The instance handle and address type come from the sent command */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}
2236
2237 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2238                            struct sk_buff *skb)
2239 {
2240         struct hci_rp_read_rssi *rp = data;
2241         struct hci_conn *conn;
2242
2243         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2244
2245         if (rp->status)
2246                 return rp->status;
2247
2248         hci_dev_lock(hdev);
2249
2250         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2251         if (conn)
2252                 conn->rssi = rp->rssi;
2253
2254         hci_dev_unlock(hdev);
2255
2256         return rp->status;
2257 }
2258
2259 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2260                                struct sk_buff *skb)
2261 {
2262         struct hci_cp_read_tx_power *sent;
2263         struct hci_rp_read_tx_power *rp = data;
2264         struct hci_conn *conn;
2265
2266         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2267
2268         if (rp->status)
2269                 return rp->status;
2270
2271         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2272         if (!sent)
2273                 return rp->status;
2274
2275         hci_dev_lock(hdev);
2276
2277         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2278         if (!conn)
2279                 goto unlock;
2280
2281         switch (sent->type) {
2282         case 0x00:
2283                 conn->tx_power = rp->tx_power;
2284                 break;
2285         case 0x01:
2286                 conn->max_tx_power = rp->tx_power;
2287                 break;
2288         }
2289
2290 unlock:
2291         hci_dev_unlock(hdev);
2292         return rp->status;
2293 }
2294
2295 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2296                                       struct sk_buff *skb)
2297 {
2298         struct hci_ev_status *rp = data;
2299         u8 *mode;
2300
2301         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2302
2303         if (rp->status)
2304                 return rp->status;
2305
2306         mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2307         if (mode)
2308                 hdev->ssp_debug_mode = *mode;
2309
2310         return rp->status;
2311 }
2312
2313 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2314 {
2315         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2316
2317         if (status)
2318                 return;
2319
2320         if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2321                 set_bit(HCI_INQUIRY, &hdev->flags);
2322 }
2323
/* Handle the Command Status event for HCI Create Connection.
 *
 * On failure, close and delete any connection object still in BT_CONNECT
 * state. On success, make sure a connection object exists so that the
 * later Connection Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Inform upper layers and drop the half-open conn */
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
						  HCI_ROLE_MASTER);
			if (IS_ERR(conn))
				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
		}
	}

	hci_dev_unlock(hdev);
}
2358
/* Handle the Command Status event for HCI Add SCO Connection.
 *
 * Only failures are handled here: find the SCO link hanging off the ACL
 * connection identified by the sent handle and tear it down.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl;
	struct hci_link *link;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Success is handled through the later connection events */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* Tear down the first SCO link queued on this ACL conn */
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}
2395
2396 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2397 {
2398         struct hci_cp_auth_requested *cp;
2399         struct hci_conn *conn;
2400
2401         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2402
2403         if (!status)
2404                 return;
2405
2406         cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2407         if (!cp)
2408                 return;
2409
2410         hci_dev_lock(hdev);
2411
2412         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2413         if (conn) {
2414                 if (conn->state == BT_CONFIG) {
2415                         hci_connect_cfm(conn, status);
2416                         hci_conn_drop(conn);
2417                 }
2418         }
2419
2420         hci_dev_unlock(hdev);
2421 }
2422
2423 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2424 {
2425         struct hci_cp_set_conn_encrypt *cp;
2426         struct hci_conn *conn;
2427
2428         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2429
2430         if (!status)
2431                 return;
2432
2433         cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2434         if (!cp)
2435                 return;
2436
2437         hci_dev_lock(hdev);
2438
2439         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2440         if (conn) {
2441                 if (conn->state == BT_CONFIG) {
2442                         hci_connect_cfm(conn, status);
2443                         hci_conn_drop(conn);
2444                 }
2445         }
2446
2447         hci_dev_unlock(hdev);
2448 }
2449
2450 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2451                                     struct hci_conn *conn)
2452 {
2453         if (conn->state != BT_CONFIG || !conn->out)
2454                 return 0;
2455
2456         if (conn->pending_sec_level == BT_SECURITY_SDP)
2457                 return 0;
2458
2459         /* Only request authentication for SSP connections or non-SSP
2460          * devices with sec_level MEDIUM or HIGH or if MITM protection
2461          * is requested.
2462          */
2463         if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2464             conn->pending_sec_level != BT_SECURITY_FIPS &&
2465             conn->pending_sec_level != BT_SECURITY_HIGH &&
2466             conn->pending_sec_level != BT_SECURITY_MEDIUM)
2467                 return 0;
2468
2469         return 1;
2470 }
2471
2472 static int hci_resolve_name(struct hci_dev *hdev,
2473                                    struct inquiry_entry *e)
2474 {
2475         struct hci_cp_remote_name_req cp;
2476
2477         memset(&cp, 0, sizeof(cp));
2478
2479         bacpy(&cp.bdaddr, &e->data.bdaddr);
2480         cp.pscan_rep_mode = e->data.pscan_rep_mode;
2481         cp.pscan_mode = e->data.pscan_mode;
2482         cp.clock_offset = e->data.clock_offset;
2483
2484         return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2485 }
2486
2487 static bool hci_resolve_next_name(struct hci_dev *hdev)
2488 {
2489         struct discovery_state *discov = &hdev->discovery;
2490         struct inquiry_entry *e;
2491
2492         if (list_empty(&discov->resolve))
2493                 return false;
2494
2495         /* We should stop if we already spent too much time resolving names. */
2496         if (time_after(jiffies, discov->name_resolve_timeout)) {
2497                 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2498                 return false;
2499         }
2500
2501         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2502         if (!e)
2503                 return false;
2504
2505         if (hci_resolve_name(hdev, e) == 0) {
2506                 e->name_state = NAME_PENDING;
2507                 return true;
2508         }
2509
2510         return false;
2511 }
2512
/* Common handling after a remote name request completes (name set) or
 * fails (name == NULL): report the result through mgmt, advance the
 * name-resolution phase of discovery, and stop discovery when there is
 * nothing left to resolve.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);

	/* A NULL name means the lookup failed for this entry */
	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2556
/* Handle the Command Status event for HCI Remote Name Request.
 *
 * Only failures are handled here: notify the discovery machinery of the
 * failed lookup and, if the connection still needs authentication, send
 * the Authentication Requested command directly.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name tells the discovery code this lookup failed */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2599
2600 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2601 {
2602         struct hci_cp_read_remote_features *cp;
2603         struct hci_conn *conn;
2604
2605         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2606
2607         if (!status)
2608                 return;
2609
2610         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2611         if (!cp)
2612                 return;
2613
2614         hci_dev_lock(hdev);
2615
2616         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2617         if (conn) {
2618                 if (conn->state == BT_CONFIG) {
2619                         hci_connect_cfm(conn, status);
2620                         hci_conn_drop(conn);
2621                 }
2622         }
2623
2624         hci_dev_unlock(hdev);
2625 }
2626
2627 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2628 {
2629         struct hci_cp_read_remote_ext_features *cp;
2630         struct hci_conn *conn;
2631
2632         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2633
2634         if (!status)
2635                 return;
2636
2637         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2638         if (!cp)
2639                 return;
2640
2641         hci_dev_lock(hdev);
2642
2643         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2644         if (conn) {
2645                 if (conn->state == BT_CONFIG) {
2646                         hci_connect_cfm(conn, status);
2647                         hci_conn_drop(conn);
2648                 }
2649         }
2650
2651         hci_dev_unlock(hdev);
2652 }
2653
/* Tear down the pending SCO/eSCO link of the ACL connection identified
 * by @handle after an (e)SCO setup command failed with @status.
 */
static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
				       __u8 status)
{
	struct hci_conn *acl;
	struct hci_link *link;

	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* Tear down the first link queued on this ACL connection */
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}
2678
2679 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2680 {
2681         struct hci_cp_setup_sync_conn *cp;
2682
2683         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2684
2685         if (!status)
2686                 return;
2687
2688         cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2689         if (!cp)
2690                 return;
2691
2692         hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2693 }
2694
2695 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2696 {
2697         struct hci_cp_enhanced_setup_sync_conn *cp;
2698
2699         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2700
2701         if (!status)
2702                 return;
2703
2704         cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2705         if (!cp)
2706                 return;
2707
2708         hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2709 }
2710
2711 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2712 {
2713         struct hci_cp_sniff_mode *cp;
2714         struct hci_conn *conn;
2715
2716         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2717
2718         if (!status)
2719                 return;
2720
2721         cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2722         if (!cp)
2723                 return;
2724
2725         hci_dev_lock(hdev);
2726
2727         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2728         if (conn) {
2729                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2730
2731                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2732                         hci_sco_setup(conn, status);
2733         }
2734
2735         hci_dev_unlock(hdev);
2736 }
2737
2738 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2739 {
2740         struct hci_cp_exit_sniff_mode *cp;
2741         struct hci_conn *conn;
2742
2743         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2744
2745         if (!status)
2746                 return;
2747
2748         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2749         if (!cp)
2750                 return;
2751
2752         hci_dev_lock(hdev);
2753
2754         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2755         if (conn) {
2756                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2757
2758                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2759                         hci_sco_setup(conn, status);
2760         }
2761
2762         hci_dev_unlock(hdev);
2763 }
2764
/* Handle the Command Status event for HCI Disconnect.
 *
 * Cleanup normally happens when HCI_EV_DISCONN_COMPLETE arrives; this
 * path only acts when the command failed or the device is suspended.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* Restart advertising for an LE slave link whose
		 * disconnect command failed.
		 */
		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		/* Inform sockets conn is gone before we delete it */
		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);

		goto done;
	}

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-arm auto-connection for devices configured for it */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
2846
2847 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2848 {
2849         /* When using controller based address resolution, then the new
2850          * address types 0x02 and 0x03 are used. These types need to be
2851          * converted back into either public address or random address type
2852          */
2853         switch (type) {
2854         case ADDR_LE_DEV_PUBLIC_RESOLVED:
2855                 if (resolved)
2856                         *resolved = true;
2857                 return ADDR_LE_DEV_PUBLIC;
2858         case ADDR_LE_DEV_RANDOM_RESOLVED:
2859                 if (resolved)
2860                         *resolved = true;
2861                 return ADDR_LE_DEV_RANDOM;
2862         }
2863
2864         if (resolved)
2865                 *resolved = false;
2866         return type;
2867 }
2868
/* Record the initiator/responder address information of an outgoing LE
 * connection on the matching hci_conn, for later use by SMP.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Map controller-resolved address types back to public/random */
	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);
}
2895
2896 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2897 {
2898         struct hci_cp_le_create_conn *cp;
2899
2900         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2901
2902         /* All connection failure handling is taken care of by the
2903          * hci_conn_failed function which is triggered by the HCI
2904          * request completion callbacks used for connecting.
2905          */
2906         if (status)
2907                 return;
2908
2909         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2910         if (!cp)
2911                 return;
2912
2913         hci_dev_lock(hdev);
2914
2915         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2916                           cp->own_address_type, cp->filter_policy);
2917
2918         hci_dev_unlock(hdev);
2919 }
2920
2921 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2922 {
2923         struct hci_cp_le_ext_create_conn *cp;
2924
2925         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2926
2927         /* All connection failure handling is taken care of by the
2928          * hci_conn_failed function which is triggered by the HCI
2929          * request completion callbacks used for connecting.
2930          */
2931         if (status)
2932                 return;
2933
2934         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2935         if (!cp)
2936                 return;
2937
2938         hci_dev_lock(hdev);
2939
2940         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2941                           cp->own_addr_type, cp->filter_policy);
2942
2943         hci_dev_unlock(hdev);
2944 }
2945
2946 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2947 {
2948         struct hci_cp_le_read_remote_features *cp;
2949         struct hci_conn *conn;
2950
2951         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2952
2953         if (!status)
2954                 return;
2955
2956         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2957         if (!cp)
2958                 return;
2959
2960         hci_dev_lock(hdev);
2961
2962         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2963         if (conn) {
2964                 if (conn->state == BT_CONFIG) {
2965                         hci_connect_cfm(conn, status);
2966                         hci_conn_drop(conn);
2967                 }
2968         }
2969
2970         hci_dev_unlock(hdev);
2971 }
2972
/* Handle the command status for HCI_OP_LE_START_ENC.
 *
 * Only failures need handling: if the controller rejected the request
 * to start encryption on a connected link, the link is disconnected
 * with an authentication failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* On success an encryption change event is expected instead. */
	if (!status)
		return;

	hci_dev_lock(hdev);

	/* Recover the connection handle from the command we sent. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	/* Encryption could not be started; tear the link down. */
	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3002
3003 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3004 {
3005         struct hci_cp_switch_role *cp;
3006         struct hci_conn *conn;
3007
3008         BT_DBG("%s status 0x%2.2x", hdev->name, status);
3009
3010         if (!status)
3011                 return;
3012
3013         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3014         if (!cp)
3015                 return;
3016
3017         hci_dev_lock(hdev);
3018
3019         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3020         if (conn)
3021                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3022
3023         hci_dev_unlock(hdev);
3024 }
3025
3026 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3027                                      struct sk_buff *skb)
3028 {
3029         struct hci_ev_status *ev = data;
3030         struct discovery_state *discov = &hdev->discovery;
3031         struct inquiry_entry *e;
3032
3033         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3034
3035         if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3036                 return;
3037
3038         smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3039         wake_up_bit(&hdev->flags, HCI_INQUIRY);
3040
3041         if (!hci_dev_test_flag(hdev, HCI_MGMT))
3042                 return;
3043
3044         hci_dev_lock(hdev);
3045
3046         if (discov->state != DISCOVERY_FINDING)
3047                 goto unlock;
3048
3049         if (list_empty(&discov->resolve)) {
3050                 /* When BR/EDR inquiry is active and no LE scanning is in
3051                  * progress, then change discovery state to indicate completion.
3052                  *
3053                  * When running LE scanning and BR/EDR inquiry simultaneously
3054                  * and the LE scan already finished, then change the discovery
3055                  * state to indicate completion.
3056                  */
3057                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3058                     !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3059                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3060                 goto unlock;
3061         }
3062
3063         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3064         if (e && hci_resolve_name(hdev, e) == 0) {
3065                 e->name_state = NAME_PENDING;
3066                 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3067                 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3068         } else {
3069                 /* When BR/EDR inquiry is active and no LE scanning is in
3070                  * progress, then change discovery state to indicate completion.
3071                  *
3072                  * When running LE scanning and BR/EDR inquiry simultaneously
3073                  * and the LE scan already finished, then change the discovery
3074                  * state to indicate completion.
3075                  */
3076                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3077                     !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3078                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3079         }
3080
3081 unlock:
3082         hci_dev_unlock(hdev);
3083 }
3084
3085 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3086                                    struct sk_buff *skb)
3087 {
3088         struct hci_ev_inquiry_result *ev = edata;
3089         struct inquiry_data data;
3090         int i;
3091
3092         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3093                              flex_array_size(ev, info, ev->num)))
3094                 return;
3095
3096         bt_dev_dbg(hdev, "num %d", ev->num);
3097
3098         if (!ev->num)
3099                 return;
3100
3101         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3102                 return;
3103
3104         hci_dev_lock(hdev);
3105
3106         for (i = 0; i < ev->num; i++) {
3107                 struct inquiry_info *info = &ev->info[i];
3108                 u32 flags;
3109
3110                 bacpy(&data.bdaddr, &info->bdaddr);
3111                 data.pscan_rep_mode     = info->pscan_rep_mode;
3112                 data.pscan_period_mode  = info->pscan_period_mode;
3113                 data.pscan_mode         = info->pscan_mode;
3114                 memcpy(data.dev_class, info->dev_class, 3);
3115                 data.clock_offset       = info->clock_offset;
3116                 data.rssi               = HCI_RSSI_INVALID;
3117                 data.ssp_mode           = 0x00;
3118
3119                 flags = hci_inquiry_cache_update(hdev, &data, false);
3120
3121                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3122                                   info->dev_class, HCI_RSSI_INVALID,
3123                                   flags, NULL, 0, NULL, 0, 0);
3124         }
3125
3126         hci_dev_unlock(hdev);
3127 }
3128
/* Handle the HCI Connection Complete event for BR/EDR ACL and (e)SCO
 * links: locate or create the matching hci_conn, assign its handle,
 * move it into BT_CONFIG/BT_CONNECTED and kick off follow-up commands
 * (remote features, packet type), or report the failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add_unset(hdev, ev->link_type,
						  &ev->bdaddr, HCI_ROLE_SLAVE);
			if (IS_ERR(conn)) {
				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
				goto unlock;
			}
		} else {
			/* For SCO, fall back to the connection tracked under
			 * the ESCO link type and retag it as SCO.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status)
			goto done;

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming connections without SSP and without a
			 * stored link key get the shorter pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* "Link key request" completed ahead of "connect request" completes */
		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
		    ev->link_type == ACL_LINK) {
			struct link_key *key;
			struct hci_cp_read_enc_key_size cp;

			key = hci_find_link_key(hdev, &ev->bdaddr);
			if (key) {
				set_bit(HCI_CONN_ENCRYPT, &conn->flags);

				/* Query the real key size when the controller
				 * supports it; otherwise assume the default.
				 */
				if (!read_key_size_capable(hdev)) {
					conn->enc_key_size = HCI_LINK_KEY_SIZE;
				} else {
					cp.handle = cpu_to_le16(conn->handle);
					if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
							 sizeof(cp), &cp)) {
						bt_dev_err(hdev, "sending read key size failed");
						conn->enc_key_size = HCI_LINK_KEY_SIZE;
					}
				}

				hci_encrypt_cfm(conn, ev->status);
			}
		}

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		/* Notify the driver so it can configure for SCO audio. */
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);
}
3282
3283 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3284 {
3285         struct hci_cp_reject_conn_req cp;
3286
3287         bacpy(&cp.bdaddr, bdaddr);
3288         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3289         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3290 }
3291
/* Handle the HCI Connection Request event: decide whether to accept or
 * reject an incoming BR/EDR connection, based on the protocol layers,
 * the reject/accept lists and the connectable state, then either send
 * the appropriate accept command or defer the decision to upper layers.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Let the protocol layers veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class for this peer, if known. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
					  HCI_ROLE_SLAVE);
		if (IS_ERR(conn)) {
			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		/* ACL links (and SCO on non-eSCO controllers when not
		 * deferred) are accepted with a plain accept command.
		 */
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* eSCO-capable controller: accept with synchronous
		 * connection parameters.
		 */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol layer asked to defer; let it confirm later. */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3399
3400 static u8 hci_to_mgmt_reason(u8 err)
3401 {
3402         switch (err) {
3403         case HCI_ERROR_CONNECTION_TIMEOUT:
3404                 return MGMT_DEV_DISCONN_TIMEOUT;
3405         case HCI_ERROR_REMOTE_USER_TERM:
3406         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3407         case HCI_ERROR_REMOTE_POWER_OFF:
3408                 return MGMT_DEV_DISCONN_REMOTE;
3409         case HCI_ERROR_LOCAL_HOST_TERM:
3410                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3411         default:
3412                 return MGMT_DEV_DISCONN_UNKNOWN;
3413         }
3414 }
3415
/* Handle the HCI Disconnection Complete event: notify mgmt and the
 * upper protocol layers, re-arm auto-connect parameters, re-enable
 * advertising when appropriate and finally delete the connection.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A recorded authentication failure takes precedence over the
	 * controller-supplied disconnect reason.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	/* Re-queue the device for auto-connection according to its
	 * configured auto_connect policy.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect on actual link loss. */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			hci_update_passive_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3500
/* Handle the HCI Authentication Complete event: update the connection's
 * auth state, notify upper layers and, where required, follow up with
 * an encryption request.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Authentication succeeded; promote the pending security
		 * level to the effective one.
		 */
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* With SSP, encryption must follow successful
			 * authentication before the link is usable.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			/* An encryption request was queued behind the
			 * authentication; issue it now.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3564
/* Handle the HCI Remote Name Request Complete event: feed the resolved
 * name (or failure) into the mgmt name-resolution machinery, then start
 * authentication on the connection if it is needed and not yet pending.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name resolution is only tracked when mgmt is in use. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Kick off authentication unless a request is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3605
3606 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3607                                    struct sk_buff *skb)
3608 {
3609         struct hci_ev_encrypt_change *ev = data;
3610         struct hci_conn *conn;
3611
3612         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3613
3614         hci_dev_lock(hdev);
3615
3616         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3617         if (!conn)
3618                 goto unlock;
3619
3620         if (!ev->status) {
3621                 if (ev->encrypt) {
3622                         /* Encryption implies authentication */
3623                         set_bit(HCI_CONN_AUTH, &conn->flags);
3624                         set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3625                         conn->sec_level = conn->pending_sec_level;
3626
3627                         /* P-256 authentication key implies FIPS */
3628                         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3629                                 set_bit(HCI_CONN_FIPS, &conn->flags);
3630
3631                         if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3632                             conn->type == LE_LINK)
3633                                 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3634                 } else {
3635                         clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3636                         clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3637                 }
3638         }
3639
3640         /* We should disregard the current RPA and generate a new one
3641          * whenever the encryption procedure fails.
3642          */
3643         if (ev->status && conn->type == LE_LINK) {
3644                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3645                 hci_adv_instances_set_rpa_expired(hdev, true);
3646         }
3647
3648         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3649
3650         /* Check link security requirements are met */
3651         if (!hci_conn_check_link_mode(conn))
3652                 ev->status = HCI_ERROR_AUTH_FAILURE;
3653
3654         if (ev->status && conn->state == BT_CONNECTED) {
3655                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3656                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3657
3658                 /* Notify upper layers so they can cleanup before
3659                  * disconnecting.
3660                  */
3661                 hci_encrypt_cfm(conn, ev->status);
3662                 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3663                 hci_conn_drop(conn);
3664                 goto unlock;
3665         }
3666
3667         /* Try reading the encryption key size for encrypted ACL links */
3668         if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3669                 struct hci_cp_read_enc_key_size cp;
3670
3671                 /* Only send HCI_Read_Encryption_Key_Size if the
3672                  * controller really supports it. If it doesn't, assume
3673                  * the default size (16).
3674                  */
3675                 if (!read_key_size_capable(hdev)) {
3676                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3677                         goto notify;
3678                 }
3679
3680                 cp.handle = cpu_to_le16(conn->handle);
3681                 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3682                                  sizeof(cp), &cp)) {
3683                         bt_dev_err(hdev, "sending read key size failed");
3684                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3685                         goto notify;
3686                 }
3687
3688                 goto unlock;
3689         }
3690
3691         /* Set the default Authenticated Payload Timeout after
3692          * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3693          * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3694          * sent when the link is active and Encryption is enabled, the conn
3695          * type can be either LE or ACL and controller must support LMP Ping.
3696          * Ensure for AES-CCM encryption as well.
3697          */
3698         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3699             test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3700             ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3701              (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3702                 struct hci_cp_write_auth_payload_to cp;
3703
3704                 cp.handle = cpu_to_le16(conn->handle);
3705                 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3706                 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3707                                  sizeof(cp), &cp))
3708                         bt_dev_err(hdev, "write auth payload timeout failed");
3709         }
3710
3711 notify:
3712         hci_encrypt_cfm(conn, ev->status);
3713
3714 unlock:
3715         hci_dev_unlock(hdev);
3716 }
3717
3718 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3719                                              struct sk_buff *skb)
3720 {
3721         struct hci_ev_change_link_key_complete *ev = data;
3722         struct hci_conn *conn;
3723
3724         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3725
3726         hci_dev_lock(hdev);
3727
3728         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3729         if (conn) {
3730                 if (!ev->status)
3731                         set_bit(HCI_CONN_SECURE, &conn->flags);
3732
3733                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3734
3735                 hci_key_change_cfm(conn, ev->status);
3736         }
3737
3738         hci_dev_unlock(hdev);
3739 }
3740
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote device's page 0 feature bits and, while the
 * connection is still in BT_CONFIG, drives the next step of connection
 * setup: request extended features page 1 when both sides support it,
 * otherwise either resolve the remote name or notify mgmt, and finally
 * complete the connection if no outgoing authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache page 0 of the remote feature mask on success */
	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Setup steps below only apply while the link is being configured */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both controllers support extended features, fetch page 1
	 * next; connection setup continues from that event handler.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before mgmt sees the device connected;
	 * if mgmt was already notified, report the connection directly.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else {
		mgmt_device_connected(hdev, conn, NULL, 0);
	}

	/* No authentication pending: the connection is fully set up */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3790
3791 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3792 {
3793         cancel_delayed_work(&hdev->cmd_timer);
3794
3795         rcu_read_lock();
3796         if (!test_bit(HCI_RESET, &hdev->flags)) {
3797                 if (ncmd) {
3798                         cancel_delayed_work(&hdev->ncmd_timer);
3799                         atomic_set(&hdev->cmd_cnt, 1);
3800                 } else {
3801                         if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3802                                 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3803                                                    HCI_NCMD_TIMEOUT);
3804                 }
3805         }
3806         rcu_read_unlock();
3807 }
3808
3809 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3810                                         struct sk_buff *skb)
3811 {
3812         struct hci_rp_le_read_buffer_size_v2 *rp = data;
3813
3814         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3815
3816         if (rp->status)
3817                 return rp->status;
3818
3819         hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3820         hdev->le_pkts  = rp->acl_max_pkt;
3821         hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3822         hdev->iso_pkts = rp->iso_max_pkt;
3823
3824         hdev->le_cnt  = hdev->le_pkts;
3825         hdev->iso_cnt = hdev->iso_pkts;
3826
3827         BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3828                hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3829
3830         if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
3831                 return HCI_ERROR_INVALID_PARAMETERS;
3832
3833         return rp->status;
3834 }
3835
3836 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3837 {
3838         struct hci_conn *conn, *tmp;
3839
3840         lockdep_assert_held(&hdev->lock);
3841
3842         list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3843                 if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3844                     conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3845                         continue;
3846
3847                 if (HCI_CONN_HANDLE_UNSET(conn->handle))
3848                         hci_conn_failed(conn, status);
3849         }
3850 }
3851
/* Handle the Command Complete for HCI_OP_LE_SET_CIG_PARAMS.
 *
 * Cross-checks the response against the command that was sent, fails
 * the unbound CIS connections of the CIG on error, and otherwise
 * assigns the returned connection handles to the matching CIS
 * connections. Schedules the pending CIS creation if any connection
 * was already in BT_CONNECT.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_cp_le_set_cig_params *cp;
	struct hci_conn *conn;
	u8 status = rp->status;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* A successful response must match the command it answers; a
	 * mismatch (or a missing sent command) is treated as a failure.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
			    rp->cig_id != cp->cig_id)) {
		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
		status = HCI_ERROR_UNSPECIFIED;
	}

	hci_dev_lock(hdev);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
	 *
	 * If the Status return parameter is non-zero, then the state of the CIG
	 * and its CIS configurations shall not be changed by the command. If
	 * the CIG did not already exist, it shall not be created.
	 */
	if (status) {
		/* Keep current configuration, fail only the unbound CIS */
		hci_unbound_cis_failed(hdev, rp->cig_id, status);
		goto unlock;
	}

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
	 *
	 * If the Status return parameter is zero, then the Controller shall
	 * set the Connection_Handle arrayed return parameter to the connection
	 * handle(s) corresponding to the CIS configurations specified in
	 * the CIS_IDs command parameter, in the same order.
	 */
	for (i = 0; i < rp->num_handles; ++i) {
		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
						cp->cis[i].cis_id);
		/* Skip CIS not bound to a peer address */
		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
			continue;

		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
			continue;

		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
			continue;

		/* A CIS already in BT_CONNECT is waiting to be created */
		if (conn->state == BT_CONNECT)
			pending = true;
	}

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);

	return rp->status;
}
3916
/* Handle the Command Complete for HCI_OP_LE_SETUP_ISO_PATH.
 *
 * On failure the ISO connection is torn down. On success the
 * connection is confirmed to upper layers, depending on the data path
 * direction that was configured, and mgmt is notified for BIG Sync
 * connections.
 */
static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_setup_iso_path *rp = data;
	struct hci_cp_le_setup_iso_path *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Path setup failed: report the error and delete the connection */
	if (rp->status) {
		hci_connect_cfm(conn, rp->status);
		hci_conn_del(conn);
		goto unlock;
	}

	switch (cp->direction) {
	/* Input (Host to Controller) */
	case 0x00:
		/* Only confirm connection if output only */
		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
			hci_connect_cfm(conn, rp->status);
		break;
	/* Output (Controller to Host) */
	case 0x01:
		/* Confirm connection since conn->iso_qos is always configured
		 * last.
		 */
		hci_connect_cfm(conn, rp->status);

		/* Notify device connected in case it is a BIG Sync */
		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
			mgmt_device_connected(hdev, conn, NULL, 0);

		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
3967
3968 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3969 {
3970         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3971 }
3972
3973 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3974                                    struct sk_buff *skb)
3975 {
3976         struct hci_ev_status *rp = data;
3977         struct hci_cp_le_set_per_adv_params *cp;
3978
3979         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3980
3981         if (rp->status)
3982                 return rp->status;
3983
3984         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3985         if (!cp)
3986                 return rp->status;
3987
3988         /* TODO: set the conn state */
3989         return rp->status;
3990 }
3991
/* Handle the Command Complete for HCI_OP_LE_SET_PER_ADV_ENABLE.
 *
 * On enable, sets the HCI_LE_PER_ADV device flag and marks the
 * advertising instance as enabled. On disable, clears HCI_LE_PER_ADV
 * only when no other periodic advertising instance remains enabled.
 */
static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_enable *cp;
	struct adv_info *adv = NULL, *n;
	u8 per_adv_cnt = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);

	if (cp->enable) {
		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);

		if (adv)
			adv->enabled = true;
	} else {
		/* If just one instance was disabled check if there are
		 * any other instance enabled before clearing HCI_LE_PER_ADV.
		 * The current periodic adv instance will be marked as
		 * disabled once extended advertising is also disabled.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
					 list) {
			if (adv->periodic && adv->enabled)
				per_adv_cnt++;
		}

		/* Another periodic instance is still running (the one
		 * just disabled is still counted; see comment above).
		 */
		if (per_adv_cnt > 1)
			goto unlock;

		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
	}

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
4041
/* Declare a Command Complete handler entry with a variable-length
 * response: _min is the smallest acceptable payload and _max the
 * largest before a warning is logged (see hci_cc_func()).
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

/* Fixed-length Command Complete handler entry */
#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

/* Handler entry for commands whose response is only a status byte */
#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))

/* Dispatch table mapping HCI opcodes to their Command Complete
 * handlers together with the expected response payload length bounds.
 */
static const struct hci_cc {
	u16  op;
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
		      hci_cc_remote_name_req_cancel),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
	       sizeof(struct hci_rp_read_data_block_size)),
	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
	       sizeof(struct hci_rp_read_flow_control_mode)),
	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
	       sizeof(struct hci_rp_read_local_amp_info)),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
	       sizeof(struct hci_rp_le_set_ext_adv_params)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};
4230
/* Validate the Command Complete payload length against the table entry
 * bounds and invoke its handler. Returns the handler's status, or
 * HCI_ERROR_UNSPECIFIED when the payload is shorter than min_len.
 */
static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
		      struct sk_buff *skb)
{
	void *data;

	if (skb->len < cc->min_len) {
		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
			   cc->op, skb->len, cc->min_len);
		return HCI_ERROR_UNSPECIFIED;
	}

	/* Just warn if the length is over max_len: it may still be
	 * possible to partially parse the cc, so leave it to the
	 * callback to decide if that is acceptable.
	 */
	if (skb->len > cc->max_len)
		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
			    cc->op, skb->len, cc->max_len);

	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
	if (!data)
		return HCI_ERROR_UNSPECIFIED;

	return cc->func(hdev, data, skb);
}
4256
/* Handle HCI Command Complete event.
 *
 * Dispatches the response to the matching handler in hci_cc_table,
 * updates command-credit accounting, completes any pending request for
 * the opcode, and kicks the command workqueue if more commands are
 * queued. *opcode and *status are filled in for the caller.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].op == *opcode) {
			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
			break;
		}
	}

	if (i == ARRAY_SIZE(hci_cc_table)) {
		/* Unknown opcode, assume byte 0 contains the status, so
		 * that e.g. __hci_cmd_sync() properly returns errors
		 * for vendor specific commands send by HCI drivers.
		 * If a vendor doesn't actually follow this convention we may
		 * need to introduce a vendor CC table in order to properly set
		 * the status.
		 */
		*status = skb->data[0];
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	/* A command is still pending: the event arrived out of order */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Credits available and commands queued: resume sending */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4301
/* Handle the Command Status for HCI_OP_LE_CREATE_CIS.
 *
 * Only acts on failure: every CIS connection referenced in the sent
 * command is closed and removed, and pending CIS creation is
 * rescheduled if any of them had the CREATE_CIS flag set.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed. Note this loop decrements
	 * num_cis in the copy of the sent command held by hci_sent_cmd_data,
	 * consuming each CIS entry exactly once.
	 */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
					       &conn->flags))
				pending = true;
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
4342
/* Declare a Command Status handler entry */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table mapping HCI opcodes to their Command Status handlers */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4376
/* Handle an HCI Command Status event: run the per-opcode status handler
 * from hci_cs_table, account for the controller's returned command
 * credits and, when appropriate, complete the request that issued the
 * command.
 *
 * @opcode and @status are filled in for the caller; @req_complete or
 * @req_complete_skb is set by hci_req_cmd_complete() when a pending
 * request finished.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	/* Invoke the dedicated status handler for this opcode, if any */
	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	/* Update the outstanding-command credit count and command timer */
	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	/* A credit came back; push out any queued commands */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4418
4419 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4420                                    struct sk_buff *skb)
4421 {
4422         struct hci_ev_hardware_error *ev = data;
4423
4424         bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4425
4426         hdev->hw_error_code = ev->code;
4427
4428         queue_work(hdev->req_workqueue, &hdev->error_reset);
4429 }
4430
4431 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4432                                 struct sk_buff *skb)
4433 {
4434         struct hci_ev_role_change *ev = data;
4435         struct hci_conn *conn;
4436
4437         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4438
4439         hci_dev_lock(hdev);
4440
4441         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4442         if (conn) {
4443                 if (!ev->status)
4444                         conn->role = ev->role;
4445
4446                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4447
4448                 hci_role_switch_cfm(conn, ev->status, ev->role);
4449         }
4450
4451         hci_dev_unlock(hdev);
4452 }
4453
4454 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4455                                   struct sk_buff *skb)
4456 {
4457         struct hci_ev_num_comp_pkts *ev = data;
4458         int i;
4459
4460         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4461                              flex_array_size(ev, handles, ev->num)))
4462                 return;
4463
4464         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4465                 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4466                 return;
4467         }
4468
4469         bt_dev_dbg(hdev, "num %d", ev->num);
4470
4471         for (i = 0; i < ev->num; i++) {
4472                 struct hci_comp_pkts_info *info = &ev->handles[i];
4473                 struct hci_conn *conn;
4474                 __u16  handle, count;
4475
4476                 handle = __le16_to_cpu(info->handle);
4477                 count  = __le16_to_cpu(info->count);
4478
4479                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4480                 if (!conn)
4481                         continue;
4482
4483                 conn->sent -= count;
4484
4485                 switch (conn->type) {
4486                 case ACL_LINK:
4487                         hdev->acl_cnt += count;
4488                         if (hdev->acl_cnt > hdev->acl_pkts)
4489                                 hdev->acl_cnt = hdev->acl_pkts;
4490                         break;
4491
4492                 case LE_LINK:
4493                         if (hdev->le_pkts) {
4494                                 hdev->le_cnt += count;
4495                                 if (hdev->le_cnt > hdev->le_pkts)
4496                                         hdev->le_cnt = hdev->le_pkts;
4497                         } else {
4498                                 hdev->acl_cnt += count;
4499                                 if (hdev->acl_cnt > hdev->acl_pkts)
4500                                         hdev->acl_cnt = hdev->acl_pkts;
4501                         }
4502                         break;
4503
4504                 case SCO_LINK:
4505                         hdev->sco_cnt += count;
4506                         if (hdev->sco_cnt > hdev->sco_pkts)
4507                                 hdev->sco_cnt = hdev->sco_pkts;
4508                         break;
4509
4510                 case ISO_LINK:
4511                         if (hdev->iso_pkts) {
4512                                 hdev->iso_cnt += count;
4513                                 if (hdev->iso_cnt > hdev->iso_pkts)
4514                                         hdev->iso_cnt = hdev->iso_pkts;
4515                         } else if (hdev->le_pkts) {
4516                                 hdev->le_cnt += count;
4517                                 if (hdev->le_cnt > hdev->le_pkts)
4518                                         hdev->le_cnt = hdev->le_pkts;
4519                         } else {
4520                                 hdev->acl_cnt += count;
4521                                 if (hdev->acl_cnt > hdev->acl_pkts)
4522                                         hdev->acl_cnt = hdev->acl_pkts;
4523                         }
4524                         break;
4525
4526                 default:
4527                         bt_dev_err(hdev, "unknown type %d conn %p",
4528                                    conn->type, conn);
4529                         break;
4530                 }
4531         }
4532
4533         queue_work(hdev->workqueue, &hdev->tx_work);
4534 }
4535
4536 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4537                                                  __u16 handle)
4538 {
4539         struct hci_chan *chan;
4540
4541         switch (hdev->dev_type) {
4542         case HCI_PRIMARY:
4543                 return hci_conn_hash_lookup_handle(hdev, handle);
4544         case HCI_AMP:
4545                 chan = hci_chan_lookup_handle(hdev, handle);
4546                 if (chan)
4547                         return chan->conn;
4548                 break;
4549         default:
4550                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4551                 break;
4552         }
4553
4554         return NULL;
4555 }
4556
4557 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4558                                     struct sk_buff *skb)
4559 {
4560         struct hci_ev_num_comp_blocks *ev = data;
4561         int i;
4562
4563         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4564                              flex_array_size(ev, handles, ev->num_hndl)))
4565                 return;
4566
4567         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4568                 bt_dev_err(hdev, "wrong event for mode %d",
4569                            hdev->flow_ctl_mode);
4570                 return;
4571         }
4572
4573         bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4574                    ev->num_hndl);
4575
4576         for (i = 0; i < ev->num_hndl; i++) {
4577                 struct hci_comp_blocks_info *info = &ev->handles[i];
4578                 struct hci_conn *conn = NULL;
4579                 __u16  handle, block_count;
4580
4581                 handle = __le16_to_cpu(info->handle);
4582                 block_count = __le16_to_cpu(info->blocks);
4583
4584                 conn = __hci_conn_lookup_handle(hdev, handle);
4585                 if (!conn)
4586                         continue;
4587
4588                 conn->sent -= block_count;
4589
4590                 switch (conn->type) {
4591                 case ACL_LINK:
4592                 case AMP_LINK:
4593                         hdev->block_cnt += block_count;
4594                         if (hdev->block_cnt > hdev->num_blocks)
4595                                 hdev->block_cnt = hdev->num_blocks;
4596                         break;
4597
4598                 default:
4599                         bt_dev_err(hdev, "unknown type %d conn %p",
4600                                    conn->type, conn);
4601                         break;
4602                 }
4603         }
4604
4605         queue_work(hdev->workqueue, &hdev->tx_work);
4606 }
4607
4608 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4609                                 struct sk_buff *skb)
4610 {
4611         struct hci_ev_mode_change *ev = data;
4612         struct hci_conn *conn;
4613
4614         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4615
4616         hci_dev_lock(hdev);
4617
4618         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4619         if (conn) {
4620                 conn->mode = ev->mode;
4621
4622                 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4623                                         &conn->flags)) {
4624                         if (conn->mode == HCI_CM_ACTIVE)
4625                                 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4626                         else
4627                                 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4628                 }
4629
4630                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4631                         hci_sco_setup(conn, ev->status);
4632         }
4633
4634         hci_dev_unlock(hdev);
4635 }
4636
4637 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4638                                      struct sk_buff *skb)
4639 {
4640         struct hci_ev_pin_code_req *ev = data;
4641         struct hci_conn *conn;
4642
4643         bt_dev_dbg(hdev, "");
4644
4645         hci_dev_lock(hdev);
4646
4647         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4648         if (!conn)
4649                 goto unlock;
4650
4651         if (conn->state == BT_CONNECTED) {
4652                 hci_conn_hold(conn);
4653                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4654                 hci_conn_drop(conn);
4655         }
4656
4657         if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4658             !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4659                 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4660                              sizeof(ev->bdaddr), &ev->bdaddr);
4661         } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4662                 u8 secure;
4663
4664                 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4665                         secure = 1;
4666                 else
4667                         secure = 0;
4668
4669                 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4670         }
4671
4672 unlock:
4673         hci_dev_unlock(hdev);
4674 }
4675
4676 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4677 {
4678         if (key_type == HCI_LK_CHANGED_COMBINATION)
4679                 return;
4680
4681         conn->pin_length = pin_len;
4682         conn->key_type = key_type;
4683
4684         switch (key_type) {
4685         case HCI_LK_LOCAL_UNIT:
4686         case HCI_LK_REMOTE_UNIT:
4687         case HCI_LK_DEBUG_COMBINATION:
4688                 return;
4689         case HCI_LK_COMBINATION:
4690                 if (pin_len == 16)
4691                         conn->pending_sec_level = BT_SECURITY_HIGH;
4692                 else
4693                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4694                 break;
4695         case HCI_LK_UNAUTH_COMBINATION_P192:
4696         case HCI_LK_UNAUTH_COMBINATION_P256:
4697                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4698                 break;
4699         case HCI_LK_AUTH_COMBINATION_P192:
4700                 conn->pending_sec_level = BT_SECURITY_HIGH;
4701                 break;
4702         case HCI_LK_AUTH_COMBINATION_P256:
4703                 conn->pending_sec_level = BT_SECURITY_FIPS;
4704                 break;
4705         }
4706 }
4707
/* Handle an HCI Link Key Request event: look up a stored link key for
 * the peer and reply with it, or send a negative reply when no key is
 * stored or the stored key is too weak for the pending security level.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	/* Link keys are only managed when mgmt is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Reject unauthenticated keys when the connection's auth
		 * requirement demands more (bit 0 of auth_type; 0xff means
		 * no requirement recorded).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		/* A combination key from a short (<16 digit) PIN cannot
		 * satisfy high or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4765
/* Handle an HCI Link Key Notification event: a new (or changed) link
 * key was produced for the connection. Store it, update the connection
 * security state and notify user space. All-zero keys are rejected to
 * mitigate CVE-2020-26555.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Ignore NULL link key against CVE-2020-26555 */
	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
			   &ev->bdaddr);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Restore the regular disconnect timeout (pairing may have
	 * extended it to HCI_PAIRING_TIMEOUT).
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* NOTE(review): pin_len is always 0 here rather than
	 * conn->pin_length — confirm this is intentional.
	 */
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
				ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys must be flushed when the connection ends */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4835
4836 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4837                                  struct sk_buff *skb)
4838 {
4839         struct hci_ev_clock_offset *ev = data;
4840         struct hci_conn *conn;
4841
4842         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4843
4844         hci_dev_lock(hdev);
4845
4846         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4847         if (conn && !ev->status) {
4848                 struct inquiry_entry *ie;
4849
4850                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4851                 if (ie) {
4852                         ie->data.clock_offset = ev->clock_offset;
4853                         ie->timestamp = jiffies;
4854                 }
4855         }
4856
4857         hci_dev_unlock(hdev);
4858 }
4859
4860 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4861                                     struct sk_buff *skb)
4862 {
4863         struct hci_ev_pkt_type_change *ev = data;
4864         struct hci_conn *conn;
4865
4866         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4867
4868         hci_dev_lock(hdev);
4869
4870         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4871         if (conn && !ev->status)
4872                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4873
4874         hci_dev_unlock(hdev);
4875 }
4876
4877 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4878                                    struct sk_buff *skb)
4879 {
4880         struct hci_ev_pscan_rep_mode *ev = data;
4881         struct inquiry_entry *ie;
4882
4883         bt_dev_dbg(hdev, "");
4884
4885         hci_dev_lock(hdev);
4886
4887         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4888         if (ie) {
4889                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4890                 ie->timestamp = jiffies;
4891         }
4892
4893         hci_dev_unlock(hdev);
4894 }
4895
/* Handle an HCI Inquiry Result with RSSI event. The event exists in two
 * wire formats - entries with and without a pscan_mode field - so the
 * format is detected from the total payload length before parsing. Each
 * reported device is fed into the inquiry cache and forwarded to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	/* Skip processing while the periodic inquiry flag is set */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		struct inquiry_info_rssi_pscan *info;

		/* Format variant that includes the pscan_mode field */
		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		struct inquiry_info_rssi *info;

		/* Format variant without the pscan_mode field */
		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else {
		/* Total length matched neither known entry layout */
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}
4982
/* Handle an HCI Read Remote Extended Features Complete event: cache the
 * reported feature page and, for page 1, track the remote host's SSP
 * and Secure Connections support. While the connection is still in the
 * configuration phase this also drives the next setup step (remote name
 * request or connect confirmation).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the page for later feature queries */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC feature bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* The remaining steps only apply while setting the link up */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else {
		mgmt_device_connected(hdev, conn, NULL, 0);
	}

	/* Complete the connection now unless we still need to
	 * authenticate first.
	 */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5047
/* Handle an HCI Synchronous Connection Complete event: finish setting
 * up a SCO/eSCO link, retry an outgoing eSCO setup with downgraded
 * parameters on selected failure codes, and notify the data-path
 * driver about the negotiated air mode.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
		goto unlock;
	}

	switch (status) {
	case 0x00:
		/* Success: claim the handle and finish bringing the link up */
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status) {
			conn->state = BT_CLOSED;
			break;
		}

		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For outgoing links, retry the setup with a reduced
		 * packet-type selection before giving up.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->parent->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only in case of SCO over HCI transport data path which
	 * is zero and non-zero value shall be non-HCI transport data path
	 */
	if (conn->codec.data_path == 0 && hdev->notify) {
		switch (ev->air_mode) {
		case 0x02:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		case 0x03:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
			break;
		}
	}

	hci_connect_cfm(conn, status);
	if (status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
5160
5161 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5162 {
5163         size_t parsed = 0;
5164
5165         while (parsed < eir_len) {
5166                 u8 field_len = eir[0];
5167
5168                 if (field_len == 0)
5169                         return parsed;
5170
5171                 parsed += field_len + 1;
5172                 eir += field_len + 1;
5173         }
5174
5175         return eir_len;
5176 }
5177
5178 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5179                                             struct sk_buff *skb)
5180 {
5181         struct hci_ev_ext_inquiry_result *ev = edata;
5182         struct inquiry_data data;
5183         size_t eir_len;
5184         int i;
5185
5186         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5187                              flex_array_size(ev, info, ev->num)))
5188                 return;
5189
5190         bt_dev_dbg(hdev, "num %d", ev->num);
5191
5192         if (!ev->num)
5193                 return;
5194
5195         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5196                 return;
5197
5198         hci_dev_lock(hdev);
5199
5200         for (i = 0; i < ev->num; i++) {
5201                 struct extended_inquiry_info *info = &ev->info[i];
5202                 u32 flags;
5203                 bool name_known;
5204
5205                 bacpy(&data.bdaddr, &info->bdaddr);
5206                 data.pscan_rep_mode     = info->pscan_rep_mode;
5207                 data.pscan_period_mode  = info->pscan_period_mode;
5208                 data.pscan_mode         = 0x00;
5209                 memcpy(data.dev_class, info->dev_class, 3);
5210                 data.clock_offset       = info->clock_offset;
5211                 data.rssi               = info->rssi;
5212                 data.ssp_mode           = 0x01;
5213
5214                 if (hci_dev_test_flag(hdev, HCI_MGMT))
5215                         name_known = eir_get_data(info->data,
5216                                                   sizeof(info->data),
5217                                                   EIR_NAME_COMPLETE, NULL);
5218                 else
5219                         name_known = true;
5220
5221                 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5222
5223                 eir_len = eir_get_length(info->data, sizeof(info->data));
5224
5225                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5226                                   info->dev_class, info->rssi,
5227                                   flags, info->data, eir_len, NULL, 0, 0);
5228         }
5229
5230         hci_dev_unlock(hdev);
5231 }
5232
/* Encryption Key Refresh Complete event. For LE links this concludes
 * a pending security procedure; BR/EDR links are handled through the
 * auth_complete event instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* On success the requested security level takes effect */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failure on an already established link is treated as an
	 * authentication failure and the link is disconnected.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Connection setup was waiting on security: move to
		 * connected on success and notify the upper layers.
		 */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold the connection for the disconnect timeout so it
		 * is not torn down immediately after authentication.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5282
5283 static u8 hci_get_auth_req(struct hci_conn *conn)
5284 {
5285         /* If remote requests no-bonding follow that lead */
5286         if (conn->remote_auth == HCI_AT_NO_BONDING ||
5287             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5288                 return conn->remote_auth | (conn->auth_type & 0x01);
5289
5290         /* If both remote and local have enough IO capabilities, require
5291          * MITM protection
5292          */
5293         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5294             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5295                 return conn->remote_auth | 0x01;
5296
5297         /* No MITM protection possible so ignore remote requirement */
5298         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5299 }
5300
5301 static u8 bredr_oob_data_present(struct hci_conn *conn)
5302 {
5303         struct hci_dev *hdev = conn->hdev;
5304         struct oob_data *data;
5305
5306         data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5307         if (!data)
5308                 return 0x00;
5309
5310         if (bredr_sc_enabled(hdev)) {
5311                 /* When Secure Connections is enabled, then just
5312                  * return the present value stored with the OOB
5313                  * data. The stored value contains the right present
5314                  * information. However it can only be trusted when
5315                  * not in Secure Connection Only mode.
5316                  */
5317                 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5318                         return data->present;
5319
5320                 /* When Secure Connections Only mode is enabled, then
5321                  * the P-256 values are required. If they are not
5322                  * available, then do not declare that OOB data is
5323                  * present.
5324                  */
5325                 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5326                     !crypto_memneq(data->hash256, ZERO_KEY, 16))
5327                         return 0x00;
5328
5329                 return 0x02;
5330         }
5331
5332         /* When Secure Connections is not enabled or actually
5333          * not supported by the hardware, then check that if
5334          * P-192 data values are present.
5335          */
5336         if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5337             !crypto_memneq(data->hash192, ZERO_KEY, 16))
5338                 return 0x00;
5339
5340         return 0x01;
5341 }
5342
/* IO Capability Request event: the controller asks the host for its
 * IO capabilities to start Secure Simple Pairing. Reply with our
 * capabilities or reject the pairing attempt.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* Only react when SSP is enabled and a matching ACL link exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		goto unlock;

	/* Assume remote supports SSP since it has triggered this event */
	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);

	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			/* As responder, combine remote and local requirements */
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Pairing not allowed: send a negative reply */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5415
5416 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5417                                   struct sk_buff *skb)
5418 {
5419         struct hci_ev_io_capa_reply *ev = data;
5420         struct hci_conn *conn;
5421
5422         bt_dev_dbg(hdev, "");
5423
5424         hci_dev_lock(hdev);
5425
5426         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5427         if (!conn)
5428                 goto unlock;
5429
5430         conn->remote_cap = ev->capability;
5431         conn->remote_auth = ev->authentication;
5432
5433 unlock:
5434         hci_dev_unlock(hdev);
5435 }
5436
/* User Confirmation Request event: decide whether to auto-accept the
 * numeric comparison, defer the decision to user space, or reject it
 * outright, based on the MITM requirements of both sides.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept to a delayed work
		 * item instead of replying immediately.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Hand the confirmation (with an optional auto-accept hint) to
	 * user space via the management interface.
	 */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
5521
5522 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5523                                          struct sk_buff *skb)
5524 {
5525         struct hci_ev_user_passkey_req *ev = data;
5526
5527         bt_dev_dbg(hdev, "");
5528
5529         if (hci_dev_test_flag(hdev, HCI_MGMT))
5530                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5531 }
5532
5533 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5534                                         struct sk_buff *skb)
5535 {
5536         struct hci_ev_user_passkey_notify *ev = data;
5537         struct hci_conn *conn;
5538
5539         bt_dev_dbg(hdev, "");
5540
5541         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5542         if (!conn)
5543                 return;
5544
5545         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5546         conn->passkey_entered = 0;
5547
5548         if (hci_dev_test_flag(hdev, HCI_MGMT))
5549                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5550                                          conn->dst_type, conn->passkey_notify,
5551                                          conn->passkey_entered);
5552 }
5553
5554 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5555                                     struct sk_buff *skb)
5556 {
5557         struct hci_ev_keypress_notify *ev = data;
5558         struct hci_conn *conn;
5559
5560         bt_dev_dbg(hdev, "");
5561
5562         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5563         if (!conn)
5564                 return;
5565
5566         switch (ev->type) {
5567         case HCI_KEYPRESS_STARTED:
5568                 conn->passkey_entered = 0;
5569                 return;
5570
5571         case HCI_KEYPRESS_ENTERED:
5572                 conn->passkey_entered++;
5573                 break;
5574
5575         case HCI_KEYPRESS_ERASED:
5576                 conn->passkey_entered--;
5577                 break;
5578
5579         case HCI_KEYPRESS_CLEARED:
5580                 conn->passkey_entered = 0;
5581                 break;
5582
5583         case HCI_KEYPRESS_COMPLETED:
5584                 return;
5585         }
5586
5587         if (hci_dev_test_flag(hdev, HCI_MGMT))
5588                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5589                                          conn->dst_type, conn->passkey_notify,
5590                                          conn->passkey_entered);
5591 }
5592
5593 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5594                                          struct sk_buff *skb)
5595 {
5596         struct hci_ev_simple_pair_complete *ev = data;
5597         struct hci_conn *conn;
5598
5599         bt_dev_dbg(hdev, "");
5600
5601         hci_dev_lock(hdev);
5602
5603         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5604         if (!conn || !hci_conn_ssp_enabled(conn))
5605                 goto unlock;
5606
5607         /* Reset the authentication requirement to unknown */
5608         conn->remote_auth = 0xff;
5609
5610         /* To avoid duplicate auth_failed events to user space we check
5611          * the HCI_CONN_AUTH_PEND flag which will be set if we
5612          * initiated the authentication. A traditional auth_complete
5613          * event gets always produced as initiator and is also mapped to
5614          * the mgmt_auth_failed event */
5615         if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5616                 mgmt_auth_failed(conn, ev->status);
5617
5618         hci_conn_drop(conn);
5619
5620 unlock:
5621         hci_dev_unlock(hdev);
5622 }
5623
5624 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5625                                          struct sk_buff *skb)
5626 {
5627         struct hci_ev_remote_host_features *ev = data;
5628         struct inquiry_entry *ie;
5629         struct hci_conn *conn;
5630
5631         bt_dev_dbg(hdev, "");
5632
5633         hci_dev_lock(hdev);
5634
5635         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5636         if (conn)
5637                 memcpy(conn->features[1], ev->features, 8);
5638
5639         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5640         if (ie)
5641                 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5642
5643         hci_dev_unlock(hdev);
5644 }
5645
5646 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5647                                             struct sk_buff *skb)
5648 {
5649         struct hci_ev_remote_oob_data_request *ev = edata;
5650         struct oob_data *data;
5651
5652         bt_dev_dbg(hdev, "");
5653
5654         hci_dev_lock(hdev);
5655
5656         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5657                 goto unlock;
5658
5659         data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5660         if (!data) {
5661                 struct hci_cp_remote_oob_data_neg_reply cp;
5662
5663                 bacpy(&cp.bdaddr, &ev->bdaddr);
5664                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5665                              sizeof(cp), &cp);
5666                 goto unlock;
5667         }
5668
5669         if (bredr_sc_enabled(hdev)) {
5670                 struct hci_cp_remote_oob_ext_data_reply cp;
5671
5672                 bacpy(&cp.bdaddr, &ev->bdaddr);
5673                 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5674                         memset(cp.hash192, 0, sizeof(cp.hash192));
5675                         memset(cp.rand192, 0, sizeof(cp.rand192));
5676                 } else {
5677                         memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5678                         memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5679                 }
5680                 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5681                 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5682
5683                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5684                              sizeof(cp), &cp);
5685         } else {
5686                 struct hci_cp_remote_oob_data_reply cp;
5687
5688                 bacpy(&cp.bdaddr, &ev->bdaddr);
5689                 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5690                 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5691
5692                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5693                              sizeof(cp), &cp);
5694         }
5695
5696 unlock:
5697         hci_dev_unlock(hdev);
5698 }
5699
/* Record the initiator and responder addresses of a new LE connection
 * on the hci_conn, based on our role (conn->out) and on whether the
 * controller reported a Local RPA.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		/* Central role: the peer is the responder */
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		/* Peripheral role: we are the responder */
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
5752
/* Common handling for the legacy and enhanced LE Connection Complete
 * events: create or finish setting up the hci_conn, resolve the peer
 * identity, and transition the connection into its initial state.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
		if (IS_ERR(conn)) {
			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* An existing pending connection completed: stop its
		 * connection timeout.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status || hci_conn_set_handle(conn, handle))
		goto unlock;

	/* Drop the connection if it has been aborted */
	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	/* Record the negotiated connection parameters */
	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* A pending connection attempt for this peer is now satisfied;
	 * remove it from the pending list and release its reference.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		hci_pend_le_list_del_init(params);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}
5923
5924 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5925                                      struct sk_buff *skb)
5926 {
5927         struct hci_ev_le_conn_complete *ev = data;
5928
5929         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5930
5931         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5932                              NULL, ev->role, le16_to_cpu(ev->handle),
5933                              le16_to_cpu(ev->interval),
5934                              le16_to_cpu(ev->latency),
5935                              le16_to_cpu(ev->supervision_timeout));
5936 }
5937
5938 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5939                                          struct sk_buff *skb)
5940 {
5941         struct hci_ev_le_enh_conn_complete *ev = data;
5942
5943         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5944
5945         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5946                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5947                              le16_to_cpu(ev->interval),
5948                              le16_to_cpu(ev->latency),
5949                              le16_to_cpu(ev->supervision_timeout));
5950 }
5951
/* Handle LE Advertising Set Terminated event.
 *
 * Sent by the controller when an advertising set stops, either because a
 * connection was created from it (status 0x00) or because advertising
 * ended with an error status.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* If any other instance is still enabled, advertising as a
		 * whole remains active, so keep HCI_LE_ADV set.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	/* Success status means a connection was created from this set, so
	 * the set is no longer advertising.
	 */
	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* Only fill in resp_addr when advertising with a random
		 * address and no response address has been recorded yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Handle 0 is the legacy/default instance using the
		 * controller-wide random address.
		 */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		/* Other instances use their per-instance random address */
		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
6021
6022 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6023                                             struct sk_buff *skb)
6024 {
6025         struct hci_ev_le_conn_update_complete *ev = data;
6026         struct hci_conn *conn;
6027
6028         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6029
6030         if (ev->status)
6031                 return;
6032
6033         hci_dev_lock(hdev);
6034
6035         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6036         if (conn) {
6037                 conn->le_conn_interval = le16_to_cpu(ev->interval);
6038                 conn->le_conn_latency = le16_to_cpu(ev->latency);
6039                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6040         }
6041
6042         hci_dev_unlock(hdev);
6043 }
6044
/* This function requires the caller holds hdev->lock.
 *
 * Check whether a received advertising report should trigger an outgoing
 * LE connection attempt and, if so, initiate it.
 *
 * Returns the new connection on success, or NULL when no connection was
 * attempted (not connectable, blocked/suspended, busy, or no matching
 * pending params) or when the attempt failed.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type, u8 phy, u8 sec_phy)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER, phy, sec_phy);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6136
/* Process a single advertising report (legacy, extended, or directed).
 *
 * Validates the report type and data length, resolves the advertiser
 * address to its identity address when an IRK is known, kicks off a
 * pending LE connection if this device was requested, and forwards the
 * report to the management interface - merging an ADV_IND/ADV_SCAN_IND
 * with a following SCAN_RSP where possible.
 *
 * direct_addr is non-NULL only for LE Direct Advertising Reports and then
 * holds the address the advertiser targeted (expected to be our RPA).
 * instant is a timestamp (jiffies) only used for Mesh device-found events.
 * ctl_time is currently unused in this function.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
			       u8 *data, u8 len, bool ext_adv, bool ctl_time,
			       u64 instant)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match, bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	/* Reject reports with an unknown advertising type */
	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (len > max_adv_len(hdev)) {
		bt_dev_err_ratelimited(hdev,
				       "adv larger than maximum supported");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type, phy, sec_phy);
	if (!ext_adv && conn && type == LE_ADV_IND &&
	    len <= max_adv_len(hdev)) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Mark non-connectable advertising types as such for mgmt */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* All scan results should be sent up for Mesh systems */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, instant);
		return;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the unlikely case that a controller just sends a scan
	 * response event that doesn't match the pending report, then
	 * it is marked as a standalone SCAN_RSP.
	 */
	if (type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_SCAN_RSP;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
}
6346
6347 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6348                                   struct sk_buff *skb)
6349 {
6350         struct hci_ev_le_advertising_report *ev = data;
6351         u64 instant = jiffies;
6352
6353         if (!ev->num)
6354                 return;
6355
6356         hci_dev_lock(hdev);
6357
6358         while (ev->num--) {
6359                 struct hci_ev_le_advertising_info *info;
6360                 s8 rssi;
6361
6362                 info = hci_le_ev_skb_pull(hdev, skb,
6363                                           HCI_EV_LE_ADVERTISING_REPORT,
6364                                           sizeof(*info));
6365                 if (!info)
6366                         break;
6367
6368                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6369                                         info->length + 1))
6370                         break;
6371
6372                 if (info->length <= max_adv_len(hdev)) {
6373                         rssi = info->data[info->length];
6374                         process_adv_report(hdev, info->type, &info->bdaddr,
6375                                            info->bdaddr_type, NULL, 0,
6376                                            HCI_ADV_PHY_1M, 0, rssi,
6377                                            info->data, info->length, false,
6378                                            false, instant);
6379                 } else {
6380                         bt_dev_err(hdev, "Dropping invalid advertising data");
6381                 }
6382         }
6383
6384         hci_dev_unlock(hdev);
6385 }
6386
6387 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6388 {
6389         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6390                 switch (evt_type) {
6391                 case LE_LEGACY_ADV_IND:
6392                         return LE_ADV_IND;
6393                 case LE_LEGACY_ADV_DIRECT_IND:
6394                         return LE_ADV_DIRECT_IND;
6395                 case LE_LEGACY_ADV_SCAN_IND:
6396                         return LE_ADV_SCAN_IND;
6397                 case LE_LEGACY_NONCONN_IND:
6398                         return LE_ADV_NONCONN_IND;
6399                 case LE_LEGACY_SCAN_RSP_ADV:
6400                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6401                         return LE_ADV_SCAN_RSP;
6402                 }
6403
6404                 goto invalid;
6405         }
6406
6407         if (evt_type & LE_EXT_ADV_CONN_IND) {
6408                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6409                         return LE_ADV_DIRECT_IND;
6410
6411                 return LE_ADV_IND;
6412         }
6413
6414         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6415                 return LE_ADV_SCAN_RSP;
6416
6417         if (evt_type & LE_EXT_ADV_SCAN_IND)
6418                 return LE_ADV_SCAN_IND;
6419
6420         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6421             evt_type & LE_EXT_ADV_DIRECT_IND)
6422                 return LE_ADV_NONCONN_IND;
6423
6424 invalid:
6425         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6426                                evt_type);
6427
6428         return LE_ADV_INVALID;
6429 }
6430
/* Handle LE Extended Advertising Report event.
 *
 * Walk every report contained in the event, translate its extended event
 * type to the closest legacy advertising type and hand it over to
 * process_adv_report().
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		/* Pull the fixed-size report header */
		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the variable-length advertising data */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		/* Reports with an unknown event type are skipped;
		 * ext_evt_type_to_legacy() already logged them.
		 */
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->primary_phy,
					   info->secondary_phy,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}
6471
/* Send HCI LE Periodic Advertising Terminate Sync for the given handle.
 *
 * Note: handle is already little-endian. The command struct is zeroed via
 * memset before sending so no stale stack bytes reach the controller.
 */
static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
}
6481
/* Handle LE Periodic Advertising Sync Established event.
 *
 * (The "estabilished" spelling in the function name is a historical typo
 * kept as-is since it is referenced elsewhere.)
 */
static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	/* The PA sync procedure has completed (successfully or not), so
	 * clear the in-progress flag.
	 */
	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	/* Ask the ISO layer whether this PA sync should be accepted; if
	 * not, terminate the sync immediately.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	if (ev->status) {
		/* Add connection to indicate the failed PA sync event */
		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
					     HCI_ROLE_SLAVE);

		if (!pa_sync)
			goto unlock;

		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

		/* Notify iso layer */
		hci_connect_cfm(pa_sync, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);
}
6522
6523 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6524                                       struct sk_buff *skb)
6525 {
6526         struct hci_ev_le_per_adv_report *ev = data;
6527         int mask = hdev->link_mode;
6528         __u8 flags = 0;
6529
6530         bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6531
6532         hci_dev_lock(hdev);
6533
6534         mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6535         if (!(mask & HCI_LM_ACCEPT))
6536                 hci_le_pa_term_sync(hdev, ev->sync_handle);
6537
6538         hci_dev_unlock(hdev);
6539 }
6540
6541 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6542                                             struct sk_buff *skb)
6543 {
6544         struct hci_ev_le_remote_feat_complete *ev = data;
6545         struct hci_conn *conn;
6546
6547         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6548
6549         hci_dev_lock(hdev);
6550
6551         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6552         if (conn) {
6553                 if (!ev->status)
6554                         memcpy(conn->features[0], ev->features, 8);
6555
6556                 if (conn->state == BT_CONFIG) {
6557                         __u8 status;
6558
6559                         /* If the local controller supports peripheral-initiated
6560                          * features exchange, but the remote controller does
6561                          * not, then it is possible that the error code 0x1a
6562                          * for unsupported remote feature gets returned.
6563                          *
6564                          * In this specific case, allow the connection to
6565                          * transition into connected state and mark it as
6566                          * successful.
6567                          */
6568                         if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
6569                             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6570                                 status = 0x00;
6571                         else
6572                                 status = ev->status;
6573
6574                         conn->state = BT_CONNECTED;
6575                         hci_connect_cfm(conn, status);
6576                         hci_conn_drop(conn);
6577                 }
6578         }
6579
6580         hci_dev_unlock(hdev);
6581 }
6582
/* Handle LE Long Term Key Request event.
 *
 * Look up the LTK for the connection and reply with it so the link can be
 * encrypted; reply negatively when no matching key is known.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad the remainder of the reply buffer */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6647
6648 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6649                                       u8 reason)
6650 {
6651         struct hci_cp_le_conn_param_req_neg_reply cp;
6652
6653         cp.handle = cpu_to_le16(handle);
6654         cp.reason = reason;
6655
6656         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6657                      &cp);
6658 }
6659
/* Handle LE Remote Connection Parameter Request event.
 *
 * Validate the parameters proposed by the remote; reply negatively with
 * an appropriate error code when they are unacceptable, otherwise store
 * them (central role only), notify the management interface and accept
 * the request as proposed.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject a maximum interval larger than what this connection
	 * allows.
	 */
	if (max > hcon->le_conn_max_interval)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			/* Hint userspace to persist the parameters only when
			 * we actually track this device.
			 */
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request with the parameters as proposed */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
6723
6724 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6725                                          struct sk_buff *skb)
6726 {
6727         struct hci_ev_le_direct_adv_report *ev = data;
6728         u64 instant = jiffies;
6729         int i;
6730
6731         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6732                                 flex_array_size(ev, info, ev->num)))
6733                 return;
6734
6735         if (!ev->num)
6736                 return;
6737
6738         hci_dev_lock(hdev);
6739
6740         for (i = 0; i < ev->num; i++) {
6741                 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6742
6743                 process_adv_report(hdev, info->type, &info->bdaddr,
6744                                    info->bdaddr_type, &info->direct_addr,
6745                                    info->direct_addr_type, HCI_ADV_PHY_1M, 0,
6746                                    info->rssi, NULL, 0, false, false, instant);
6747         }
6748
6749         hci_dev_unlock(hdev);
6750 }
6751
6752 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6753                                   struct sk_buff *skb)
6754 {
6755         struct hci_ev_le_phy_update_complete *ev = data;
6756         struct hci_conn *conn;
6757
6758         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6759
6760         if (ev->status)
6761                 return;
6762
6763         hci_dev_lock(hdev);
6764
6765         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6766         if (!conn)
6767                 goto unlock;
6768
6769         conn->le_tx_phy = ev->tx_phy;
6770         conn->le_rx_phy = ev->rx_phy;
6771
6772 unlock:
6773         hci_dev_unlock(hdev);
6774 }
6775
/* Handle LE CIS Established event.
 *
 * Look up the ISO connection for ev->handle, translate the negotiated
 * CIS parameters into the connection's bt_iso_qos and, depending on
 * ev->status, either finish connection setup or tear the link down.
 */
static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	bool pending = false;
	u16 handle = __le16_to_cpu(ev->handle);

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	/* The handle must refer to an ISO link; anything else indicates a
	 * host/controller bookkeeping mismatch.
	 */
	if (conn->type != ISO_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	qos = &conn->iso_qos;

	/* Remember whether a CIS creation was in flight so any queued CIS
	 * can be kicked off at the unlock label below.
	 */
	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);

	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
	qos->ucast.out.interval = qos->ucast.in.interval;

	/* The c_*/p_* event fields are direction-specific (presumably
	 * Central-to-Peripheral and Peripheral-to-Central per the spec
	 * naming), so the in/out mapping depends on our role on this CIS.
	 */
	switch (conn->role) {
	case HCI_ROLE_SLAVE:
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.in.phy = ev->c_phy;
		qos->ucast.out.phy = ev->p_phy;
		break;
	case HCI_ROLE_MASTER:
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.out.phy = ev->c_phy;
		qos->ucast.in.phy = ev->p_phy;
		break;
	}

	/* Success: finish setting up the connection and its data path */
	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	/* Failure: notify the upper layer and drop the connection */
	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
6859
6860 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6861 {
6862         struct hci_cp_le_reject_cis cp;
6863
6864         memset(&cp, 0, sizeof(cp));
6865         cp.handle = handle;
6866         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6867         hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6868 }
6869
6870 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6871 {
6872         struct hci_cp_le_accept_cis cp;
6873
6874         memset(&cp, 0, sizeof(cp));
6875         cp.handle = handle;
6876         hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6877 }
6878
/* Handle LE CIS Request event: an incoming CIS connection request from a
 * Central over an existing LE ACL link. Either reject it, or create the
 * CIS connection object and accept, possibly deferring the accept to the
 * ISO socket layer via HCI_PROTO_DEFER.
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	/* The CIS request must arrive on a known ACL link; ignore otherwise */
	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	/* Ask the ISO layer whether connections from this peer are
	 * acceptable; reject the CIS if not.
	 */
	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	/* Reuse an existing connection for this handle or create one */
	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
				   cis_handle);
		if (IS_ERR(cis)) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
	}

	cis->iso_qos.ucast.cig = ev->cig_id;
	cis->iso_qos.ucast.cis = ev->cis_id;

	/* Accept immediately unless the ISO layer asked to defer; in that
	 * case leave the connection in BT_CONNECT2 and notify the upper
	 * layer through hci_connect_cfm() so it can decide.
	 */
	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
6929
6930 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
6931 {
6932         u8 handle = PTR_UINT(data);
6933
6934         return hci_le_terminate_big_sync(hdev, handle,
6935                                          HCI_ERROR_LOCAL_HOST_TERM);
6936 }
6937
/* Handle LE Create BIG Complete event: hand out the controller-provided
 * BIS handles to the broadcast ISO connections bound to the BIG and
 * complete (or fail) each of them according to ev->status.
 */
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;
	__u8 i = 0;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	/* Validate the variable-length list of BIS handles */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);
	rcu_read_lock();

	/* Connect all BISes that are bound to the BIG */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		/* Bound-but-unconnected BIS connections carry BDADDR_ANY as
		 * destination; match them against the BIG handle.
		 */
		if (bacmp(&conn->dst, BDADDR_ANY) ||
		    conn->type != ISO_LINK ||
		    conn->iso_qos.bcast.big != ev->handle)
			continue;

		/* Assign BIS handles in list order; skip a connection if
		 * the handle cannot be set on it.
		 */
		if (hci_conn_set_handle(conn,
					__le16_to_cpu(ev->bis_handle[i++])))
			continue;

		if (!ev->status) {
			conn->state = BT_CONNECTED;
			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
			/* Drop the RCU read-side section around the setup
			 * calls and re-acquire it before continuing the
			 * iteration. NOTE(review): assumes the list cursor
			 * stays valid across the unlock — presumably
			 * guaranteed by holding hdev lock; confirm.
			 */
			rcu_read_unlock();
			hci_debugfs_create_conn(conn);
			hci_conn_add_sysfs(conn);
			hci_iso_setup_path(conn);
			rcu_read_lock();
			continue;
		}

		/* Failure: notify the upper layer and delete the connection,
		 * again outside the RCU read-side section.
		 */
		hci_connect_cfm(conn, ev->status);
		rcu_read_unlock();
		hci_conn_del(conn);
		rcu_read_lock();
	}

	rcu_read_unlock();

	if (!ev->status && !i)
		/* If no BISes have been connected for the BIG,
		 * terminate. This is in case all bound connections
		 * have been closed before the BIG creation
		 * has completed.
		 */
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);

	hci_dev_unlock(hdev);
}
6995
/* Handle LE BIG Sync Established event: create or complete one hci_conn
 * per synchronized BIS and record the broadcast QoS parameters. On
 * failure, every affected BIS connection is flagged and notified after
 * all of them have been added.
 */
static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_estabilished *ev = data;
	struct hci_conn *bis;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* Validate the variable-length list of BIS handles */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
				flex_array_size(ev, bis, ev->num_bis)))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		__le32 interval;

		/* Reuse an existing connection for this handle or add one */
		bis = hci_conn_hash_lookup_handle(hdev, handle);
		if (!bis) {
			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
					   HCI_ROLE_SLAVE, handle);
			if (IS_ERR(bis))
				continue;
		}

		/* NOTE(review): any status other than 0x42 (including
		 * success) marks PA sync as established — confirm the
		 * meaning of 0x42 against the Core spec error codes.
		 */
		if (ev->status != 0x42)
			/* Mark PA sync as established */
			set_bit(HCI_CONN_PA_SYNC, &bis->flags);

		bis->iso_qos.bcast.big = ev->handle;
		/* Widen the sizeof(ev->latency)-byte little-endian value
		 * into a zero-padded __le32 before conversion.
		 */
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);

		if (!ev->status) {
			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
			hci_iso_setup_path(bis);
		}
	}

	/* In case BIG sync failed, notify each failed connection to
	 * the user after all hci connections have been added
	 */
	if (ev->status)
		for (i = 0; i < ev->num_bis; i++) {
			u16 handle = le16_to_cpu(ev->bis[i]);

			bis = hci_conn_hash_lookup_handle(hdev, handle);
			if (!bis)
				continue;

			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
			hci_connect_cfm(bis, ev->status);
		}

	hci_dev_unlock(hdev);
}
7058
/* Handle LE BIGInfo Advertising Report event.
 *
 * If the ISO layer accepts broadcast connections and requested deferred
 * handling, create a placeholder connection representing the periodic
 * advertising (PA) sync — unless one already exists — and notify the
 * ISO and MGMT layers about it. Otherwise terminate the PA sync.
 */
static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	/* If the ISO layer refuses the connection, drop the PA sync */
	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->sync_handle);
		goto unlock;
	}

	/* Only deferred handling creates the placeholder connection */
	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* Nothing to do if this sync handle is already tracked */
	pa_sync = hci_conn_hash_lookup_pa_sync_handle
			(hdev,
			le16_to_cpu(ev->sync_handle));

	if (pa_sync)
		goto unlock;

	/* Add connection to indicate the PA sync event */
	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
				     HCI_ROLE_SLAVE);

	if (IS_ERR(pa_sync))
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0x00);

	/* Notify MGMT layer */
	mgmt_device_connected(hdev, pa_sync, NULL, 0);

unlock:
	hci_dev_unlock(hdev);
}
7106
/* Define an hci_le_ev_table entry for a variable-length subevent with
 * distinct minimum and maximum payload lengths.
 */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length subevent: minimum and maximum lengths are the same. */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Subevent whose only payload is a single status byte. */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7119
/* Entries in this table shall have their position according to the subevent
 * opcode they handle, so the use of the macros above is recommended since
 * they initialize at the proper index using Designated Initializers; that
 * way events without a callback function can be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_estabilished_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
				 hci_le_per_adv_report_evt,
				 sizeof(struct hci_ev_le_per_adv_report),
				 HCI_MAX_EVENT_SIZE),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};
7201
/* Dispatch an HCI LE Meta event to its subevent handler via
 * hci_le_ev_table, completing any pending LE command that was waiting
 * for this subevent and validating the event length first.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
	    hci_skb_event(hdev->req_skb) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->req_skb);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	/* Unhandled subevents have no .func in the table */
	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	/* A packet shorter than the handler's minimum is dropped */
	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len: it may still be
	 * possible to partially parse the event, so leave it to the
	 * callback to decide if that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}
7244
/* Check whether @skb carries the event completing the request for
 * @opcode: either the specific @event that was waited for (when @event
 * is non-zero), or a Command Complete for @opcode. Returns false for
 * Command Status, mismatching events, or malformed packets.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	/* When waiting on a specific event, only that event counts */
	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	/* The Command Complete must be for the opcode we issued */
	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
7288
/* If the controller woke the host, record why and which remote device
 * caused it, for later reporting through mgmt. Only the first BT event
 * seen while suspended is recorded.
 *
 * NOTE(review): the casts below read skb->data without local length
 * checks — presumably the event was already length-validated earlier in
 * the event path; confirm against the caller.
 */
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		/* For advertising reports, take the address from the first
		 * report in the event.
		 */
		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		/* Any other event while suspended is unexpected */
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}
7357
/* Define an hci_ev_table entry for a variable-length event handled by a
 * plain handler (no request completion callbacks).
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length event: minimum and maximum lengths are the same. */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Event whose only payload is a single status byte. */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entry whose handler also completes a pending request and therefore
 * receives the opcode/status/completion-callback out-parameters.
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length variant of HCI_EV_REQ_VL. */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
7382
7383 /* Entries in this table shall have their position according to the event opcode
7384  * they handle so the use of the macros above is recommend since it does attempt
7385  * to initialize at its proper index using Designated Initializers that way
7386  * events without a callback function don't have entered.
7387  */
7388 static const struct hci_ev {
7389         bool req;
7390         union {
7391                 void (*func)(struct hci_dev *hdev, void *data,
7392                              struct sk_buff *skb);
7393                 void (*func_req)(struct hci_dev *hdev, void *data,
7394                                  struct sk_buff *skb, u16 *opcode, u8 *status,
7395                                  hci_req_complete_t *req_complete,
7396                                  hci_req_complete_skb_t *req_complete_skb);
7397         };
7398         u16  min_len;
7399         u16  max_len;
7400 } hci_ev_table[U8_MAX + 1] = {
7401         /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7402         HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7403         /* [0x02 = HCI_EV_INQUIRY_RESULT] */
7404         HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7405                   sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7406         /* [0x03 = HCI_EV_CONN_COMPLETE] */
7407         HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7408                sizeof(struct hci_ev_conn_complete)),
7409         /* [0x04 = HCI_EV_CONN_REQUEST] */
7410         HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7411                sizeof(struct hci_ev_conn_request)),
7412         /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7413         HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7414                sizeof(struct hci_ev_disconn_complete)),
7415         /* [0x06 = HCI_EV_AUTH_COMPLETE] */
7416         HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7417                sizeof(struct hci_ev_auth_complete)),
7418         /* [0x07 = HCI_EV_REMOTE_NAME] */
7419         HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7420                sizeof(struct hci_ev_remote_name)),
7421         /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7422         HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7423                sizeof(struct hci_ev_encrypt_change)),
7424         /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7425         HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7426                hci_change_link_key_complete_evt,
7427                sizeof(struct hci_ev_change_link_key_complete)),
7428         /* [0x0b = HCI_EV_REMOTE_FEATURES] */
7429         HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7430                sizeof(struct hci_ev_remote_features)),
7431         /* [0x0e = HCI_EV_CMD_COMPLETE] */
7432         HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7433                       sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7434         /* [0x0f = HCI_EV_CMD_STATUS] */
7435         HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7436                    sizeof(struct hci_ev_cmd_status)),
7437         /* [0x10 = HCI_EV_HARDWARE_ERROR] */
7438         HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7439                sizeof(struct hci_ev_hardware_error)),
7440         /* [0x12 = HCI_EV_ROLE_CHANGE] */
7441         HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7442                sizeof(struct hci_ev_role_change)),
7443         /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7444         HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7445                   sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7446         /* [0x14 = HCI_EV_MODE_CHANGE] */
7447         HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7448                sizeof(struct hci_ev_mode_change)),
7449         /* [0x16 = HCI_EV_PIN_CODE_REQ] */
7450         HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7451                sizeof(struct hci_ev_pin_code_req)),
7452         /* [0x17 = HCI_EV_LINK_KEY_REQ] */
7453         HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7454                sizeof(struct hci_ev_link_key_req)),
7455         /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7456         HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7457                sizeof(struct hci_ev_link_key_notify)),
7458         /* [0x1c = HCI_EV_CLOCK_OFFSET] */
7459         HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7460                sizeof(struct hci_ev_clock_offset)),
7461         /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7462         HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7463                sizeof(struct hci_ev_pkt_type_change)),
7464         /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7465         HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7466                sizeof(struct hci_ev_pscan_rep_mode)),
7467         /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7468         HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7469                   hci_inquiry_result_with_rssi_evt,
7470                   sizeof(struct hci_ev_inquiry_result_rssi),
7471                   HCI_MAX_EVENT_SIZE),
7472         /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7473         HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7474                sizeof(struct hci_ev_remote_ext_features)),
7475         /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7476         HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7477                sizeof(struct hci_ev_sync_conn_complete)),
7478         /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7479         HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7480                   hci_extended_inquiry_result_evt,
7481                   sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7482         /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7483         HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7484                sizeof(struct hci_ev_key_refresh_complete)),
7485         /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7486         HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7487                sizeof(struct hci_ev_io_capa_request)),
7488         /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7489         HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7490                sizeof(struct hci_ev_io_capa_reply)),
7491         /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7492         HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7493                sizeof(struct hci_ev_user_confirm_req)),
7494         /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7495         HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7496                sizeof(struct hci_ev_user_passkey_req)),
7497         /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7498         HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7499                sizeof(struct hci_ev_remote_oob_data_request)),
7500         /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7501         HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7502                sizeof(struct hci_ev_simple_pair_complete)),
7503         /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7504         HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7505                sizeof(struct hci_ev_user_passkey_notify)),
7506         /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7507         HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7508                sizeof(struct hci_ev_keypress_notify)),
7509         /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7510         HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7511                sizeof(struct hci_ev_remote_host_features)),
7512         /* [0x3e = HCI_EV_LE_META] */
7513         HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7514                       sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7515         /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7516         HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7517                sizeof(struct hci_ev_num_comp_blocks)),
7518         /* [0xff = HCI_EV_VENDOR] */
7519         HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7520 };
7521
7522 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7523                            u16 *opcode, u8 *status,
7524                            hci_req_complete_t *req_complete,
7525                            hci_req_complete_skb_t *req_complete_skb)
7526 {
7527         const struct hci_ev *ev = &hci_ev_table[event];
7528         void *data;
7529
7530         if (!ev->func)
7531                 return;
7532
7533         if (skb->len < ev->min_len) {
7534                 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7535                            event, skb->len, ev->min_len);
7536                 return;
7537         }
7538
7539         /* Just warn if the length is over max_len size it still be
7540          * possible to partially parse the event so leave to callback to
7541          * decide if that is acceptable.
7542          */
7543         if (skb->len > ev->max_len)
7544                 bt_dev_warn_ratelimited(hdev,
7545                                         "unexpected event 0x%2.2x length: %u > %u",
7546                                         event, skb->len, ev->max_len);
7547
7548         data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7549         if (!data)
7550                 return;
7551
7552         if (ev->req)
7553                 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7554                              req_complete_skb);
7555         else
7556                 ev->func(hdev, data, skb);
7557 }
7558
/* Entry point for a complete HCI Event packet from the controller.
 * Validates the event header, matches the event against a pending
 * command (if any), dispatches it through hci_event_func() and then
 * runs any request-completion callback. Always consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	/* Keep a clone of the most recently received event in
	 * hdev->recv_event, replacing whatever was stored before.
	 */
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	event = hdr->evt;
	if (!event) {
		/* Event code 0x00 is treated as invalid and dropped */
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE
	 * (OGF 0x08 is the LE Controller command group)
	 */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* When hci_get_cmd_complete() reports no match for the
		 * awaited completion, drop the pristine copy and hand
		 * NULL to the callback instead.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	/* kfree_skb(NULL) is a no-op, so both frees are safe on all paths */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}