net/bluetooth/hci_event.c (GNU Linux-libre 5.19-rc6-gnu)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 #include "eir.h"
40
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42                  "\x00\x00\x00\x00\x00\x00\x00\x00"
43
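/* Helper to convert a timeout expressed in seconds (e.g. hdev->rpa_timeout)
 * into jiffies for delayed work scheduling.
 */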
44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45
46 /* Handle HCI Event packets */
47
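/* The *_skb_pull() helpers below check that the event, Command Complete or
 * LE meta event payload holds at least 'len' bytes before handing out a
 * pointer to it; a short packet is logged as malformed and NULL is returned.
 */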
48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
49                              u8 ev, size_t len)
50 {
51         void *data;
52
53         data = skb_pull_data(skb, len);
54         if (!data)
55                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
56
57         return data;
58 }
59
60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
61                              u16 op, size_t len)
62 {
63         void *data;
64
65         data = skb_pull_data(skb, len);
66         if (!data)
67                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
68
69         return data;
70 }
71
72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
73                                 u8 ev, size_t len)
74 {
75         void *data;
76
77         data = skb_pull_data(skb, len);
78         if (!data)
79                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
80
81         return data;
82 }
83
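/* Command Complete handlers: each one receives the decoded return
 * parameters in 'data', updates the relevant hci_dev state and returns
 * the HCI status byte to the caller.
 */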
84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
85                                 struct sk_buff *skb)
86 {
87         struct hci_ev_status *rp = data;
88
89         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
90
91         /* It is possible that we receive the Inquiry Complete event right
92          * before the Inquiry Cancel Command Complete event, in which case
93          * the latter event should have a status of Command Disallowed
94          * (0x0c). This should not be treated as an error, since we actually
95          * achieve what Inquiry Cancel wants to achieve, which is to end
96          * the last Inquiry session.
97          */
98         if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
99                 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
100                 rp->status = 0x00;
101         }
102
103         if (rp->status)
104                 return rp->status;
105
106         clear_bit(HCI_INQUIRY, &hdev->flags);
107         smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
108         wake_up_bit(&hdev->flags, HCI_INQUIRY);
109
110         hci_dev_lock(hdev);
111         /* Set discovery state to stopped if we're not doing LE active
112          * scanning.
113          */
114         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
115             hdev->le_scan_type != LE_SCAN_ACTIVE)
116                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
117         hci_dev_unlock(hdev);
118
119         hci_conn_check_pending(hdev);
120
121         return rp->status;
122 }
123
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
125                               struct sk_buff *skb)
126 {
127         struct hci_ev_status *rp = data;
128
129         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
130
131         if (rp->status)
132                 return rp->status;
133
134         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
135
136         return rp->status;
137 }
138
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
140                                    struct sk_buff *skb)
141 {
142         struct hci_ev_status *rp = data;
143
144         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
145
146         if (rp->status)
147                 return rp->status;
148
149         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
150
151         hci_conn_check_pending(hdev);
152
153         return rp->status;
154 }
155
156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
157                                         struct sk_buff *skb)
158 {
159         struct hci_ev_status *rp = data;
160
161         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
162
163         return rp->status;
164 }
165
166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
167                                 struct sk_buff *skb)
168 {
169         struct hci_rp_role_discovery *rp = data;
170         struct hci_conn *conn;
171
172         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
173
174         if (rp->status)
175                 return rp->status;
176
177         hci_dev_lock(hdev);
178
179         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
180         if (conn)
181                 conn->role = rp->role;
182
183         hci_dev_unlock(hdev);
184
185         return rp->status;
186 }
187
188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
189                                   struct sk_buff *skb)
190 {
191         struct hci_rp_read_link_policy *rp = data;
192         struct hci_conn *conn;
193
194         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
195
196         if (rp->status)
197                 return rp->status;
198
199         hci_dev_lock(hdev);
200
201         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
202         if (conn)
203                 conn->link_policy = __le16_to_cpu(rp->policy);
204
205         hci_dev_unlock(hdev);
206
207         return rp->status;
208 }
209
210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
211                                    struct sk_buff *skb)
212 {
213         struct hci_rp_write_link_policy *rp = data;
214         struct hci_conn *conn;
215         void *sent;
216
217         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
218
219         if (rp->status)
220                 return rp->status;
221
222         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
223         if (!sent)
224                 return rp->status;
225
226         hci_dev_lock(hdev);
227
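        /* The Write_Link_Policy_Settings parameters start with the 2-byte
         * connection handle, so the new policy value sits at offset 2 of
         * the sent command payload.
         */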
228         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
229         if (conn)
230                 conn->link_policy = get_unaligned_le16(sent + 2);
231
232         hci_dev_unlock(hdev);
233
234         return rp->status;
235 }
236
237 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
238                                       struct sk_buff *skb)
239 {
240         struct hci_rp_read_def_link_policy *rp = data;
241
242         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
243
244         if (rp->status)
245                 return rp->status;
246
247         hdev->link_policy = __le16_to_cpu(rp->policy);
248
249         return rp->status;
250 }
251
252 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
253                                        struct sk_buff *skb)
254 {
255         struct hci_ev_status *rp = data;
256         void *sent;
257
258         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
259
260         if (rp->status)
261                 return rp->status;
262
263         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
264         if (!sent)
265                 return rp->status;
266
267         hdev->link_policy = get_unaligned_le16(sent);
268
269         return rp->status;
270 }
271
272 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
273 {
274         struct hci_ev_status *rp = data;
275
276         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
277
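        /* The reset attempt has finished either way, so drop HCI_RESET
         * before checking the status.
         */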
278         clear_bit(HCI_RESET, &hdev->flags);
279
280         if (rp->status)
281                 return rp->status;
282
283         /* Reset all non-persistent flags */
284         hci_dev_clear_volatile_flags(hdev);
285
286         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
287
288         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
289         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
290
291         memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
292         hdev->adv_data_len = 0;
293
294         memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
295         hdev->scan_rsp_data_len = 0;
296
297         hdev->le_scan_type = LE_SCAN_PASSIVE;
298
299         hdev->ssp_debug_mode = 0;
300
301         hci_bdaddr_list_clear(&hdev->le_accept_list);
302         hci_bdaddr_list_clear(&hdev->le_resolv_list);
303
304         return rp->status;
305 }
306
307 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
308                                       struct sk_buff *skb)
309 {
310         struct hci_rp_read_stored_link_key *rp = data;
311         struct hci_cp_read_stored_link_key *sent;
312
313         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
314
315         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
316         if (!sent)
317                 return rp->status;
318
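        /* Only a Read_All_Flag request reports meaningful key counts. */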
319         if (!rp->status && sent->read_all == 0x01) {
320                 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
321                 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
322         }
323
324         return rp->status;
325 }
326
327 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
328                                         struct sk_buff *skb)
329 {
330         struct hci_rp_delete_stored_link_key *rp = data;
331
332         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
333
334         if (rp->status)
335                 return rp->status;
336
337         if (le16_to_cpu(rp->num_keys) <= hdev->stored_num_keys)
338                 hdev->stored_num_keys -= le16_to_cpu(rp->num_keys);
339         else
340                 hdev->stored_num_keys = 0;
341
342         return rp->status;
343 }
344
345 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
346                                   struct sk_buff *skb)
347 {
348         struct hci_ev_status *rp = data;
349         void *sent;
350
351         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
352
353         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
354         if (!sent)
355                 return rp->status;
356
357         hci_dev_lock(hdev);
358
359         if (hci_dev_test_flag(hdev, HCI_MGMT))
360                 mgmt_set_local_name_complete(hdev, sent, rp->status);
361         else if (!rp->status)
362                 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
363
364         hci_dev_unlock(hdev);
365
366         return rp->status;
367 }
368
369 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
370                                  struct sk_buff *skb)
371 {
372         struct hci_rp_read_local_name *rp = data;
373
374         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
375
376         if (rp->status)
377                 return rp->status;
378
379         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
380             hci_dev_test_flag(hdev, HCI_CONFIG))
381                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
382
383         return rp->status;
384 }
385
386 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
387                                    struct sk_buff *skb)
388 {
389         struct hci_ev_status *rp = data;
390         void *sent;
391
392         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
393
394         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
395         if (!sent)
396                 return rp->status;
397
398         hci_dev_lock(hdev);
399
400         if (!rp->status) {
401                 __u8 param = *((__u8 *) sent);
402
403                 if (param == AUTH_ENABLED)
404                         set_bit(HCI_AUTH, &hdev->flags);
405                 else
406                         clear_bit(HCI_AUTH, &hdev->flags);
407         }
408
409         if (hci_dev_test_flag(hdev, HCI_MGMT))
410                 mgmt_auth_enable_complete(hdev, rp->status);
411
412         hci_dev_unlock(hdev);
413
414         return rp->status;
415 }
416
417 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
418                                     struct sk_buff *skb)
419 {
420         struct hci_ev_status *rp = data;
421         __u8 param;
422         void *sent;
423
424         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
425
426         if (rp->status)
427                 return rp->status;
428
429         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
430         if (!sent)
431                 return rp->status;
432
433         param = *((__u8 *) sent);
434
435         if (param)
436                 set_bit(HCI_ENCRYPT, &hdev->flags);
437         else
438                 clear_bit(HCI_ENCRYPT, &hdev->flags);
439
440         return rp->status;
441 }
442
443 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
444                                    struct sk_buff *skb)
445 {
446         struct hci_ev_status *rp = data;
447         __u8 param;
448         void *sent;
449
450         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
451
452         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
453         if (!sent)
454                 return rp->status;
455
456         param = *((__u8 *) sent);
457
458         hci_dev_lock(hdev);
459
460         if (rp->status) {
461                 hdev->discov_timeout = 0;
462                 goto done;
463         }
464
465         if (param & SCAN_INQUIRY)
466                 set_bit(HCI_ISCAN, &hdev->flags);
467         else
468                 clear_bit(HCI_ISCAN, &hdev->flags);
469
470         if (param & SCAN_PAGE)
471                 set_bit(HCI_PSCAN, &hdev->flags);
472         else
473                 clear_bit(HCI_PSCAN, &hdev->flags);
474
475 done:
476         hci_dev_unlock(hdev);
477
478         return rp->status;
479 }
480
481 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
482                                   struct sk_buff *skb)
483 {
484         struct hci_ev_status *rp = data;
485         struct hci_cp_set_event_filter *cp;
486         void *sent;
487
488         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
489
490         if (rp->status)
491                 return rp->status;
492
493         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
494         if (!sent)
495                 return rp->status;
496
497         cp = (struct hci_cp_set_event_filter *)sent;
498
499         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
500                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
501         else
502                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
503
504         return rp->status;
505 }
506
507 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
508                                    struct sk_buff *skb)
509 {
510         struct hci_rp_read_class_of_dev *rp = data;
511
512         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
513
514         if (rp->status)
515                 return rp->status;
516
517         memcpy(hdev->dev_class, rp->dev_class, 3);
518
519         bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
520                    hdev->dev_class[1], hdev->dev_class[0]);
521
522         return rp->status;
523 }
524
525 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
526                                     struct sk_buff *skb)
527 {
528         struct hci_ev_status *rp = data;
529         void *sent;
530
531         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
532
533         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
534         if (!sent)
535                 return rp->status;
536
537         hci_dev_lock(hdev);
538
539         if (!rp->status)
540                 memcpy(hdev->dev_class, sent, 3);
541
542         if (hci_dev_test_flag(hdev, HCI_MGMT))
543                 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
544
545         hci_dev_unlock(hdev);
546
547         return rp->status;
548 }
549
550 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
551                                     struct sk_buff *skb)
552 {
553         struct hci_rp_read_voice_setting *rp = data;
554         __u16 setting;
555
556         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
557
558         if (rp->status)
559                 return rp->status;
560
561         setting = __le16_to_cpu(rp->voice_setting);
562
563         if (hdev->voice_setting == setting)
564                 return rp->status;
565
566         hdev->voice_setting = setting;
567
568         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
569
570         if (hdev->notify)
571                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
572
573         return rp->status;
574 }
575
576 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
577                                      struct sk_buff *skb)
578 {
579         struct hci_ev_status *rp = data;
580         __u16 setting;
581         void *sent;
582
583         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
584
585         if (rp->status)
586                 return rp->status;
587
588         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
589         if (!sent)
590                 return rp->status;
591
592         setting = get_unaligned_le16(sent);
593
594         if (hdev->voice_setting == setting)
595                 return rp->status;
596
597         hdev->voice_setting = setting;
598
599         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
600
601         if (hdev->notify)
602                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
603
604         return rp->status;
605 }
606
607 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
608                                         struct sk_buff *skb)
609 {
610         struct hci_rp_read_num_supported_iac *rp = data;
611
612         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
613
614         if (rp->status)
615                 return rp->status;
616
617         hdev->num_iac = rp->num_iac;
618
619         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
620
621         return rp->status;
622 }
623
624 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
625                                 struct sk_buff *skb)
626 {
627         struct hci_ev_status *rp = data;
628         struct hci_cp_write_ssp_mode *sent;
629
630         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
631
632         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
633         if (!sent)
634                 return rp->status;
635
636         hci_dev_lock(hdev);
637
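        /* Mirror the new Simple Pairing setting both in the host features
         * page and in the HCI_SSP_ENABLED device flag.
         */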
638         if (!rp->status) {
639                 if (sent->mode)
640                         hdev->features[1][0] |= LMP_HOST_SSP;
641                 else
642                         hdev->features[1][0] &= ~LMP_HOST_SSP;
643         }
644
645         if (!rp->status) {
646                 if (sent->mode)
647                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
648                 else
649                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
650         }
651
652         hci_dev_unlock(hdev);
653
654         return rp->status;
655 }
656
657 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
658                                   struct sk_buff *skb)
659 {
660         struct hci_ev_status *rp = data;
661         struct hci_cp_write_sc_support *sent;
662
663         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
664
665         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
666         if (!sent)
667                 return rp->status;
668
669         hci_dev_lock(hdev);
670
671         if (!rp->status) {
672                 if (sent->support)
673                         hdev->features[1][0] |= LMP_HOST_SC;
674                 else
675                         hdev->features[1][0] &= ~LMP_HOST_SC;
676         }
677
678         if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
679                 if (sent->support)
680                         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
681                 else
682                         hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
683         }
684
685         hci_dev_unlock(hdev);
686
687         return rp->status;
688 }
689
690 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
691                                     struct sk_buff *skb)
692 {
693         struct hci_rp_read_local_version *rp = data;
694
695         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
696
697         if (rp->status)
698                 return rp->status;
699
700         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
701             hci_dev_test_flag(hdev, HCI_CONFIG)) {
702                 hdev->hci_ver = rp->hci_ver;
703                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
704                 hdev->lmp_ver = rp->lmp_ver;
705                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
706                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
707         }
708
709         return rp->status;
710 }
711
712 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
713                                      struct sk_buff *skb)
714 {
715         struct hci_rp_read_local_commands *rp = data;
716
717         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
718
719         if (rp->status)
720                 return rp->status;
721
722         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
723             hci_dev_test_flag(hdev, HCI_CONFIG))
724                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
725
726         return rp->status;
727 }
728
729 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
730                                            struct sk_buff *skb)
731 {
732         struct hci_rp_read_auth_payload_to *rp = data;
733         struct hci_conn *conn;
734
735         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
736
737         if (rp->status)
738                 return rp->status;
739
740         hci_dev_lock(hdev);
741
742         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
743         if (conn)
744                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
745
746         hci_dev_unlock(hdev);
747
748         return rp->status;
749 }
750
751 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
752                                             struct sk_buff *skb)
753 {
754         struct hci_rp_write_auth_payload_to *rp = data;
755         struct hci_conn *conn;
756         void *sent;
757
758         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
759
760         if (rp->status)
761                 return rp->status;
762
763         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
764         if (!sent)
765                 return rp->status;
766
767         hci_dev_lock(hdev);
768
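        /* As with Write_Link_Policy above, the sent parameters start with
         * the 2-byte handle, so the timeout value sits at offset 2.
         */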
769         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
770         if (conn)
771                 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
772
773         hci_dev_unlock(hdev);
774
775         return rp->status;
776 }
777
778 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
779                                      struct sk_buff *skb)
780 {
781         struct hci_rp_read_local_features *rp = data;
782
783         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
784
785         if (rp->status)
786                 return rp->status;
787
788         memcpy(hdev->features, rp->features, 8);
789
790         /* Adjust default settings according to the features
791          * supported by the device. */
792
793         if (hdev->features[0][0] & LMP_3SLOT)
794                 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
795
796         if (hdev->features[0][0] & LMP_5SLOT)
797                 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
798
799         if (hdev->features[0][1] & LMP_HV2) {
800                 hdev->pkt_type  |= (HCI_HV2);
801                 hdev->esco_type |= (ESCO_HV2);
802         }
803
804         if (hdev->features[0][1] & LMP_HV3) {
805                 hdev->pkt_type  |= (HCI_HV3);
806                 hdev->esco_type |= (ESCO_HV3);
807         }
808
809         if (lmp_esco_capable(hdev))
810                 hdev->esco_type |= (ESCO_EV3);
811
812         if (hdev->features[0][4] & LMP_EV4)
813                 hdev->esco_type |= (ESCO_EV4);
814
815         if (hdev->features[0][4] & LMP_EV5)
816                 hdev->esco_type |= (ESCO_EV5);
817
818         if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
819                 hdev->esco_type |= (ESCO_2EV3);
820
821         if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
822                 hdev->esco_type |= (ESCO_3EV3);
823
824         if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
825                 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
826
827         return rp->status;
828 }
829
830 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
831                                          struct sk_buff *skb)
832 {
833         struct hci_rp_read_local_ext_features *rp = data;
834
835         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
836
837         if (rp->status)
838                 return rp->status;
839
840         if (hdev->max_page < rp->max_page)
841                 hdev->max_page = rp->max_page;
842
843         if (rp->page < HCI_MAX_PAGES)
844                 memcpy(hdev->features[rp->page], rp->features, 8);
845
846         return rp->status;
847 }
848
849 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
850                                         struct sk_buff *skb)
851 {
852         struct hci_rp_read_flow_control_mode *rp = data;
853
854         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
855
856         if (rp->status)
857                 return rp->status;
858
859         hdev->flow_ctl_mode = rp->mode;
860
861         return rp->status;
862 }
863
864 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
865                                   struct sk_buff *skb)
866 {
867         struct hci_rp_read_buffer_size *rp = data;
868
869         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
870
871         if (rp->status)
872                 return rp->status;
873
874         hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
875         hdev->sco_mtu  = rp->sco_mtu;
876         hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
877         hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
878
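        /* Controllers flagged with HCI_QUIRK_FIXUP_BUFFER_SIZE report
         * unusable SCO buffer values, so override them with sane defaults.
         */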
879         if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
880                 hdev->sco_mtu  = 64;
881                 hdev->sco_pkts = 8;
882         }
883
884         hdev->acl_cnt = hdev->acl_pkts;
885         hdev->sco_cnt = hdev->sco_pkts;
886
887         BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
888                hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
889
890         return rp->status;
891 }
892
893 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
894                               struct sk_buff *skb)
895 {
896         struct hci_rp_read_bd_addr *rp = data;
897
898         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
899
900         if (rp->status)
901                 return rp->status;
902
903         if (test_bit(HCI_INIT, &hdev->flags))
904                 bacpy(&hdev->bdaddr, &rp->bdaddr);
905
906         if (hci_dev_test_flag(hdev, HCI_SETUP))
907                 bacpy(&hdev->setup_addr, &rp->bdaddr);
908
909         return rp->status;
910 }
911
912 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
913                                          struct sk_buff *skb)
914 {
915         struct hci_rp_read_local_pairing_opts *rp = data;
916
917         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
918
919         if (rp->status)
920                 return rp->status;
921
922         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
923             hci_dev_test_flag(hdev, HCI_CONFIG)) {
924                 hdev->pairing_opts = rp->pairing_opts;
925                 hdev->max_enc_key_size = rp->max_key_size;
926         }
927
928         return rp->status;
929 }
930
931 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
932                                          struct sk_buff *skb)
933 {
934         struct hci_rp_read_page_scan_activity *rp = data;
935
936         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
937
938         if (rp->status)
939                 return rp->status;
940
941         if (test_bit(HCI_INIT, &hdev->flags)) {
942                 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
943                 hdev->page_scan_window = __le16_to_cpu(rp->window);
944         }
945
946         return rp->status;
947 }
948
949 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
950                                           struct sk_buff *skb)
951 {
952         struct hci_ev_status *rp = data;
953         struct hci_cp_write_page_scan_activity *sent;
954
955         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
956
957         if (rp->status)
958                 return rp->status;
959
960         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
961         if (!sent)
962                 return rp->status;
963
964         hdev->page_scan_interval = __le16_to_cpu(sent->interval);
965         hdev->page_scan_window = __le16_to_cpu(sent->window);
966
967         return rp->status;
968 }
969
970 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
971                                      struct sk_buff *skb)
972 {
973         struct hci_rp_read_page_scan_type *rp = data;
974
975         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
976
977         if (rp->status)
978                 return rp->status;
979
980         if (test_bit(HCI_INIT, &hdev->flags))
981                 hdev->page_scan_type = rp->type;
982
983         return rp->status;
984 }
985
986 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
987                                       struct sk_buff *skb)
988 {
989         struct hci_ev_status *rp = data;
990         u8 *type;
991
992         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
993
994         if (rp->status)
995                 return rp->status;
996
997         type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
998         if (type)
999                 hdev->page_scan_type = *type;
1000
1001         return rp->status;
1002 }
1003
1004 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1005                                       struct sk_buff *skb)
1006 {
1007         struct hci_rp_read_data_block_size *rp = data;
1008
1009         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1010
1011         if (rp->status)
1012                 return rp->status;
1013
1014         hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1015         hdev->block_len = __le16_to_cpu(rp->block_len);
1016         hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1017
1018         hdev->block_cnt = hdev->num_blocks;
1019
1020         BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1021                hdev->block_cnt, hdev->block_len);
1022
1023         return rp->status;
1024 }
1025
1026 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1027                             struct sk_buff *skb)
1028 {
1029         struct hci_rp_read_clock *rp = data;
1030         struct hci_cp_read_clock *cp;
1031         struct hci_conn *conn;
1032
1033         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1034
1035         if (rp->status)
1036                 return rp->status;
1037
1038         hci_dev_lock(hdev);
1039
1040         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1041         if (!cp)
1042                 goto unlock;
1043
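        /* Which_Clock 0x00 refers to the local native clock; any other
         * value refers to the piconet clock of the given connection.
         */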
1044         if (cp->which == 0x00) {
1045                 hdev->clock = le32_to_cpu(rp->clock);
1046                 goto unlock;
1047         }
1048
1049         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1050         if (conn) {
1051                 conn->clock = le32_to_cpu(rp->clock);
1052                 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1053         }
1054
1055 unlock:
1056         hci_dev_unlock(hdev);
1057         return rp->status;
1058 }
1059
1060 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1061                                      struct sk_buff *skb)
1062 {
1063         struct hci_rp_read_local_amp_info *rp = data;
1064
1065         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1066
1067         if (rp->status)
1068                 return rp->status;
1069
1070         hdev->amp_status = rp->amp_status;
1071         hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1072         hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1073         hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1074         hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1075         hdev->amp_type = rp->amp_type;
1076         hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1077         hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1078         hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1079         hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1080
1081         return rp->status;
1082 }
1083
1084 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1085                                        struct sk_buff *skb)
1086 {
1087         struct hci_rp_read_inq_rsp_tx_power *rp = data;
1088
1089         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1090
1091         if (rp->status)
1092                 return rp->status;
1093
1094         hdev->inq_tx_power = rp->tx_power;
1095
1096         return rp->status;
1097 }
1098
1099 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1100                                              struct sk_buff *skb)
1101 {
1102         struct hci_rp_read_def_err_data_reporting *rp = data;
1103
1104         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1105
1106         if (rp->status)
1107                 return rp->status;
1108
1109         hdev->err_data_reporting = rp->err_data_reporting;
1110
1111         return rp->status;
1112 }
1113
1114 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1115                                               struct sk_buff *skb)
1116 {
1117         struct hci_ev_status *rp = data;
1118         struct hci_cp_write_def_err_data_reporting *cp;
1119
1120         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1121
1122         if (rp->status)
1123                 return rp->status;
1124
1125         cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1126         if (!cp)
1127                 return rp->status;
1128
1129         hdev->err_data_reporting = cp->err_data_reporting;
1130
1131         return rp->status;
1132 }
1133
1134 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1135                                 struct sk_buff *skb)
1136 {
1137         struct hci_rp_pin_code_reply *rp = data;
1138         struct hci_cp_pin_code_reply *cp;
1139         struct hci_conn *conn;
1140
1141         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1142
1143         hci_dev_lock(hdev);
1144
1145         if (hci_dev_test_flag(hdev, HCI_MGMT))
1146                 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1147
1148         if (rp->status)
1149                 goto unlock;
1150
1151         cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1152         if (!cp)
1153                 goto unlock;
1154
1155         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1156         if (conn)
1157                 conn->pin_length = cp->pin_len;
1158
1159 unlock:
1160         hci_dev_unlock(hdev);
1161         return rp->status;
1162 }
1163
1164 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1165                                     struct sk_buff *skb)
1166 {
1167         struct hci_rp_pin_code_neg_reply *rp = data;
1168
1169         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1170
1171         hci_dev_lock(hdev);
1172
1173         if (hci_dev_test_flag(hdev, HCI_MGMT))
1174                 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1175                                                  rp->status);
1176
1177         hci_dev_unlock(hdev);
1178
1179         return rp->status;
1180 }
1181
1182 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1183                                      struct sk_buff *skb)
1184 {
1185         struct hci_rp_le_read_buffer_size *rp = data;
1186
1187         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1188
1189         if (rp->status)
1190                 return rp->status;
1191
1192         hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1193         hdev->le_pkts = rp->le_max_pkt;
1194
1195         hdev->le_cnt = hdev->le_pkts;
1196
1197         BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1198
1199         return rp->status;
1200 }
1201
1202 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1203                                         struct sk_buff *skb)
1204 {
1205         struct hci_rp_le_read_local_features *rp = data;
1206
1207         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1208
1209         if (rp->status)
1210                 return rp->status;
1211
1212         memcpy(hdev->le_features, rp->features, 8);
1213
1214         return rp->status;
1215 }
1216
1217 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1218                                       struct sk_buff *skb)
1219 {
1220         struct hci_rp_le_read_adv_tx_power *rp = data;
1221
1222         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1223
1224         if (rp->status)
1225                 return rp->status;
1226
1227         hdev->adv_tx_power = rp->tx_power;
1228
1229         return rp->status;
1230 }
1231
1232 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1233                                     struct sk_buff *skb)
1234 {
1235         struct hci_rp_user_confirm_reply *rp = data;
1236
1237         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1238
1239         hci_dev_lock(hdev);
1240
1241         if (hci_dev_test_flag(hdev, HCI_MGMT))
1242                 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1243                                                  rp->status);
1244
1245         hci_dev_unlock(hdev);
1246
1247         return rp->status;
1248 }
1249
1250 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1251                                         struct sk_buff *skb)
1252 {
1253         struct hci_rp_user_confirm_reply *rp = data;
1254
1255         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1256
1257         hci_dev_lock(hdev);
1258
1259         if (hci_dev_test_flag(hdev, HCI_MGMT))
1260                 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1261                                                      ACL_LINK, 0, rp->status);
1262
1263         hci_dev_unlock(hdev);
1264
1265         return rp->status;
1266 }
1267
1268 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1269                                     struct sk_buff *skb)
1270 {
1271         struct hci_rp_user_confirm_reply *rp = data;
1272
1273         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1274
1275         hci_dev_lock(hdev);
1276
1277         if (hci_dev_test_flag(hdev, HCI_MGMT))
1278                 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1279                                                  0, rp->status);
1280
1281         hci_dev_unlock(hdev);
1282
1283         return rp->status;
1284 }
1285
1286 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1287                                         struct sk_buff *skb)
1288 {
1289         struct hci_rp_user_confirm_reply *rp = data;
1290
1291         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1292
1293         hci_dev_lock(hdev);
1294
1295         if (hci_dev_test_flag(hdev, HCI_MGMT))
1296                 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1297                                                      ACL_LINK, 0, rp->status);
1298
1299         hci_dev_unlock(hdev);
1300
1301         return rp->status;
1302 }
1303
1304 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1305                                      struct sk_buff *skb)
1306 {
1307         struct hci_rp_read_local_oob_data *rp = data;
1308
1309         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1310
1311         return rp->status;
1312 }
1313
1314 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1315                                          struct sk_buff *skb)
1316 {
1317         struct hci_rp_read_local_oob_ext_data *rp = data;
1318
1319         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1320
1321         return rp->status;
1322 }
1323
1324 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1325                                     struct sk_buff *skb)
1326 {
1327         struct hci_ev_status *rp = data;
1328         bdaddr_t *sent;
1329
1330         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1331
1332         if (rp->status)
1333                 return rp->status;
1334
1335         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1336         if (!sent)
1337                 return rp->status;
1338
1339         hci_dev_lock(hdev);
1340
1341         bacpy(&hdev->random_addr, sent);
1342
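        /* If the programmed address is the current RPA, the RPA is in use
         * again: clear the expired flag and re-arm the expiry work.
         */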
1343         if (!bacmp(&hdev->rpa, sent)) {
1344                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1345                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1346                                    secs_to_jiffies(hdev->rpa_timeout));
1347         }
1348
1349         hci_dev_unlock(hdev);
1350
1351         return rp->status;
1352 }
1353
1354 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1355                                     struct sk_buff *skb)
1356 {
1357         struct hci_ev_status *rp = data;
1358         struct hci_cp_le_set_default_phy *cp;
1359
1360         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1361
1362         if (rp->status)
1363                 return rp->status;
1364
1365         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1366         if (!cp)
1367                 return rp->status;
1368
1369         hci_dev_lock(hdev);
1370
1371         hdev->le_tx_def_phys = cp->tx_phys;
1372         hdev->le_rx_def_phys = cp->rx_phys;
1373
1374         hci_dev_unlock(hdev);
1375
1376         return rp->status;
1377 }
1378
1379 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1380                                             struct sk_buff *skb)
1381 {
1382         struct hci_ev_status *rp = data;
1383         struct hci_cp_le_set_adv_set_rand_addr *cp;
1384         struct adv_info *adv;
1385
1386         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1387
1388         if (rp->status)
1389                 return rp->status;
1390
1391         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1392         /* Update only for an actual adv instance, since handle 0x00 shall use
1393          * HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
1394          * non-extended advertising.
1395          */
1396         if (!cp || !cp->handle)
1397                 return rp->status;
1398
1399         hci_dev_lock(hdev);
1400
1401         adv = hci_find_adv_instance(hdev, cp->handle);
1402         if (adv) {
1403                 bacpy(&adv->random_addr, &cp->bdaddr);
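                /* Re-arm the per-instance RPA expiry work if the new
                 * address is the controller's current RPA.
                 */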
1404                 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1405                         adv->rpa_expired = false;
1406                         queue_delayed_work(hdev->workqueue,
1407                                            &adv->rpa_expired_cb,
1408                                            secs_to_jiffies(hdev->rpa_timeout));
1409                 }
1410         }
1411
1412         hci_dev_unlock(hdev);
1413
1414         return rp->status;
1415 }
1416
1417 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1418                                    struct sk_buff *skb)
1419 {
1420         struct hci_ev_status *rp = data;
1421         u8 *instance;
1422         int err;
1423
1424         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1425
1426         if (rp->status)
1427                 return rp->status;
1428
1429         instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1430         if (!instance)
1431                 return rp->status;
1432
1433         hci_dev_lock(hdev);
1434
1435         err = hci_remove_adv_instance(hdev, *instance);
1436         if (!err)
1437                 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1438                                          *instance);
1439
1440         hci_dev_unlock(hdev);
1441
1442         return rp->status;
1443 }
1444
1445 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1446                                    struct sk_buff *skb)
1447 {
1448         struct hci_ev_status *rp = data;
1449         struct adv_info *adv, *n;
1450         int err;
1451
1452         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1453
1454         if (rp->status)
1455                 return rp->status;
1456
1457         if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1458                 return rp->status;
1459
1460         hci_dev_lock(hdev);
1461
1462         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1463                 u8 instance = adv->instance;
1464
1465                 err = hci_remove_adv_instance(hdev, instance);
1466                 if (!err)
1467                         mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1468                                                  hdev, instance);
1469         }
1470
1471         hci_dev_unlock(hdev);
1472
1473         return rp->status;
1474 }
1475
1476 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1477                                         struct sk_buff *skb)
1478 {
1479         struct hci_rp_le_read_transmit_power *rp = data;
1480
1481         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1482
1483         if (rp->status)
1484                 return rp->status;
1485
1486         hdev->min_le_tx_power = rp->min_le_tx_power;
1487         hdev->max_le_tx_power = rp->max_le_tx_power;
1488
1489         return rp->status;
1490 }
1491
1492 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1493                                      struct sk_buff *skb)
1494 {
1495         struct hci_ev_status *rp = data;
1496         struct hci_cp_le_set_privacy_mode *cp;
1497         struct hci_conn_params *params;
1498
1499         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1500
1501         if (rp->status)
1502                 return rp->status;
1503
1504         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1505         if (!cp)
1506                 return rp->status;
1507
1508         hci_dev_lock(hdev);
1509
1510         params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1511         if (params)
1512                 params->privacy_mode = cp->mode;
1513
1514         hci_dev_unlock(hdev);
1515
1516         return rp->status;
1517 }
1518
1519 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1520                                    struct sk_buff *skb)
1521 {
1522         struct hci_ev_status *rp = data;
1523         __u8 *sent;
1524
1525         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1526
1527         if (rp->status)
1528                 return rp->status;
1529
1530         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1531         if (!sent)
1532                 return rp->status;
1533
1534         hci_dev_lock(hdev);
1535
1536         /* If we're doing connection initiation as peripheral, set a
1537          * timeout in case something goes wrong.
1538          */
1539         if (*sent) {
1540                 struct hci_conn *conn;
1541
1542                 hci_dev_set_flag(hdev, HCI_LE_ADV);
1543
1544                 conn = hci_lookup_le_connect(hdev);
1545                 if (conn)
1546                         queue_delayed_work(hdev->workqueue,
1547                                            &conn->le_conn_timeout,
1548                                            conn->conn_timeout);
1549         } else {
1550                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1551         }
1552
1553         hci_dev_unlock(hdev);
1554
1555         return rp->status;
1556 }
1557
1558 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1559                                        struct sk_buff *skb)
1560 {
1561         struct hci_cp_le_set_ext_adv_enable *cp;
1562         struct hci_cp_ext_adv_set *set;
1563         struct adv_info *adv = NULL, *n;
1564         struct hci_ev_status *rp = data;
1565
1566         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1567
1568         if (rp->status)
1569                 return rp->status;
1570
1571         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1572         if (!cp)
1573                 return rp->status;
1574
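        /* The command data carries an array of hci_cp_ext_adv_set entries;
         * this handler only looks at the first entry's handle.
         */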
1575         set = (void *)cp->data;
1576
1577         hci_dev_lock(hdev);
1578
1579         if (cp->num_of_sets)
1580                 adv = hci_find_adv_instance(hdev, set->handle);
1581
1582         if (cp->enable) {
1583                 struct hci_conn *conn;
1584
1585                 hci_dev_set_flag(hdev, HCI_LE_ADV);
1586
1587                 if (adv)
1588                         adv->enabled = true;
1589
1590                 conn = hci_lookup_le_connect(hdev);
1591                 if (conn)
1592                         queue_delayed_work(hdev->workqueue,
1593                                            &conn->le_conn_timeout,
1594                                            conn->conn_timeout);
1595         } else {
1596                 if (cp->num_of_sets) {
1597                         if (adv)
1598                                 adv->enabled = false;
1599
1600                         /* If just one instance was disabled, check if there is
1601                          * any other instance still enabled before clearing HCI_LE_ADV.
1602                          */
1603                         list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1604                                                  list) {
1605                                 if (adv->enabled)
1606                                         goto unlock;
1607                         }
1608                 } else {
1609                         /* All instances shall be considered disabled */
1610                         list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1611                                                  list)
1612                                 adv->enabled = false;
1613                 }
1614
1615                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1616         }
1617
1618 unlock:
1619         hci_dev_unlock(hdev);
1620         return rp->status;
1621 }
1622
1623 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1624                                    struct sk_buff *skb)
1625 {
1626         struct hci_cp_le_set_scan_param *cp;
1627         struct hci_ev_status *rp = data;
1628
1629         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1630
1631         if (rp->status)
1632                 return rp->status;
1633
1634         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1635         if (!cp)
1636                 return rp->status;
1637
1638         hci_dev_lock(hdev);
1639
1640         hdev->le_scan_type = cp->type;
1641
1642         hci_dev_unlock(hdev);
1643
1644         return rp->status;
1645 }
1646
1647 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1648                                        struct sk_buff *skb)
1649 {
1650         struct hci_cp_le_set_ext_scan_params *cp;
1651         struct hci_ev_status *rp = data;
1652         struct hci_cp_le_scan_phy_params *phy_param;
1653
1654         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1655
1656         if (rp->status)
1657                 return rp->status;
1658
1659         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1660         if (!cp)
1661                 return rp->status;
1662
1663         phy_param = (void *)cp->data;
1664
1665         hci_dev_lock(hdev);
1666
1667         hdev->le_scan_type = phy_param->type;
1668
1669         hci_dev_unlock(hdev);
1670
1671         return rp->status;
1672 }
1673
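/* During active scanning the last advertising report is cached in the
 * discovery state so it can be combined with the matching scan response;
 * if scanning stops while a report is still pending it is delivered to
 * the management interface as-is.
 */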
1674 static bool has_pending_adv_report(struct hci_dev *hdev)
1675 {
1676         struct discovery_state *d = &hdev->discovery;
1677
1678         return bacmp(&d->last_adv_addr, BDADDR_ANY);
1679 }
1680
1681 static void clear_pending_adv_report(struct hci_dev *hdev)
1682 {
1683         struct discovery_state *d = &hdev->discovery;
1684
1685         bacpy(&d->last_adv_addr, BDADDR_ANY);
1686         d->last_adv_data_len = 0;
1687 }
1688
1689 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1690                                      u8 bdaddr_type, s8 rssi, u32 flags,
1691                                      u8 *data, u8 len)
1692 {
1693         struct discovery_state *d = &hdev->discovery;
1694
1695         if (len > HCI_MAX_AD_LENGTH)
1696                 return;
1697
1698         bacpy(&d->last_adv_addr, bdaddr);
1699         d->last_adv_addr_type = bdaddr_type;
1700         d->last_adv_rssi = rssi;
1701         d->last_adv_flags = flags;
1702         memcpy(d->last_adv_data, data, len);
1703         d->last_adv_data_len = len;
1704 }
1705
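/* Common completion logic shared by the legacy and extended
 * LE Set Scan Enable Command Complete handlers.
 */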
1706 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1707 {
1708         hci_dev_lock(hdev);
1709
1710         switch (enable) {
1711         case LE_SCAN_ENABLE:
1712                 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1713                 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1714                         clear_pending_adv_report(hdev);
1715                 break;
1716
1717         case LE_SCAN_DISABLE:
1718                 /* We do this here instead of when setting DISCOVERY_STOPPED
1719                  * since the latter would potentially require waiting for
1720                  * inquiry to stop too.
1721                  */
1722                 if (has_pending_adv_report(hdev)) {
1723                         struct discovery_state *d = &hdev->discovery;
1724
1725                         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1726                                           d->last_adv_addr_type, NULL,
1727                                           d->last_adv_rssi, d->last_adv_flags,
1728                                           d->last_adv_data,
1729                                           d->last_adv_data_len, NULL, 0);
1730                 }
1731
1732                 /* Cancel this timer so that we don't try to disable scanning
1733                  * when it's already disabled.
1734                  */
1735                 cancel_delayed_work(&hdev->le_scan_disable);
1736
1737                 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1738
1739                 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1740                  * interrupted scanning due to a connect request. Therefore,
1741                  * mark discovery as stopped.
1742                  */
1743                 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1744                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1745
1746                 break;
1747
1748         default:
1749                 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1750                            enable);
1751                 break;
1752         }
1753
1754         hci_dev_unlock(hdev);
1755 }
1756
1757 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1758                                     struct sk_buff *skb)
1759 {
1760         struct hci_cp_le_set_scan_enable *cp;
1761         struct hci_ev_status *rp = data;
1762
1763         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1764
1765         if (rp->status)
1766                 return rp->status;
1767
1768         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1769         if (!cp)
1770                 return rp->status;
1771
1772         le_set_scan_enable_complete(hdev, cp->enable);
1773
1774         return rp->status;
1775 }
1776
1777 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1778                                         struct sk_buff *skb)
1779 {
1780         struct hci_cp_le_set_ext_scan_enable *cp;
1781         struct hci_ev_status *rp = data;
1782
1783         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1784
1785         if (rp->status)
1786                 return rp->status;
1787
1788         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1789         if (!cp)
1790                 return rp->status;
1791
1792         le_set_scan_enable_complete(hdev, cp->enable);
1793
1794         return rp->status;
1795 }
1796
1797 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1798                                       struct sk_buff *skb)
1799 {
1800         struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1801
1802         bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1803                    rp->num_of_sets);
1804
1805         if (rp->status)
1806                 return rp->status;
1807
1808         hdev->le_num_of_adv_sets = rp->num_of_sets;
1809
1810         return rp->status;
1811 }
1812
1813 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1814                                           struct sk_buff *skb)
1815 {
1816         struct hci_rp_le_read_accept_list_size *rp = data;
1817
1818         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1819
1820         if (rp->status)
1821                 return rp->status;
1822
1823         hdev->le_accept_list_size = rp->size;
1824
1825         return rp->status;
1826 }
1827
1828 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1829                                       struct sk_buff *skb)
1830 {
1831         struct hci_ev_status *rp = data;
1832
1833         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1834
1835         if (rp->status)
1836                 return rp->status;
1837
1838         hci_dev_lock(hdev);
1839         hci_bdaddr_list_clear(&hdev->le_accept_list);
1840         hci_dev_unlock(hdev);
1841
1842         return rp->status;
1843 }
1844
1845 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1846                                        struct sk_buff *skb)
1847 {
1848         struct hci_cp_le_add_to_accept_list *sent;
1849         struct hci_ev_status *rp = data;
1850
1851         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1852
1853         if (rp->status)
1854                 return rp->status;
1855
1856         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1857         if (!sent)
1858                 return rp->status;
1859
1860         hci_dev_lock(hdev);
1861         hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1862                             sent->bdaddr_type);
1863         hci_dev_unlock(hdev);
1864
1865         return rp->status;
1866 }
1867
1868 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1869                                          struct sk_buff *skb)
1870 {
1871         struct hci_cp_le_del_from_accept_list *sent;
1872         struct hci_ev_status *rp = data;
1873
1874         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1875
1876         if (rp->status)
1877                 return rp->status;
1878
1879         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1880         if (!sent)
1881                 return rp->status;
1882
1883         hci_dev_lock(hdev);
1884         hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1885                             sent->bdaddr_type);
1886         hci_dev_unlock(hdev);
1887
1888         return rp->status;
1889 }
1890
1891 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1892                                           struct sk_buff *skb)
1893 {
1894         struct hci_rp_le_read_supported_states *rp = data;
1895
1896         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1897
1898         if (rp->status)
1899                 return rp->status;
1900
1901         memcpy(hdev->le_states, rp->le_states, 8);
1902
1903         return rp->status;
1904 }
1905
1906 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1907                                       struct sk_buff *skb)
1908 {
1909         struct hci_rp_le_read_def_data_len *rp = data;
1910
1911         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1912
1913         if (rp->status)
1914                 return rp->status;
1915
1916         hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1917         hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1918
1919         return rp->status;
1920 }
1921
1922 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1923                                        struct sk_buff *skb)
1924 {
1925         struct hci_cp_le_write_def_data_len *sent;
1926         struct hci_ev_status *rp = data;
1927
1928         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1929
1930         if (rp->status)
1931                 return rp->status;
1932
1933         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1934         if (!sent)
1935                 return rp->status;
1936
1937         hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1938         hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1939
1940         return rp->status;
1941 }
1942
1943 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
1944                                        struct sk_buff *skb)
1945 {
1946         struct hci_cp_le_add_to_resolv_list *sent;
1947         struct hci_ev_status *rp = data;
1948
1949         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1950
1951         if (rp->status)
1952                 return rp->status;
1953
1954         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1955         if (!sent)
1956                 return rp->status;
1957
1958         hci_dev_lock(hdev);
1959         hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1960                                 sent->bdaddr_type, sent->peer_irk,
1961                                 sent->local_irk);
1962         hci_dev_unlock(hdev);
1963
1964         return rp->status;
1965 }
1966
1967 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
1968                                          struct sk_buff *skb)
1969 {
1970         struct hci_cp_le_del_from_resolv_list *sent;
1971         struct hci_ev_status *rp = data;
1972
1973         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1974
1975         if (rp->status)
1976                 return rp->status;
1977
1978         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1979         if (!sent)
1980                 return rp->status;
1981
1982         hci_dev_lock(hdev);
1983         hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1984                             sent->bdaddr_type);
1985         hci_dev_unlock(hdev);
1986
1987         return rp->status;
1988 }
1989
1990 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
1991                                       struct sk_buff *skb)
1992 {
1993         struct hci_ev_status *rp = data;
1994
1995         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1996
1997         if (rp->status)
1998                 return rp->status;
1999
2000         hci_dev_lock(hdev);
2001         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2002         hci_dev_unlock(hdev);
2003
2004         return rp->status;
2005 }
2006
2007 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2008                                           struct sk_buff *skb)
2009 {
2010         struct hci_rp_le_read_resolv_list_size *rp = data;
2011
2012         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2013
2014         if (rp->status)
2015                 return rp->status;
2016
2017         hdev->le_resolv_list_size = rp->size;
2018
2019         return rp->status;
2020 }
2021
2022 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2023                                                struct sk_buff *skb)
2024 {
2025         struct hci_ev_status *rp = data;
2026         __u8 *sent;
2027
2028         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2029
2030         if (rp->status)
2031                 return rp->status;
2032
2033         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2034         if (!sent)
2035                 return rp->status;
2036
2037         hci_dev_lock(hdev);
2038
2039         if (*sent)
2040                 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2041         else
2042                 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2043
2044         hci_dev_unlock(hdev);
2045
2046         return rp->status;
2047 }
2048
2049 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2050                                       struct sk_buff *skb)
2051 {
2052         struct hci_rp_le_read_max_data_len *rp = data;
2053
2054         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2055
2056         if (rp->status)
2057                 return rp->status;
2058
2059         hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2060         hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2061         hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2062         hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2063
2064         return rp->status;
2065 }
2066
2067 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2068                                          struct sk_buff *skb)
2069 {
2070         struct hci_cp_write_le_host_supported *sent;
2071         struct hci_ev_status *rp = data;
2072
2073         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2074
2075         if (rp->status)
2076                 return rp->status;
2077
2078         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2079         if (!sent)
2080                 return rp->status;
2081
2082         hci_dev_lock(hdev);
2083
2084         if (sent->le) {
2085                 hdev->features[1][0] |= LMP_HOST_LE;
2086                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2087         } else {
2088                 hdev->features[1][0] &= ~LMP_HOST_LE;
2089                 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2090                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2091         }
2092
2093         if (sent->simul)
2094                 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2095         else
2096                 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2097
2098         hci_dev_unlock(hdev);
2099
2100         return rp->status;
2101 }
2102
2103 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2104                                struct sk_buff *skb)
2105 {
2106         struct hci_cp_le_set_adv_param *cp;
2107         struct hci_ev_status *rp = data;
2108
2109         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2110
2111         if (rp->status)
2112                 return rp->status;
2113
2114         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2115         if (!cp)
2116                 return rp->status;
2117
2118         hci_dev_lock(hdev);
2119         hdev->adv_addr_type = cp->own_address_type;
2120         hci_dev_unlock(hdev);
2121
2122         return rp->status;
2123 }
2124
2125 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2126                                    struct sk_buff *skb)
2127 {
2128         struct hci_rp_le_set_ext_adv_params *rp = data;
2129         struct hci_cp_le_set_ext_adv_params *cp;
2130         struct adv_info *adv_instance;
2131
2132         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2133
2134         if (rp->status)
2135                 return rp->status;
2136
2137         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2138         if (!cp)
2139                 return rp->status;
2140
2141         hci_dev_lock(hdev);
2142         hdev->adv_addr_type = cp->own_addr_type;
2143         if (!cp->handle) {
2144                 /* Store in hdev for instance 0 */
2145                 hdev->adv_tx_power = rp->tx_power;
2146         } else {
2147                 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2148                 if (adv_instance)
2149                         adv_instance->tx_power = rp->tx_power;
2150         }
2151         /* Update adv data as tx power is known now */
2152         hci_req_update_adv_data(hdev, cp->handle);
2153
2154         hci_dev_unlock(hdev);
2155
2156         return rp->status;
2157 }
2158
2159 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2160                            struct sk_buff *skb)
2161 {
2162         struct hci_rp_read_rssi *rp = data;
2163         struct hci_conn *conn;
2164
2165         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2166
2167         if (rp->status)
2168                 return rp->status;
2169
2170         hci_dev_lock(hdev);
2171
2172         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2173         if (conn)
2174                 conn->rssi = rp->rssi;
2175
2176         hci_dev_unlock(hdev);
2177
2178         return rp->status;
2179 }
2180
2181 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2182                                struct sk_buff *skb)
2183 {
2184         struct hci_cp_read_tx_power *sent;
2185         struct hci_rp_read_tx_power *rp = data;
2186         struct hci_conn *conn;
2187
2188         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2189
2190         if (rp->status)
2191                 return rp->status;
2192
2193         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2194         if (!sent)
2195                 return rp->status;
2196
2197         hci_dev_lock(hdev);
2198
2199         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2200         if (!conn)
2201                 goto unlock;
2202
2203         switch (sent->type) {
2204         case 0x00:
2205                 conn->tx_power = rp->tx_power;
2206                 break;
2207         case 0x01:
2208                 conn->max_tx_power = rp->tx_power;
2209                 break;
2210         }
2211
2212 unlock:
2213         hci_dev_unlock(hdev);
2214         return rp->status;
2215 }
2216
2217 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2218                                       struct sk_buff *skb)
2219 {
2220         struct hci_ev_status *rp = data;
2221         u8 *mode;
2222
2223         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2224
2225         if (rp->status)
2226                 return rp->status;
2227
2228         mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2229         if (mode)
2230                 hdev->ssp_debug_mode = *mode;
2231
2232         return rp->status;
2233 }
2234
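     /* Handlers for HCI Command Status events follow. A non-zero status means
      * the controller could not start the command, so most of these handlers
      * only clean up state that was set up when the command was sent.
      */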
2235 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2236 {
2237         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2238
2239         if (status) {
2240                 hci_conn_check_pending(hdev);
2241                 return;
2242         }
2243
2244         set_bit(HCI_INQUIRY, &hdev->flags);
2245 }
2246
2247 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2248 {
2249         struct hci_cp_create_conn *cp;
2250         struct hci_conn *conn;
2251
2252         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2253
2254         cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2255         if (!cp)
2256                 return;
2257
2258         hci_dev_lock(hdev);
2259
2260         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2261
2262         bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2263
2264         if (status) {
2265                 if (conn && conn->state == BT_CONNECT) {
2266                         if (status != 0x0c || conn->attempt > 2) {
2267                                 conn->state = BT_CLOSED;
2268                                 hci_connect_cfm(conn, status);
2269                                 hci_conn_del(conn);
2270                         } else
2271                                 conn->state = BT_CONNECT2;
2272                 }
2273         } else {
2274                 if (!conn) {
2275                         conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2276                                             HCI_ROLE_MASTER);
2277                         if (!conn)
2278                                 bt_dev_err(hdev, "no memory for new connection");
2279                 }
2280         }
2281
2282         hci_dev_unlock(hdev);
2283 }
2284
2285 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2286 {
2287         struct hci_cp_add_sco *cp;
2288         struct hci_conn *acl, *sco;
2289         __u16 handle;
2290
2291         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2292
2293         if (!status)
2294                 return;
2295
2296         cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2297         if (!cp)
2298                 return;
2299
2300         handle = __le16_to_cpu(cp->handle);
2301
2302         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2303
2304         hci_dev_lock(hdev);
2305
2306         acl = hci_conn_hash_lookup_handle(hdev, handle);
2307         if (acl) {
2308                 sco = acl->link;
2309                 if (sco) {
2310                         sco->state = BT_CLOSED;
2311
2312                         hci_connect_cfm(sco, status);
2313                         hci_conn_del(sco);
2314                 }
2315         }
2316
2317         hci_dev_unlock(hdev);
2318 }
2319
2320 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2321 {
2322         struct hci_cp_auth_requested *cp;
2323         struct hci_conn *conn;
2324
2325         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2326
2327         if (!status)
2328                 return;
2329
2330         cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2331         if (!cp)
2332                 return;
2333
2334         hci_dev_lock(hdev);
2335
2336         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2337         if (conn) {
2338                 if (conn->state == BT_CONFIG) {
2339                         hci_connect_cfm(conn, status);
2340                         hci_conn_drop(conn);
2341                 }
2342         }
2343
2344         hci_dev_unlock(hdev);
2345 }
2346
2347 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2348 {
2349         struct hci_cp_set_conn_encrypt *cp;
2350         struct hci_conn *conn;
2351
2352         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2353
2354         if (!status)
2355                 return;
2356
2357         cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2358         if (!cp)
2359                 return;
2360
2361         hci_dev_lock(hdev);
2362
2363         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2364         if (conn) {
2365                 if (conn->state == BT_CONFIG) {
2366                         hci_connect_cfm(conn, status);
2367                         hci_conn_drop(conn);
2368                 }
2369         }
2370
2371         hci_dev_unlock(hdev);
2372 }
2373
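     /* Decide whether an outgoing connection that is still in BT_CONFIG needs
      * authentication, based on the pending security level and whether SSP or
      * MITM protection is in use.
      */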
2374 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2375                                     struct hci_conn *conn)
2376 {
2377         if (conn->state != BT_CONFIG || !conn->out)
2378                 return 0;
2379
2380         if (conn->pending_sec_level == BT_SECURITY_SDP)
2381                 return 0;
2382
2383         /* Only request authentication for SSP connections or non-SSP
2384          * devices with sec_level MEDIUM or HIGH or if MITM protection
2385          * is requested.
2386          */
2387         if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2388             conn->pending_sec_level != BT_SECURITY_FIPS &&
2389             conn->pending_sec_level != BT_SECURITY_HIGH &&
2390             conn->pending_sec_level != BT_SECURITY_MEDIUM)
2391                 return 0;
2392
2393         return 1;
2394 }
2395
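     /* Send a Remote Name Request for an inquiry cache entry, reusing the
      * page scan parameters and clock offset learned during inquiry.
      */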
2396 static int hci_resolve_name(struct hci_dev *hdev,
2397                                    struct inquiry_entry *e)
2398 {
2399         struct hci_cp_remote_name_req cp;
2400
2401         memset(&cp, 0, sizeof(cp));
2402
2403         bacpy(&cp.bdaddr, &e->data.bdaddr);
2404         cp.pscan_rep_mode = e->data.pscan_rep_mode;
2405         cp.pscan_mode = e->data.pscan_mode;
2406         cp.clock_offset = e->data.clock_offset;
2407
2408         return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2409 }
2410
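     /* Start resolving the next name from the discovery resolve list. Returns
      * true if a Remote Name Request was sent, false if nothing is left to
      * resolve or name resolution has already been running for too long.
      */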
2411 static bool hci_resolve_next_name(struct hci_dev *hdev)
2412 {
2413         struct discovery_state *discov = &hdev->discovery;
2414         struct inquiry_entry *e;
2415
2416         if (list_empty(&discov->resolve))
2417                 return false;
2418
2419         /* We should stop if we have already spent too much time resolving names. */
2420         if (time_after(jiffies, discov->name_resolve_timeout)) {
2421                 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2422                 return false;
2423         }
2424
2425         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2426         if (!e)
2427                 return false;
2428
2429         if (hci_resolve_name(hdev, e) == 0) {
2430                 e->name_state = NAME_PENDING;
2431                 return true;
2432         }
2433
2434         return false;
2435 }
2436
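     /* Bookkeeping after a remote name became known (or resolution failed):
      * update the mgmt connected state if needed, report the name, drop the
      * entry from the resolve list and either resolve the next name or mark
      * discovery as stopped.
      */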
2437 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2438                                    bdaddr_t *bdaddr, u8 *name, u8 name_len)
2439 {
2440         struct discovery_state *discov = &hdev->discovery;
2441         struct inquiry_entry *e;
2442
2443         /* Update the mgmt connected state if necessary. However, be careful
2444          * with conn objects that exist but are not (yet) connected.
2445          * Only those in BT_CONFIG or BT_CONNECTED states can be
2446          * considered connected.
2447          */
2448         if (conn &&
2449             (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2450             !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2451                 mgmt_device_connected(hdev, conn, name, name_len);
2452
2453         if (discov->state == DISCOVERY_STOPPED)
2454                 return;
2455
2456         if (discov->state == DISCOVERY_STOPPING)
2457                 goto discov_complete;
2458
2459         if (discov->state != DISCOVERY_RESOLVING)
2460                 return;
2461
2462         e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2463         /* If the device was not found in the list of devices whose names are
2464          * pending resolution, there is no need to continue resolving the next
2465          * name; it will be done upon receiving another Remote Name Request
2466          * Complete event. */
2467         if (!e)
2468                 return;
2469
2470         list_del(&e->list);
2471
2472         e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2473         mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2474                          name, name_len);
2475
2476         if (hci_resolve_next_name(hdev))
2477                 return;
2478
2479 discov_complete:
2480         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2481 }
2482
2483 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2484 {
2485         struct hci_cp_remote_name_req *cp;
2486         struct hci_conn *conn;
2487
2488         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2489
2490         /* If successful, wait for the remote name request complete event before
2491          * checking whether authentication is needed. */
2492         if (!status)
2493                 return;
2494
2495         cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2496         if (!cp)
2497                 return;
2498
2499         hci_dev_lock(hdev);
2500
2501         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2502
2503         if (hci_dev_test_flag(hdev, HCI_MGMT))
2504                 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2505
2506         if (!conn)
2507                 goto unlock;
2508
2509         if (!hci_outgoing_auth_needed(hdev, conn))
2510                 goto unlock;
2511
2512         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2513                 struct hci_cp_auth_requested auth_cp;
2514
2515                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2516
2517                 auth_cp.handle = __cpu_to_le16(conn->handle);
2518                 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2519                              sizeof(auth_cp), &auth_cp);
2520         }
2521
2522 unlock:
2523         hci_dev_unlock(hdev);
2524 }
2525
2526 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2527 {
2528         struct hci_cp_read_remote_features *cp;
2529         struct hci_conn *conn;
2530
2531         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2532
2533         if (!status)
2534                 return;
2535
2536         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2537         if (!cp)
2538                 return;
2539
2540         hci_dev_lock(hdev);
2541
2542         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2543         if (conn) {
2544                 if (conn->state == BT_CONFIG) {
2545                         hci_connect_cfm(conn, status);
2546                         hci_conn_drop(conn);
2547                 }
2548         }
2549
2550         hci_dev_unlock(hdev);
2551 }
2552
2553 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2554 {
2555         struct hci_cp_read_remote_ext_features *cp;
2556         struct hci_conn *conn;
2557
2558         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2559
2560         if (!status)
2561                 return;
2562
2563         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2564         if (!cp)
2565                 return;
2566
2567         hci_dev_lock(hdev);
2568
2569         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2570         if (conn) {
2571                 if (conn->state == BT_CONFIG) {
2572                         hci_connect_cfm(conn, status);
2573                         hci_conn_drop(conn);
2574                 }
2575         }
2576
2577         hci_dev_unlock(hdev);
2578 }
2579
2580 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2581 {
2582         struct hci_cp_setup_sync_conn *cp;
2583         struct hci_conn *acl, *sco;
2584         __u16 handle;
2585
2586         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2587
2588         if (!status)
2589                 return;
2590
2591         cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2592         if (!cp)
2593                 return;
2594
2595         handle = __le16_to_cpu(cp->handle);
2596
2597         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2598
2599         hci_dev_lock(hdev);
2600
2601         acl = hci_conn_hash_lookup_handle(hdev, handle);
2602         if (acl) {
2603                 sco = acl->link;
2604                 if (sco) {
2605                         sco->state = BT_CLOSED;
2606
2607                         hci_connect_cfm(sco, status);
2608                         hci_conn_del(sco);
2609                 }
2610         }
2611
2612         hci_dev_unlock(hdev);
2613 }
2614
2615 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2616 {
2617         struct hci_cp_enhanced_setup_sync_conn *cp;
2618         struct hci_conn *acl, *sco;
2619         __u16 handle;
2620
2621         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2622
2623         if (!status)
2624                 return;
2625
2626         cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2627         if (!cp)
2628                 return;
2629
2630         handle = __le16_to_cpu(cp->handle);
2631
2632         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2633
2634         hci_dev_lock(hdev);
2635
2636         acl = hci_conn_hash_lookup_handle(hdev, handle);
2637         if (acl) {
2638                 sco = acl->link;
2639                 if (sco) {
2640                         sco->state = BT_CLOSED;
2641
2642                         hci_connect_cfm(sco, status);
2643                         hci_conn_del(sco);
2644                 }
2645         }
2646
2647         hci_dev_unlock(hdev);
2648 }
2649
2650 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2651 {
2652         struct hci_cp_sniff_mode *cp;
2653         struct hci_conn *conn;
2654
2655         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2656
2657         if (!status)
2658                 return;
2659
2660         cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2661         if (!cp)
2662                 return;
2663
2664         hci_dev_lock(hdev);
2665
2666         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2667         if (conn) {
2668                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2669
2670                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2671                         hci_sco_setup(conn, status);
2672         }
2673
2674         hci_dev_unlock(hdev);
2675 }
2676
2677 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2678 {
2679         struct hci_cp_exit_sniff_mode *cp;
2680         struct hci_conn *conn;
2681
2682         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2683
2684         if (!status)
2685                 return;
2686
2687         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2688         if (!cp)
2689                 return;
2690
2691         hci_dev_lock(hdev);
2692
2693         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2694         if (conn) {
2695                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2696
2697                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2698                         hci_sco_setup(conn, status);
2699         }
2700
2701         hci_dev_unlock(hdev);
2702 }
2703
2704 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2705 {
2706         struct hci_cp_disconnect *cp;
2707         struct hci_conn_params *params;
2708         struct hci_conn *conn;
2709         bool mgmt_conn;
2710
2711         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2712
2713         /* Wait for HCI_EV_DISCONN_COMPLETE if the status is 0x00 and the device
2714          * is not suspended; otherwise clean up the connection immediately.
2715          */
2716         if (!status && !hdev->suspended)
2717                 return;
2718
2719         cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2720         if (!cp)
2721                 return;
2722
2723         hci_dev_lock(hdev);
2724
2725         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2726         if (!conn)
2727                 goto unlock;
2728
2729         if (status) {
2730                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2731                                        conn->dst_type, status);
2732
2733                 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2734                         hdev->cur_adv_instance = conn->adv_instance;
2735                         hci_enable_advertising(hdev);
2736                 }
2737
2738                 goto done;
2739         }
2740
2741         mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2742
2743         if (conn->type == ACL_LINK) {
2744                 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2745                         hci_remove_link_key(hdev, &conn->dst);
2746         }
2747
2748         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2749         if (params) {
2750                 switch (params->auto_connect) {
2751                 case HCI_AUTO_CONN_LINK_LOSS:
2752                         if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2753                                 break;
2754                         fallthrough;
2755
2756                 case HCI_AUTO_CONN_DIRECT:
2757                 case HCI_AUTO_CONN_ALWAYS:
2758                         list_del_init(&params->action);
2759                         list_add(&params->action, &hdev->pend_le_conns);
2760                         break;
2761
2762                 default:
2763                         break;
2764                 }
2765         }
2766
2767         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2768                                  cp->reason, mgmt_conn);
2769
2770         hci_disconn_cfm(conn, cp->reason);
2771
2772 done:
2773         /* If the disconnection failed for any reason, the upper layer
2774          * does not retry the disconnection in the current implementation.
2775          * Hence, we need to do some basic cleanup here and re-enable
2776          * advertising if necessary.
2777          */
2778         hci_conn_del(conn);
2779 unlock:
2780         hci_dev_unlock(hdev);
2781 }
2782
2783 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2784 {
2785         /* When controller based address resolution is used, the new address
2786          * types 0x02 and 0x03 are reported. These types need to be converted
2787          * back into either public or random address type.
2788          */
2789         switch (type) {
2790         case ADDR_LE_DEV_PUBLIC_RESOLVED:
2791                 if (resolved)
2792                         *resolved = true;
2793                 return ADDR_LE_DEV_PUBLIC;
2794         case ADDR_LE_DEV_RANDOM_RESOLVED:
2795                 if (resolved)
2796                         *resolved = true;
2797                 return ADDR_LE_DEV_RANDOM;
2798         }
2799
2800         if (resolved)
2801                 *resolved = false;
2802         return type;
2803 }
2804
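     /* Common command status handling for LE (Extended) Create Connection:
      * record the initiator and responder addresses needed by SMP and arm the
      * connection timeout when not connecting via the accept list.
      */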
2805 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2806                               u8 peer_addr_type, u8 own_address_type,
2807                               u8 filter_policy)
2808 {
2809         struct hci_conn *conn;
2810
2811         conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2812                                        peer_addr_type);
2813         if (!conn)
2814                 return;
2815
2816         own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2817
2818         /* Store the initiator and responder address information which
2819          * is needed for SMP. These values will not change during the
2820          * lifetime of the connection.
2821          */
2822         conn->init_addr_type = own_address_type;
2823         if (own_address_type == ADDR_LE_DEV_RANDOM)
2824                 bacpy(&conn->init_addr, &hdev->random_addr);
2825         else
2826                 bacpy(&conn->init_addr, &hdev->bdaddr);
2827
2828         conn->resp_addr_type = peer_addr_type;
2829         bacpy(&conn->resp_addr, peer_addr);
2830
2831         /* We don't want the connection attempt to stick around
2832          * indefinitely since LE doesn't have a page timeout concept
2833          * like BR/EDR. Set a timer for any connection that doesn't use
2834          * the accept list for connecting.
2835          */
2836         if (filter_policy == HCI_LE_USE_PEER_ADDR)
2837                 queue_delayed_work(conn->hdev->workqueue,
2838                                    &conn->le_conn_timeout,
2839                                    conn->conn_timeout);
2840 }
2841
2842 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2843 {
2844         struct hci_cp_le_create_conn *cp;
2845
2846         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2847
2848         /* All connection failure handling is taken care of by the
2849          * hci_conn_failed function which is triggered by the HCI
2850          * request completion callbacks used for connecting.
2851          */
2852         if (status)
2853                 return;
2854
2855         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2856         if (!cp)
2857                 return;
2858
2859         hci_dev_lock(hdev);
2860
2861         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2862                           cp->own_address_type, cp->filter_policy);
2863
2864         hci_dev_unlock(hdev);
2865 }
2866
2867 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2868 {
2869         struct hci_cp_le_ext_create_conn *cp;
2870
2871         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2872
2873         /* All connection failure handling is taken care of by the
2874          * hci_conn_failed function which is triggered by the HCI
2875          * request completion callbacks used for connecting.
2876          */
2877         if (status)
2878                 return;
2879
2880         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2881         if (!cp)
2882                 return;
2883
2884         hci_dev_lock(hdev);
2885
2886         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2887                           cp->own_addr_type, cp->filter_policy);
2888
2889         hci_dev_unlock(hdev);
2890 }
2891
2892 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2893 {
2894         struct hci_cp_le_read_remote_features *cp;
2895         struct hci_conn *conn;
2896
2897         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2898
2899         if (!status)
2900                 return;
2901
2902         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2903         if (!cp)
2904                 return;
2905
2906         hci_dev_lock(hdev);
2907
2908         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2909         if (conn) {
2910                 if (conn->state == BT_CONFIG) {
2911                         hci_connect_cfm(conn, status);
2912                         hci_conn_drop(conn);
2913                 }
2914         }
2915
2916         hci_dev_unlock(hdev);
2917 }
2918
2919 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2920 {
2921         struct hci_cp_le_start_enc *cp;
2922         struct hci_conn *conn;
2923
2924         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2925
2926         if (!status)
2927                 return;
2928
2929         hci_dev_lock(hdev);
2930
2931         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2932         if (!cp)
2933                 goto unlock;
2934
2935         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2936         if (!conn)
2937                 goto unlock;
2938
2939         if (conn->state != BT_CONNECTED)
2940                 goto unlock;
2941
2942         hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2943         hci_conn_drop(conn);
2944
2945 unlock:
2946         hci_dev_unlock(hdev);
2947 }
2948
2949 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2950 {
2951         struct hci_cp_switch_role *cp;
2952         struct hci_conn *conn;
2953
2954         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2955
2956         if (!status)
2957                 return;
2958
2959         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2960         if (!cp)
2961                 return;
2962
2963         hci_dev_lock(hdev);
2964
2965         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2966         if (conn)
2967                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2968
2969         hci_dev_unlock(hdev);
2970 }
2971
2972 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
2973                                      struct sk_buff *skb)
2974 {
2975         struct hci_ev_status *ev = data;
2976         struct discovery_state *discov = &hdev->discovery;
2977         struct inquiry_entry *e;
2978
2979         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
2980
2981         hci_conn_check_pending(hdev);
2982
2983         if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2984                 return;
2985
2986         smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2987         wake_up_bit(&hdev->flags, HCI_INQUIRY);
2988
2989         if (!hci_dev_test_flag(hdev, HCI_MGMT))
2990                 return;
2991
2992         hci_dev_lock(hdev);
2993
2994         if (discov->state != DISCOVERY_FINDING)
2995                 goto unlock;
2996
2997         if (list_empty(&discov->resolve)) {
2998                 /* When BR/EDR inquiry is active and no LE scanning is in
2999                  * progress, then change discovery state to indicate completion.
3000                  *
3001                  * When running LE scanning and BR/EDR inquiry simultaneously
3002                  * and the LE scan already finished, then change the discovery
3003                  * state to indicate completion.
3004                  */
3005                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3006                     !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3007                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3008                 goto unlock;
3009         }
3010
3011         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3012         if (e && hci_resolve_name(hdev, e) == 0) {
3013                 e->name_state = NAME_PENDING;
3014                 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3015                 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3016         } else {
3017                 /* When BR/EDR inquiry is active and no LE scanning is in
3018                  * progress, then change discovery state to indicate completion.
3019                  *
3020                  * When running LE scanning and BR/EDR inquiry simultaneously
3021                  * and the LE scan already finished, then change the discovery
3022                  * state to indicate completion.
3023                  */
3024                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3025                     !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3026                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3027         }
3028
3029 unlock:
3030         hci_dev_unlock(hdev);
3031 }
3032
3033 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3034                                    struct sk_buff *skb)
3035 {
3036         struct hci_ev_inquiry_result *ev = edata;
3037         struct inquiry_data data;
3038         int i;
3039
3040         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3041                              flex_array_size(ev, info, ev->num)))
3042                 return;
3043
3044         bt_dev_dbg(hdev, "num %d", ev->num);
3045
3046         if (!ev->num)
3047                 return;
3048
3049         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3050                 return;
3051
3052         hci_dev_lock(hdev);
3053
3054         for (i = 0; i < ev->num; i++) {
3055                 struct inquiry_info *info = &ev->info[i];
3056                 u32 flags;
3057
3058                 bacpy(&data.bdaddr, &info->bdaddr);
3059                 data.pscan_rep_mode     = info->pscan_rep_mode;
3060                 data.pscan_period_mode  = info->pscan_period_mode;
3061                 data.pscan_mode         = info->pscan_mode;
3062                 memcpy(data.dev_class, info->dev_class, 3);
3063                 data.clock_offset       = info->clock_offset;
3064                 data.rssi               = HCI_RSSI_INVALID;
3065                 data.ssp_mode           = 0x00;
3066
3067                 flags = hci_inquiry_cache_update(hdev, &data, false);
3068
3069                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3070                                   info->dev_class, HCI_RSSI_INVALID,
3071                                   flags, NULL, 0, NULL, 0);
3072         }
3073
3074         hci_dev_unlock(hdev);
3075 }
3076
3077 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3078                                   struct sk_buff *skb)
3079 {
3080         struct hci_ev_conn_complete *ev = data;
3081         struct hci_conn *conn;
3082         u8 status = ev->status;
3083
3084         bt_dev_dbg(hdev, "status 0x%2.2x", status);
3085
3086         hci_dev_lock(hdev);
3087
3088         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3089         if (!conn) {
3090                 /* In case of error status and there is no connection pending
3091                  * just unlock as there is nothing to cleanup.
3092                  */
3093                 if (ev->status)
3094                         goto unlock;
3095
3096                 /* Connection may not exist if auto-connected. Check the bredr
3097                  * allowlist to see if this device is allowed to auto connect.
3098                  * If link is an ACL type, create a connection class
3099                  * automatically.
3100                  *
3101                  * Auto-connect will only occur if the event filter is
3102                  * programmed with a given address. Right now, the event filter is
3103                  * only used during suspend.
3104                  */
3105                 if (ev->link_type == ACL_LINK &&
3106                     hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3107                                                       &ev->bdaddr,
3108                                                       BDADDR_BREDR)) {
3109                         conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3110                                             HCI_ROLE_SLAVE);
3111                         if (!conn) {
3112                                 bt_dev_err(hdev, "no memory for new conn");
3113                                 goto unlock;
3114                         }
3115                 } else {
3116                         if (ev->link_type != SCO_LINK)
3117                                 goto unlock;
3118
3119                         conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3120                                                        &ev->bdaddr);
3121                         if (!conn)
3122                                 goto unlock;
3123
3124                         conn->type = SCO_LINK;
3125                 }
3126         }
3127
3128         /* The HCI_Connection_Complete event is only sent once per connection.
3129          * Processing it more than once per connection can corrupt kernel memory.
3130          *
3131          * As the connection handle is set here for the first time, it indicates
3132          * whether the connection is already set up.
3133          */
3134         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
3135                 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3136                 goto unlock;
3137         }
3138
3139         if (!status) {
3140                 conn->handle = __le16_to_cpu(ev->handle);
3141                 if (conn->handle > HCI_CONN_HANDLE_MAX) {
3142                         bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
3143                                    conn->handle, HCI_CONN_HANDLE_MAX);
3144                         status = HCI_ERROR_INVALID_PARAMETERS;
3145                         goto done;
3146                 }
3147
3148                 if (conn->type == ACL_LINK) {
3149                         conn->state = BT_CONFIG;
3150                         hci_conn_hold(conn);
3151
3152                         if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3153                             !hci_find_link_key(hdev, &ev->bdaddr))
3154                                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3155                         else
3156                                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3157                 } else
3158                         conn->state = BT_CONNECTED;
3159
3160                 hci_debugfs_create_conn(conn);
3161                 hci_conn_add_sysfs(conn);
3162
3163                 if (test_bit(HCI_AUTH, &hdev->flags))
3164                         set_bit(HCI_CONN_AUTH, &conn->flags);
3165
3166                 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3167                         set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3168
3169                 /* Get remote features */
3170                 if (conn->type == ACL_LINK) {
3171                         struct hci_cp_read_remote_features cp;
3172                         cp.handle = ev->handle;
3173                         hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3174                                      sizeof(cp), &cp);
3175
3176                         hci_req_update_scan(hdev);
3177                 }
3178
3179                 /* Set packet type for incoming connection */
3180                 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3181                         struct hci_cp_change_conn_ptype cp;
3182                         cp.handle = ev->handle;
3183                         cp.pkt_type = cpu_to_le16(conn->pkt_type);
3184                         hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3185                                      &cp);
3186                 }
3187         }
3188
3189         if (conn->type == ACL_LINK)
3190                 hci_sco_setup(conn, ev->status);
3191
3192 done:
3193         if (status) {
3194                 hci_conn_failed(conn, status);
3195         } else if (ev->link_type == SCO_LINK) {
3196                 switch (conn->setting & SCO_AIRMODE_MASK) {
3197                 case SCO_AIRMODE_CVSD:
3198                         if (hdev->notify)
3199                                 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3200                         break;
3201                 }
3202
3203                 hci_connect_cfm(conn, status);
3204         }
3205
3206 unlock:
3207         hci_dev_unlock(hdev);
3208
3209         hci_conn_check_pending(hdev);
3210 }
3211
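     /* Reject an incoming connection request from the given address. */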
3212 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3213 {
3214         struct hci_cp_reject_conn_req cp;
3215
3216         bacpy(&cp.bdaddr, bdaddr);
3217         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3218         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3219 }
3220
3221 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3222                                  struct sk_buff *skb)
3223 {
3224         struct hci_ev_conn_request *ev = data;
3225         int mask = hdev->link_mode;
3226         struct inquiry_entry *ie;
3227         struct hci_conn *conn;
3228         __u8 flags = 0;
3229
3230         bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3231
3232         mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3233                                       &flags);
3234
3235         if (!(mask & HCI_LM_ACCEPT)) {
3236                 hci_reject_conn(hdev, &ev->bdaddr);
3237                 return;
3238         }
3239
3240         hci_dev_lock(hdev);
3241
3242         if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3243                                    BDADDR_BREDR)) {
3244                 hci_reject_conn(hdev, &ev->bdaddr);
3245                 goto unlock;
3246         }
3247
3248         /* Require HCI_CONNECTABLE or an accept list entry to accept the
3249          * connection. These features are only touched through mgmt so
3250          * only do the checks if HCI_MGMT is set.
3251          */
3252         if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3253             !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3254             !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3255                                                BDADDR_BREDR)) {
3256                 hci_reject_conn(hdev, &ev->bdaddr);
3257                 goto unlock;
3258         }
3259
3260         /* Connection accepted */
3261
3262         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3263         if (ie)
3264                 memcpy(ie->data.dev_class, ev->dev_class, 3);
3265
3266         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3267                         &ev->bdaddr);
3268         if (!conn) {
3269                 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3270                                     HCI_ROLE_SLAVE);
3271                 if (!conn) {
3272                         bt_dev_err(hdev, "no memory for new connection");
3273                         goto unlock;
3274                 }
3275         }
3276
3277         memcpy(conn->dev_class, ev->dev_class, 3);
3278
3279         hci_dev_unlock(hdev);
3280
3281         if (ev->link_type == ACL_LINK ||
3282             (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3283                 struct hci_cp_accept_conn_req cp;
3284                 conn->state = BT_CONNECT;
3285
3286                 bacpy(&cp.bdaddr, &ev->bdaddr);
3287
3288                 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3289                         cp.role = 0x00; /* Become central */
3290                 else
3291                         cp.role = 0x01; /* Remain peripheral */
3292
3293                 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3294         } else if (!(flags & HCI_PROTO_DEFER)) {
3295                 struct hci_cp_accept_sync_conn_req cp;
3296                 conn->state = BT_CONNECT;
3297
3298                 bacpy(&cp.bdaddr, &ev->bdaddr);
3299                 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3300
3301                 cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3302                 cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3303                 cp.max_latency    = cpu_to_le16(0xffff);
3304                 cp.content_format = cpu_to_le16(hdev->voice_setting);
3305                 cp.retrans_effort = 0xff;
3306
3307                 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3308                              &cp);
3309         } else {
3310                 conn->state = BT_CONNECT2;
3311                 hci_connect_cfm(conn, 0);
3312         }
3313
3314         return;
3315 unlock:
3316         hci_dev_unlock(hdev);
3317 }
3318
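     /* Map an HCI disconnect reason code to the MGMT_DEV_DISCONN_* value
      * reported to userspace.
      */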
3319 static u8 hci_to_mgmt_reason(u8 err)
3320 {
3321         switch (err) {
3322         case HCI_ERROR_CONNECTION_TIMEOUT:
3323                 return MGMT_DEV_DISCONN_TIMEOUT;
3324         case HCI_ERROR_REMOTE_USER_TERM:
3325         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3326         case HCI_ERROR_REMOTE_POWER_OFF:
3327                 return MGMT_DEV_DISCONN_REMOTE;
3328         case HCI_ERROR_LOCAL_HOST_TERM:
3329                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3330         default:
3331                 return MGMT_DEV_DISCONN_UNKNOWN;
3332         }
3333 }
3334
3335 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3336                                      struct sk_buff *skb)
3337 {
3338         struct hci_ev_disconn_complete *ev = data;
3339         u8 reason;
3340         struct hci_conn_params *params;
3341         struct hci_conn *conn;
3342         bool mgmt_connected;
3343
3344         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3345
3346         hci_dev_lock(hdev);
3347
3348         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3349         if (!conn)
3350                 goto unlock;
3351
3352         if (ev->status) {
3353                 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3354                                        conn->dst_type, ev->status);
3355                 goto unlock;
3356         }
3357
3358         conn->state = BT_CLOSED;
3359
3360         mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3361
3362         if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3363                 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3364         else
3365                 reason = hci_to_mgmt_reason(ev->reason);
3366
3367         mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3368                                 reason, mgmt_connected);
3369
3370         if (conn->type == ACL_LINK) {
3371                 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3372                         hci_remove_link_key(hdev, &conn->dst);
3373
3374                 hci_req_update_scan(hdev);
3375         }
3376
3377         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3378         if (params) {
3379                 switch (params->auto_connect) {
3380                 case HCI_AUTO_CONN_LINK_LOSS:
3381                         if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3382                                 break;
3383                         fallthrough;
3384
3385                 case HCI_AUTO_CONN_DIRECT:
3386                 case HCI_AUTO_CONN_ALWAYS:
3387                         list_del_init(&params->action);
3388                         list_add(&params->action, &hdev->pend_le_conns);
3389                         hci_update_passive_scan(hdev);
3390                         break;
3391
3392                 default:
3393                         break;
3394                 }
3395         }
3396
3397         hci_disconn_cfm(conn, ev->reason);
3398
3399         /* Re-enable advertising if necessary, since it might
3400          * have been disabled by the connection. From the
3401          * HCI_LE_Set_Advertise_Enable command description in
3402          * the core specification (v4.0):
3403          * "The Controller shall continue advertising until the Host
3404          * issues an LE_Set_Advertise_Enable command with
3405          * Advertising_Enable set to 0x00 (Advertising is disabled)
3406          * or until a connection is created or until the Advertising
3407          * is timed out due to Directed Advertising."
3408          */
3409         if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3410                 hdev->cur_adv_instance = conn->adv_instance;
3411                 hci_enable_advertising(hdev);
3412         }
3413
3414         hci_conn_del(conn);
3415
3416 unlock:
3417         hci_dev_unlock(hdev);
3418 }
3419
3420 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3421                                   struct sk_buff *skb)
3422 {
3423         struct hci_ev_auth_complete *ev = data;
3424         struct hci_conn *conn;
3425
3426         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3427
3428         hci_dev_lock(hdev);
3429
3430         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3431         if (!conn)
3432                 goto unlock;
3433
3434         if (!ev->status) {
3435                 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3436
3437                 if (!hci_conn_ssp_enabled(conn) &&
3438                     test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3439                         bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3440                 } else {
3441                         set_bit(HCI_CONN_AUTH, &conn->flags);
3442                         conn->sec_level = conn->pending_sec_level;
3443                 }
3444         } else {
3445                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3446                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3447
3448                 mgmt_auth_failed(conn, ev->status);
3449         }
3450
3451         clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3452         clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3453
3454         if (conn->state == BT_CONFIG) {
3455                 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3456                         struct hci_cp_set_conn_encrypt cp;
3457                         cp.handle  = ev->handle;
3458                         cp.encrypt = 0x01;
3459                         hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3460                                      &cp);
3461                 } else {
3462                         conn->state = BT_CONNECTED;
3463                         hci_connect_cfm(conn, ev->status);
3464                         hci_conn_drop(conn);
3465                 }
3466         } else {
3467                 hci_auth_cfm(conn, ev->status);
3468
3469                 hci_conn_hold(conn);
3470                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3471                 hci_conn_drop(conn);
3472         }
3473
3474         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3475                 if (!ev->status) {
3476                         struct hci_cp_set_conn_encrypt cp;
3477                         cp.handle  = ev->handle;
3478                         cp.encrypt = 0x01;
3479                         hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3480                                      &cp);
3481                 } else {
3482                         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3483                         hci_encrypt_cfm(conn, ev->status);
3484                 }
3485         }
3486
3487 unlock:
3488         hci_dev_unlock(hdev);
3489 }
3490
3491 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3492                                 struct sk_buff *skb)
3493 {
3494         struct hci_ev_remote_name *ev = data;
3495         struct hci_conn *conn;
3496
3497         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3498
3499         hci_conn_check_pending(hdev);
3500
3501         hci_dev_lock(hdev);
3502
3503         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3504
3505         if (!hci_dev_test_flag(hdev, HCI_MGMT))
3506                 goto check_auth;
3507
3508         if (ev->status == 0)
3509                 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3510                                        strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3511         else
3512                 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3513
3514 check_auth:
3515         if (!conn)
3516                 goto unlock;
3517
3518         if (!hci_outgoing_auth_needed(hdev, conn))
3519                 goto unlock;
3520
3521         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3522                 struct hci_cp_auth_requested cp;
3523
3524                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3525
3526                 cp.handle = __cpu_to_le16(conn->handle);
3527                 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3528         }
3529
3530 unlock:
3531         hci_dev_unlock(hdev);
3532 }
3533
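     /* Completion callback for HCI_Read_Encryption_Key_Size: record the
      * reported key size on the connection (0 on failure, which forces a
      * disconnection) and run the encryption change notification.
      */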
3534 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3535                                        u16 opcode, struct sk_buff *skb)
3536 {
3537         const struct hci_rp_read_enc_key_size *rp;
3538         struct hci_conn *conn;
3539         u16 handle;
3540
3541         BT_DBG("%s status 0x%02x", hdev->name, status);
3542
3543         if (!skb || skb->len < sizeof(*rp)) {
3544                 bt_dev_err(hdev, "invalid read key size response");
3545                 return;
3546         }
3547
3548         rp = (void *)skb->data;
3549         handle = le16_to_cpu(rp->handle);
3550
3551         hci_dev_lock(hdev);
3552
3553         conn = hci_conn_hash_lookup_handle(hdev, handle);
3554         if (!conn)
3555                 goto unlock;
3556
3557         /* While unexpected, the read_enc_key_size command may fail. The most
3558          * secure approach is to then assume the key size is 0 to force a
3559          * disconnection.
3560          */
3561         if (rp->status) {
3562                 bt_dev_err(hdev, "failed to read key size for handle %u",
3563                            handle);
3564                 conn->enc_key_size = 0;
3565         } else {
3566                 conn->enc_key_size = rp->key_size;
3567         }
3568
3569         hci_encrypt_cfm(conn, 0);
3570
3571 unlock:
3572         hci_dev_unlock(hdev);
3573 }
3574
3575 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3576                                    struct sk_buff *skb)
3577 {
3578         struct hci_ev_encrypt_change *ev = data;
3579         struct hci_conn *conn;
3580
3581         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3582
3583         hci_dev_lock(hdev);
3584
3585         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3586         if (!conn)
3587                 goto unlock;
3588
3589         if (!ev->status) {
3590                 if (ev->encrypt) {
3591                         /* Encryption implies authentication */
3592                         set_bit(HCI_CONN_AUTH, &conn->flags);
3593                         set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3594                         conn->sec_level = conn->pending_sec_level;
3595
3596                         /* P-256 authentication key implies FIPS */
3597                         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3598                                 set_bit(HCI_CONN_FIPS, &conn->flags);
3599
3600                         if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3601                             conn->type == LE_LINK)
3602                                 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3603                 } else {
3604                         clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3605                         clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3606                 }
3607         }
3608
3609         /* We should disregard the current RPA and generate a new one
3610          * whenever the encryption procedure fails.
3611          */
3612         if (ev->status && conn->type == LE_LINK) {
3613                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3614                 hci_adv_instances_set_rpa_expired(hdev, true);
3615         }
3616
3617         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3618
3619         /* Check link security requirements are met */
3620         if (!hci_conn_check_link_mode(conn))
3621                 ev->status = HCI_ERROR_AUTH_FAILURE;
3622
3623         if (ev->status && conn->state == BT_CONNECTED) {
3624                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3625                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3626
3627                 /* Notify upper layers so they can cleanup before
3628                  * disconnecting.
3629                  */
3630                 hci_encrypt_cfm(conn, ev->status);
3631                 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3632                 hci_conn_drop(conn);
3633                 goto unlock;
3634         }
3635
3636         /* Try reading the encryption key size for encrypted ACL links */
3637         if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3638                 struct hci_cp_read_enc_key_size cp;
3639                 struct hci_request req;
3640
3641                 /* Only send HCI_Read_Encryption_Key_Size if the
3642                  * controller really supports it. If it doesn't, assume
3643                  * the default size (16).
3644                  */
3645                 if (!(hdev->commands[20] & 0x10)) {
3646                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3647                         goto notify;
3648                 }
3649
3650                 hci_req_init(&req, hdev);
3651
3652                 cp.handle = cpu_to_le16(conn->handle);
3653                 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3654
3655                 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3656                         bt_dev_err(hdev, "sending read key size failed");
3657                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3658                         goto notify;
3659                 }
3660
3661                 goto unlock;
3662         }
3663
3664         /* Set the default Authenticated Payload Timeout after
3665          * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3666          * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3667          * sent when the link is active and encryption is enabled. The conn
3668          * type can be either LE or ACL and the controller must support LMP
3669          * Ping. In addition, only do this for AES-CCM encrypted links.
3670          */
3671         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3672             test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3673             ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3674              (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3675                 struct hci_cp_write_auth_payload_to cp;
3676
3677                 cp.handle = cpu_to_le16(conn->handle);
3678                 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3679                 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3680                              sizeof(cp), &cp);
3681         }
3682
3683 notify:
3684         hci_encrypt_cfm(conn, ev->status);
3685
3686 unlock:
3687         hci_dev_unlock(hdev);
3688 }
3689
3690 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3691                                              struct sk_buff *skb)
3692 {
3693         struct hci_ev_change_link_key_complete *ev = data;
3694         struct hci_conn *conn;
3695
3696         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3697
3698         hci_dev_lock(hdev);
3699
3700         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3701         if (conn) {
3702                 if (!ev->status)
3703                         set_bit(HCI_CONN_SECURE, &conn->flags);
3704
3705                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3706
3707                 hci_key_change_cfm(conn, ev->status);
3708         }
3709
3710         hci_dev_unlock(hdev);
3711 }
3712
3713 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3714                                     struct sk_buff *skb)
3715 {
3716         struct hci_ev_remote_features *ev = data;
3717         struct hci_conn *conn;
3718
3719         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3720
3721         hci_dev_lock(hdev);
3722
3723         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3724         if (!conn)
3725                 goto unlock;
3726
3727         if (!ev->status)
3728                 memcpy(conn->features[0], ev->features, 8);
3729
3730         if (conn->state != BT_CONFIG)
3731                 goto unlock;
3732
3733         if (!ev->status && lmp_ext_feat_capable(hdev) &&
3734             lmp_ext_feat_capable(conn)) {
3735                 struct hci_cp_read_remote_ext_features cp;
3736                 cp.handle = ev->handle;
3737                 cp.page = 0x01;
3738                 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3739                              sizeof(cp), &cp);
3740                 goto unlock;
3741         }
3742
3743         if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3744                 struct hci_cp_remote_name_req cp;
3745                 memset(&cp, 0, sizeof(cp));
3746                 bacpy(&cp.bdaddr, &conn->dst);
3747                 cp.pscan_rep_mode = 0x02;
3748                 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3749         } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3750                 mgmt_device_connected(hdev, conn, NULL, 0);
3751
3752         if (!hci_outgoing_auth_needed(hdev, conn)) {
3753                 conn->state = BT_CONNECTED;
3754                 hci_connect_cfm(conn, ev->status);
3755                 hci_conn_drop(conn);
3756         }
3757
3758 unlock:
3759         hci_dev_unlock(hdev);
3760 }
3761
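     /* Update command flow control from the Num_HCI_Command_Packets field of
      * a Command Complete/Status event. The command timeout is always
      * cancelled; unless a reset is in progress, a non-zero value re-opens
      * the command window and stops the ncmd watchdog, while zero (re)arms
      * the watchdog.
      */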
3762 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3763 {
3764         cancel_delayed_work(&hdev->cmd_timer);
3765
3766         if (!test_bit(HCI_RESET, &hdev->flags)) {
3767                 if (ncmd) {
3768                         cancel_delayed_work(&hdev->ncmd_timer);
3769                         atomic_set(&hdev->cmd_cnt, 1);
3770                 } else {
3771                         schedule_delayed_work(&hdev->ncmd_timer,
3772                                               HCI_NCMD_TIMEOUT);
3773                 }
3774         }
3775 }
3776
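     /* Command Complete dispatch table: each entry maps an opcode to its
      * handler plus the minimum and maximum expected parameter length.
      * HCI_CC_STATUS is shorthand for replies that carry only a status byte.
      */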
3777 #define HCI_CC_VL(_op, _func, _min, _max) \
3778 { \
3779         .op = _op, \
3780         .func = _func, \
3781         .min_len = _min, \
3782         .max_len = _max, \
3783 }
3784
3785 #define HCI_CC(_op, _func, _len) \
3786         HCI_CC_VL(_op, _func, _len, _len)
3787
3788 #define HCI_CC_STATUS(_op, _func) \
3789         HCI_CC(_op, _func, sizeof(struct hci_ev_status))
3790
3791 static const struct hci_cc {
3792         u16  op;
3793         u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
3794         u16  min_len;
3795         u16  max_len;
3796 } hci_cc_table[] = {
3797         HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
3798         HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
3799         HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
3800         HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
3801                       hci_cc_remote_name_req_cancel),
3802         HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
3803                sizeof(struct hci_rp_role_discovery)),
3804         HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
3805                sizeof(struct hci_rp_read_link_policy)),
3806         HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
3807                sizeof(struct hci_rp_write_link_policy)),
3808         HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
3809                sizeof(struct hci_rp_read_def_link_policy)),
3810         HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
3811                       hci_cc_write_def_link_policy),
3812         HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
3813         HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
3814                sizeof(struct hci_rp_read_stored_link_key)),
3815         HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
3816                sizeof(struct hci_rp_delete_stored_link_key)),
3817         HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
3818         HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
3819                sizeof(struct hci_rp_read_local_name)),
3820         HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
3821         HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
3822         HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
3823         HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
3824         HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
3825                sizeof(struct hci_rp_read_class_of_dev)),
3826         HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
3827         HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
3828                sizeof(struct hci_rp_read_voice_setting)),
3829         HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
3830         HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
3831                sizeof(struct hci_rp_read_num_supported_iac)),
3832         HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
3833         HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
3834         HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
3835                sizeof(struct hci_rp_read_auth_payload_to)),
3836         HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
3837                sizeof(struct hci_rp_write_auth_payload_to)),
3838         HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
3839                sizeof(struct hci_rp_read_local_version)),
3840         HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
3841                sizeof(struct hci_rp_read_local_commands)),
3842         HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
3843                sizeof(struct hci_rp_read_local_features)),
3844         HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
3845                sizeof(struct hci_rp_read_local_ext_features)),
3846         HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
3847                sizeof(struct hci_rp_read_buffer_size)),
3848         HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
3849                sizeof(struct hci_rp_read_bd_addr)),
3850         HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
3851                sizeof(struct hci_rp_read_local_pairing_opts)),
3852         HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
3853                sizeof(struct hci_rp_read_page_scan_activity)),
3854         HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
3855                       hci_cc_write_page_scan_activity),
3856         HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
3857                sizeof(struct hci_rp_read_page_scan_type)),
3858         HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
3859         HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
3860                sizeof(struct hci_rp_read_data_block_size)),
3861         HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
3862                sizeof(struct hci_rp_read_flow_control_mode)),
3863         HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
3864                sizeof(struct hci_rp_read_local_amp_info)),
3865         HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
3866                sizeof(struct hci_rp_read_clock)),
3867         HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
3868                sizeof(struct hci_rp_read_inq_rsp_tx_power)),
3869         HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
3870                hci_cc_read_def_err_data_reporting,
3871                sizeof(struct hci_rp_read_def_err_data_reporting)),
3872         HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
3873                       hci_cc_write_def_err_data_reporting),
3874         HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
3875                sizeof(struct hci_rp_pin_code_reply)),
3876         HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
3877                sizeof(struct hci_rp_pin_code_neg_reply)),
3878         HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
3879                sizeof(struct hci_rp_read_local_oob_data)),
3880         HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
3881                sizeof(struct hci_rp_read_local_oob_ext_data)),
3882         HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
3883                sizeof(struct hci_rp_le_read_buffer_size)),
3884         HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
3885                sizeof(struct hci_rp_le_read_local_features)),
3886         HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
3887                sizeof(struct hci_rp_le_read_adv_tx_power)),
3888         HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
3889                sizeof(struct hci_rp_user_confirm_reply)),
3890         HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
3891                sizeof(struct hci_rp_user_confirm_reply)),
3892         HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
3893                sizeof(struct hci_rp_user_confirm_reply)),
3894         HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
3895                sizeof(struct hci_rp_user_confirm_reply)),
3896         HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
3897         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
3898         HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
3899         HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
3900         HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
3901                hci_cc_le_read_accept_list_size,
3902                sizeof(struct hci_rp_le_read_accept_list_size)),
3903         HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
3904         HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
3905                       hci_cc_le_add_to_accept_list),
3906         HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
3907                       hci_cc_le_del_from_accept_list),
3908         HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
3909                sizeof(struct hci_rp_le_read_supported_states)),
3910         HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
3911                sizeof(struct hci_rp_le_read_def_data_len)),
3912         HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
3913                       hci_cc_le_write_def_data_len),
3914         HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
3915                       hci_cc_le_add_to_resolv_list),
3916         HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
3917                       hci_cc_le_del_from_resolv_list),
3918         HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
3919                       hci_cc_le_clear_resolv_list),
3920         HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
3921                sizeof(struct hci_rp_le_read_resolv_list_size)),
3922         HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
3923                       hci_cc_le_set_addr_resolution_enable),
3924         HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
3925                sizeof(struct hci_rp_le_read_max_data_len)),
3926         HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
3927                       hci_cc_write_le_host_supported),
3928         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
3929         HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
3930                sizeof(struct hci_rp_read_rssi)),
3931         HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
3932                sizeof(struct hci_rp_read_tx_power)),
3933         HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
3934         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
3935                       hci_cc_le_set_ext_scan_param),
3936         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3937                       hci_cc_le_set_ext_scan_enable),
3938         HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
3939         HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
3940                hci_cc_le_read_num_adv_sets,
3941                sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
3942         HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
3943                sizeof(struct hci_rp_le_set_ext_adv_params)),
3944         HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
3945                       hci_cc_le_set_ext_adv_enable),
3946         HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
3947                       hci_cc_le_set_adv_set_random_addr),
3948         HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
3949         HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
3950         HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
3951                sizeof(struct hci_rp_le_read_transmit_power)),
3952         HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode)
3953 };
3954
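     /* Validate the Command Complete parameter length against the table entry
      * and invoke the handler: replies shorter than min_len are rejected,
      * while longer-than-expected replies only trigger a warning.
      */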
3955 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
3956                       struct sk_buff *skb)
3957 {
3958         void *data;
3959
3960         if (skb->len < cc->min_len) {
3961                 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
3962                            cc->op, skb->len, cc->min_len);
3963                 return HCI_ERROR_UNSPECIFIED;
3964         }
3965
3966         /* Just warn if the length is over max_len; it may still be
3967          * possible to partially parse the cc, so leave it to the callback
3968          * to decide whether that is acceptable.
3969          */
3970         if (skb->len > cc->max_len)
3971                 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
3972                             cc->op, skb->len, cc->max_len);
3973
3974         data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
3975         if (!data)
3976                 return HCI_ERROR_UNSPECIFIED;
3977
3978         return cc->func(hdev, data, skb);
3979 }
3980
3981 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
3982                                  struct sk_buff *skb, u16 *opcode, u8 *status,
3983                                  hci_req_complete_t *req_complete,
3984                                  hci_req_complete_skb_t *req_complete_skb)
3985 {
3986         struct hci_ev_cmd_complete *ev = data;
3987         int i;
3988
3989         *opcode = __le16_to_cpu(ev->opcode);
3990
3991         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
3992
3993         for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
3994                 if (hci_cc_table[i].op == *opcode) {
3995                         *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
3996                         break;
3997                 }
3998         }
3999
4000         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4001
4002         hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4003                              req_complete_skb);
4004
4005         if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4006                 bt_dev_err(hdev,
4007                            "unexpected event for opcode 0x%4.4x", *opcode);
4008                 return;
4009         }
4010
4011         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4012                 queue_work(hdev->workqueue, &hdev->cmd_work);
4013 }
4014
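     /* Command Status dispatch table: maps opcodes of commands that complete
      * asynchronously to the handler run when their Command Status event
      * arrives.
      */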
4015 #define HCI_CS(_op, _func) \
4016 { \
4017         .op = _op, \
4018         .func = _func, \
4019 }
4020
4021 static const struct hci_cs {
4022         u16  op;
4023         void (*func)(struct hci_dev *hdev, __u8 status);
4024 } hci_cs_table[] = {
4025         HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4026         HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4027         HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4028         HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4029         HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4030         HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4031         HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4032         HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4033         HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4034                hci_cs_read_remote_ext_features),
4035         HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4036         HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4037                hci_cs_enhanced_setup_sync_conn),
4038         HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4039         HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4040         HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4041         HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4042         HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4043         HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4044         HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn)
4045 };
4046
4047 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4048                                struct sk_buff *skb, u16 *opcode, u8 *status,
4049                                hci_req_complete_t *req_complete,
4050                                hci_req_complete_skb_t *req_complete_skb)
4051 {
4052         struct hci_ev_cmd_status *ev = data;
4053         int i;
4054
4055         *opcode = __le16_to_cpu(ev->opcode);
4056         *status = ev->status;
4057
4058         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4059
4060         for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4061                 if (hci_cs_table[i].op == *opcode) {
4062                         hci_cs_table[i].func(hdev, ev->status);
4063                         break;
4064                 }
4065         }
4066
4067         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4068
4069         /* Indicate request completion if the command failed. Also, if
4070          * we're not waiting for a special event and we get a success
4071          * command status, we should try to flag the request as completed
4072          * (since this kind of command will not generate a command
4073          * complete event).
4074          */
4075         if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
4076                 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4077                                      req_complete_skb);
4078                 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4079                         bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4080                                    *opcode);
4081                         return;
4082                 }
4083         }
4084
4085         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4086                 queue_work(hdev->workqueue, &hdev->cmd_work);
4087 }
4088
4089 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4090                                    struct sk_buff *skb)
4091 {
4092         struct hci_ev_hardware_error *ev = data;
4093
4094         bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4095
4096         hdev->hw_error_code = ev->code;
4097
4098         queue_work(hdev->req_workqueue, &hdev->error_reset);
4099 }
4100
4101 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4102                                 struct sk_buff *skb)
4103 {
4104         struct hci_ev_role_change *ev = data;
4105         struct hci_conn *conn;
4106
4107         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4108
4109         hci_dev_lock(hdev);
4110
4111         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4112         if (conn) {
4113                 if (!ev->status)
4114                         conn->role = ev->role;
4115
4116                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4117
4118                 hci_role_switch_cfm(conn, ev->status, ev->role);
4119         }
4120
4121         hci_dev_unlock(hdev);
4122 }
4123
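     /* Number of Completed Packets event: return the acknowledged buffer
      * credits to the per-link-type counters (capped at the controller's
      * advertised limits) and kick the TX work queue.
      */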
4124 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4125                                   struct sk_buff *skb)
4126 {
4127         struct hci_ev_num_comp_pkts *ev = data;
4128         int i;
4129
4130         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4131                              flex_array_size(ev, handles, ev->num)))
4132                 return;
4133
4134         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4135                 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4136                 return;
4137         }
4138
4139         bt_dev_dbg(hdev, "num %d", ev->num);
4140
4141         for (i = 0; i < ev->num; i++) {
4142                 struct hci_comp_pkts_info *info = &ev->handles[i];
4143                 struct hci_conn *conn;
4144                 __u16  handle, count;
4145
4146                 handle = __le16_to_cpu(info->handle);
4147                 count  = __le16_to_cpu(info->count);
4148
4149                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4150                 if (!conn)
4151                         continue;
4152
4153                 conn->sent -= count;
4154
4155                 switch (conn->type) {
4156                 case ACL_LINK:
4157                         hdev->acl_cnt += count;
4158                         if (hdev->acl_cnt > hdev->acl_pkts)
4159                                 hdev->acl_cnt = hdev->acl_pkts;
4160                         break;
4161
4162                 case LE_LINK:
4163                         if (hdev->le_pkts) {
4164                                 hdev->le_cnt += count;
4165                                 if (hdev->le_cnt > hdev->le_pkts)
4166                                         hdev->le_cnt = hdev->le_pkts;
4167                         } else {
4168                                 hdev->acl_cnt += count;
4169                                 if (hdev->acl_cnt > hdev->acl_pkts)
4170                                         hdev->acl_cnt = hdev->acl_pkts;
4171                         }
4172                         break;
4173
4174                 case SCO_LINK:
4175                         hdev->sco_cnt += count;
4176                         if (hdev->sco_cnt > hdev->sco_pkts)
4177                                 hdev->sco_cnt = hdev->sco_pkts;
4178                         break;
4179
4180                 default:
4181                         bt_dev_err(hdev, "unknown type %d conn %p",
4182                                    conn->type, conn);
4183                         break;
4184                 }
4185         }
4186
4187         queue_work(hdev->workqueue, &hdev->tx_work);
4188 }
4189
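     /* Resolve a connection handle according to the controller type: primary
      * controllers index connections directly, while AMP controllers index
      * logical channels whose parent connection is returned.
      */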
4190 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4191                                                  __u16 handle)
4192 {
4193         struct hci_chan *chan;
4194
4195         switch (hdev->dev_type) {
4196         case HCI_PRIMARY:
4197                 return hci_conn_hash_lookup_handle(hdev, handle);
4198         case HCI_AMP:
4199                 chan = hci_chan_lookup_handle(hdev, handle);
4200                 if (chan)
4201                         return chan->conn;
4202                 break;
4203         default:
4204                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4205                 break;
4206         }
4207
4208         return NULL;
4209 }
4210
4211 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4212                                     struct sk_buff *skb)
4213 {
4214         struct hci_ev_num_comp_blocks *ev = data;
4215         int i;
4216
4217         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4218                              flex_array_size(ev, handles, ev->num_hndl)))
4219                 return;
4220
4221         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4222                 bt_dev_err(hdev, "wrong event for mode %d",
4223                            hdev->flow_ctl_mode);
4224                 return;
4225         }
4226
4227         bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4228                    ev->num_hndl);
4229
4230         for (i = 0; i < ev->num_hndl; i++) {
4231                 struct hci_comp_blocks_info *info = &ev->handles[i];
4232                 struct hci_conn *conn = NULL;
4233                 __u16  handle, block_count;
4234
4235                 handle = __le16_to_cpu(info->handle);
4236                 block_count = __le16_to_cpu(info->blocks);
4237
4238                 conn = __hci_conn_lookup_handle(hdev, handle);
4239                 if (!conn)
4240                         continue;
4241
4242                 conn->sent -= block_count;
4243
4244                 switch (conn->type) {
4245                 case ACL_LINK:
4246                 case AMP_LINK:
4247                         hdev->block_cnt += block_count;
4248                         if (hdev->block_cnt > hdev->num_blocks)
4249                                 hdev->block_cnt = hdev->num_blocks;
4250                         break;
4251
4252                 default:
4253                         bt_dev_err(hdev, "unknown type %d conn %p",
4254                                    conn->type, conn);
4255                         break;
4256                 }
4257         }
4258
4259         queue_work(hdev->workqueue, &hdev->tx_work);
4260 }
4261
4262 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4263                                 struct sk_buff *skb)
4264 {
4265         struct hci_ev_mode_change *ev = data;
4266         struct hci_conn *conn;
4267
4268         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4269
4270         hci_dev_lock(hdev);
4271
4272         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4273         if (conn) {
4274                 conn->mode = ev->mode;
4275
4276                 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4277                                         &conn->flags)) {
4278                         if (conn->mode == HCI_CM_ACTIVE)
4279                                 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4280                         else
4281                                 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4282                 }
4283
4284                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4285                         hci_sco_setup(conn, ev->status);
4286         }
4287
4288         hci_dev_unlock(hdev);
4289 }
4290
4291 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4292                                      struct sk_buff *skb)
4293 {
4294         struct hci_ev_pin_code_req *ev = data;
4295         struct hci_conn *conn;
4296
4297         bt_dev_dbg(hdev, "");
4298
4299         hci_dev_lock(hdev);
4300
4301         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4302         if (!conn)
4303                 goto unlock;
4304
4305         if (conn->state == BT_CONNECTED) {
4306                 hci_conn_hold(conn);
4307                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4308                 hci_conn_drop(conn);
4309         }
4310
4311         if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4312             !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4313                 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4314                              sizeof(ev->bdaddr), &ev->bdaddr);
4315         } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4316                 u8 secure;
4317
4318                 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4319                         secure = 1;
4320                 else
4321                         secure = 0;
4322
4323                 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4324         }
4325
4326 unlock:
4327         hci_dev_unlock(hdev);
4328 }
4329
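     /* Record the link key type and PIN length on the connection and derive
      * the pending security level from the key type. Changed combination keys
      * are ignored here, and unit/debug keys leave the security level alone.
      */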
4330 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4331 {
4332         if (key_type == HCI_LK_CHANGED_COMBINATION)
4333                 return;
4334
4335         conn->pin_length = pin_len;
4336         conn->key_type = key_type;
4337
4338         switch (key_type) {
4339         case HCI_LK_LOCAL_UNIT:
4340         case HCI_LK_REMOTE_UNIT:
4341         case HCI_LK_DEBUG_COMBINATION:
4342                 return;
4343         case HCI_LK_COMBINATION:
4344                 if (pin_len == 16)
4345                         conn->pending_sec_level = BT_SECURITY_HIGH;
4346                 else
4347                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4348                 break;
4349         case HCI_LK_UNAUTH_COMBINATION_P192:
4350         case HCI_LK_UNAUTH_COMBINATION_P256:
4351                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4352                 break;
4353         case HCI_LK_AUTH_COMBINATION_P192:
4354                 conn->pending_sec_level = BT_SECURITY_HIGH;
4355                 break;
4356         case HCI_LK_AUTH_COMBINATION_P256:
4357                 conn->pending_sec_level = BT_SECURITY_FIPS;
4358                 break;
4359         }
4360 }
4361
4362 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4363                                      struct sk_buff *skb)
4364 {
4365         struct hci_ev_link_key_req *ev = data;
4366         struct hci_cp_link_key_reply cp;
4367         struct hci_conn *conn;
4368         struct link_key *key;
4369
4370         bt_dev_dbg(hdev, "");
4371
4372         if (!hci_dev_test_flag(hdev, HCI_MGMT))
4373                 return;
4374
4375         hci_dev_lock(hdev);
4376
4377         key = hci_find_link_key(hdev, &ev->bdaddr);
4378         if (!key) {
4379                 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4380                 goto not_found;
4381         }
4382
4383         bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4384
4385         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4386         if (conn) {
4387                 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4388
4389                 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4390                      key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4391                     conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4392                         bt_dev_dbg(hdev, "ignoring unauthenticated key");
4393                         goto not_found;
4394                 }
4395
4396                 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4397                     (conn->pending_sec_level == BT_SECURITY_HIGH ||
4398                      conn->pending_sec_level == BT_SECURITY_FIPS)) {
4399                         bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4400                         goto not_found;
4401                 }
4402
4403                 conn_set_key(conn, key->type, key->pin_len);
4404         }
4405
4406         bacpy(&cp.bdaddr, &ev->bdaddr);
4407         memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4408
4409         hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4410
4411         hci_dev_unlock(hdev);
4412
4413         return;
4414
4415 not_found:
4416         hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4417         hci_dev_unlock(hdev);
4418 }
4419
4420 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4421                                     struct sk_buff *skb)
4422 {
4423         struct hci_ev_link_key_notify *ev = data;
4424         struct hci_conn *conn;
4425         struct link_key *key;
4426         bool persistent;
4427         u8 pin_len = 0;
4428
4429         bt_dev_dbg(hdev, "");
4430
4431         hci_dev_lock(hdev);
4432
4433         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4434         if (!conn)
4435                 goto unlock;
4436
4437         hci_conn_hold(conn);
4438         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4439         hci_conn_drop(conn);
4440
4441         set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4442         conn_set_key(conn, ev->key_type, conn->pin_length);
4443
4444         if (!hci_dev_test_flag(hdev, HCI_MGMT))
4445                 goto unlock;
4446
4447         key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4448                                 ev->key_type, pin_len, &persistent);
4449         if (!key)
4450                 goto unlock;
4451
4452         /* Update connection information since adding the key will have
4453          * fixed up the type in the case of changed combination keys.
4454          */
4455         if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4456                 conn_set_key(conn, key->type, key->pin_len);
4457
4458         mgmt_new_link_key(hdev, key, persistent);
4459
4460         /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4461          * is set. If it's not set simply remove the key from the kernel
4462          * list (we've still notified user space about it but with
4463          * store_hint being 0).
4464          */
4465         if (key->type == HCI_LK_DEBUG_COMBINATION &&
4466             !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4467                 list_del_rcu(&key->list);
4468                 kfree_rcu(key, rcu);
4469                 goto unlock;
4470         }
4471
4472         if (persistent)
4473                 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4474         else
4475                 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4476
4477 unlock:
4478         hci_dev_unlock(hdev);
4479 }
4480
4481 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4482                                  struct sk_buff *skb)
4483 {
4484         struct hci_ev_clock_offset *ev = data;
4485         struct hci_conn *conn;
4486
4487         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4488
4489         hci_dev_lock(hdev);
4490
4491         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4492         if (conn && !ev->status) {
4493                 struct inquiry_entry *ie;
4494
4495                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4496                 if (ie) {
4497                         ie->data.clock_offset = ev->clock_offset;
4498                         ie->timestamp = jiffies;
4499                 }
4500         }
4501
4502         hci_dev_unlock(hdev);
4503 }
4504
4505 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4506                                     struct sk_buff *skb)
4507 {
4508         struct hci_ev_pkt_type_change *ev = data;
4509         struct hci_conn *conn;
4510
4511         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4512
4513         hci_dev_lock(hdev);
4514
4515         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4516         if (conn && !ev->status)
4517                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4518
4519         hci_dev_unlock(hdev);
4520 }
4521
4522 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4523                                    struct sk_buff *skb)
4524 {
4525         struct hci_ev_pscan_rep_mode *ev = data;
4526         struct inquiry_entry *ie;
4527
4528         bt_dev_dbg(hdev, "");
4529
4530         hci_dev_lock(hdev);
4531
4532         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4533         if (ie) {
4534                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4535                 ie->timestamp = jiffies;
4536         }
4537
4538         hci_dev_unlock(hdev);
4539 }
4540
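     /* Inquiry Result with RSSI event: the payload comes in two variants,
      * with or without the pscan_mode field, so the total length is used to
      * select the right structure before updating the inquiry cache and
      * notifying mgmt.
      */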
4541 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4542                                              struct sk_buff *skb)
4543 {
4544         struct hci_ev_inquiry_result_rssi *ev = edata;
4545         struct inquiry_data data;
4546         int i;
4547
4548         bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4549
4550         if (!ev->num)
4551                 return;
4552
4553         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4554                 return;
4555
4556         hci_dev_lock(hdev);
4557
4558         if (skb->len == array_size(ev->num,
4559                                    sizeof(struct inquiry_info_rssi_pscan))) {
4560                 struct inquiry_info_rssi_pscan *info;
4561
4562                 for (i = 0; i < ev->num; i++) {
4563                         u32 flags;
4564
4565                         info = hci_ev_skb_pull(hdev, skb,
4566                                                HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4567                                                sizeof(*info));
4568                         if (!info) {
4569                                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4570                                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4571                                 goto unlock;
4572                         }
4573
4574                         bacpy(&data.bdaddr, &info->bdaddr);
4575                         data.pscan_rep_mode     = info->pscan_rep_mode;
4576                         data.pscan_period_mode  = info->pscan_period_mode;
4577                         data.pscan_mode         = info->pscan_mode;
4578                         memcpy(data.dev_class, info->dev_class, 3);
4579                         data.clock_offset       = info->clock_offset;
4580                         data.rssi               = info->rssi;
4581                         data.ssp_mode           = 0x00;
4582
4583                         flags = hci_inquiry_cache_update(hdev, &data, false);
4584
4585                         mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4586                                           info->dev_class, info->rssi,
4587                                           flags, NULL, 0, NULL, 0);
4588                 }
4589         } else if (skb->len == array_size(ev->num,
4590                                           sizeof(struct inquiry_info_rssi))) {
4591                 struct inquiry_info_rssi *info;
4592
4593                 for (i = 0; i < ev->num; i++) {
4594                         u32 flags;
4595
4596                         info = hci_ev_skb_pull(hdev, skb,
4597                                                HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4598                                                sizeof(*info));
4599                         if (!info) {
4600                                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4601                                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4602                                 goto unlock;
4603                         }
4604
4605                         bacpy(&data.bdaddr, &info->bdaddr);
4606                         data.pscan_rep_mode     = info->pscan_rep_mode;
4607                         data.pscan_period_mode  = info->pscan_period_mode;
4608                         data.pscan_mode         = 0x00;
4609                         memcpy(data.dev_class, info->dev_class, 3);
4610                         data.clock_offset       = info->clock_offset;
4611                         data.rssi               = info->rssi;
4612                         data.ssp_mode           = 0x00;
4613
4614                         flags = hci_inquiry_cache_update(hdev, &data, false);
4615
4616                         mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4617                                           info->dev_class, info->rssi,
4618                                           flags, NULL, 0, NULL, 0);
4619                 }
4620         } else {
4621                 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4622                            HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4623         }
4624 unlock:
4625         hci_dev_unlock(hdev);
4626 }
4627
4628 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4629                                         struct sk_buff *skb)
4630 {
4631         struct hci_ev_remote_ext_features *ev = data;
4632         struct hci_conn *conn;
4633
4634         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4635
4636         hci_dev_lock(hdev);
4637
4638         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4639         if (!conn)
4640                 goto unlock;
4641
4642         if (ev->page < HCI_MAX_PAGES)
4643                 memcpy(conn->features[ev->page], ev->features, 8);
4644
4645         if (!ev->status && ev->page == 0x01) {
4646                 struct inquiry_entry *ie;
4647
4648                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4649                 if (ie)
4650                         ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4651
4652                 if (ev->features[0] & LMP_HOST_SSP) {
4653                         set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4654                 } else {
4655                         /* The Bluetooth specification mandates that
4656                          * Extended Inquiry Results are only used when Secure
4657                          * Simple Pairing is enabled, but some devices violate
4658                          * this.
4659                          *
4660                          * To make these devices work, the internal SSP
4661                          * enabled flag needs to be cleared if the remote host
4662                          * features do not indicate SSP support. */
4663                         clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4664                 }
4665
4666                 if (ev->features[0] & LMP_HOST_SC)
4667                         set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4668         }
4669
4670         if (conn->state != BT_CONFIG)
4671                 goto unlock;
4672
4673         if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4674                 struct hci_cp_remote_name_req cp;
4675                 memset(&cp, 0, sizeof(cp));
4676                 bacpy(&cp.bdaddr, &conn->dst);
4677                 cp.pscan_rep_mode = 0x02;
4678                 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4679         } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4680                 mgmt_device_connected(hdev, conn, NULL, 0);
4681
4682         if (!hci_outgoing_auth_needed(hdev, conn)) {
4683                 conn->state = BT_CONNECTED;
4684                 hci_connect_cfm(conn, ev->status);
4685                 hci_conn_drop(conn);
4686         }
4687
4688 unlock:
4689         hci_dev_unlock(hdev);
4690 }
4691
4692 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4693                                        struct sk_buff *skb)
4694 {
4695         struct hci_ev_sync_conn_complete *ev = data;
4696         struct hci_conn *conn;
4697         u8 status = ev->status;
4698
4699         switch (ev->link_type) {
4700         case SCO_LINK:
4701         case ESCO_LINK:
4702                 break;
4703         default:
4704                 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4705                  * for HCI_Synchronous_Connection_Complete is limited to
4706                  * either SCO or eSCO
4707                  */
4708                 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4709                 return;
4710         }
4711
4712         bt_dev_dbg(hdev, "status 0x%2.2x", status);
4713
4714         hci_dev_lock(hdev);
4715
4716         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4717         if (!conn) {
4718                 if (ev->link_type == ESCO_LINK)
4719                         goto unlock;
4720
4721                 /* When the link type in the event indicates SCO connection
4722                  * and lookup of the connection object fails, then check
4723                  * if an eSCO connection object exists.
4724                  *
4725                  * The core specification limits synchronous connections to
4726                  * either SCO or eSCO. The eSCO connection is preferred and
4727                  * attempted first; until it is successfully established,
4728                  * the link type will be hinted as eSCO.
4729                  */
4730                 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4731                 if (!conn)
4732                         goto unlock;
4733         }
4734
4735         /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
4736          * Processing it more than once per connection can corrupt kernel memory.
4737          *
4738          * As the connection handle is set here for the first time, it indicates
4739          * whether the connection is already set up.
4740          */
4741         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
4742                 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4743                 goto unlock;
4744         }
4745
4746         switch (status) {
4747         case 0x00:
4748                 conn->handle = __le16_to_cpu(ev->handle);
4749                 if (conn->handle > HCI_CONN_HANDLE_MAX) {
4750                         bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
4751                                    conn->handle, HCI_CONN_HANDLE_MAX);
4752                         status = HCI_ERROR_INVALID_PARAMETERS;
4753                         conn->state = BT_CLOSED;
4754                         break;
4755                 }
4756
4757                 conn->state  = BT_CONNECTED;
4758                 conn->type   = ev->link_type;
4759
4760                 hci_debugfs_create_conn(conn);
4761                 hci_conn_add_sysfs(conn);
4762                 break;
4763
4764         case 0x10:      /* Connection Accept Timeout */
4765         case 0x0d:      /* Connection Rejected due to Limited Resources */
4766         case 0x11:      /* Unsupported Feature or Parameter Value */
4767         case 0x1c:      /* SCO interval rejected */
4768         case 0x1a:      /* Unsupported Remote Feature */
4769         case 0x1e:      /* Invalid LMP Parameters */
4770         case 0x1f:      /* Unspecified error */
4771         case 0x20:      /* Unsupported LMP Parameter value */
4772                 if (conn->out) {
4773                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4774                                         (hdev->esco_type & EDR_ESCO_MASK);
4775                         if (hci_setup_sync(conn, conn->link->handle))
4776                                 goto unlock;
4777                 }
4778                 fallthrough;
4779
4780         default:
4781                 conn->state = BT_CLOSED;
4782                 break;
4783         }
4784
4785         bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
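        /* Air_Mode coding as defined for the HCI_Synchronous_Connection_Complete
         * event in the Core specification: 0x00 u-law log, 0x01 A-law log,
         * 0x02 CVSD, 0x03 transparent data. Only the CVSD and transparent air
         * modes are forwarded to the driver notify callback below.
         */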
4786         /* Notify only for SCO over the HCI transport data path (value
4787          * zero); a non-zero value indicates a non-HCI transport data path.
4788          */
4789         if (conn->codec.data_path == 0 && hdev->notify) {
4790                 switch (ev->air_mode) {
4791                 case 0x02:
4792                         hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4793                         break;
4794                 case 0x03:
4795                         hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4796                         break;
4797                 }
4798         }
4799
4800         hci_connect_cfm(conn, status);
4801         if (status)
4802                 hci_conn_del(conn);
4803
4804 unlock:
4805         hci_dev_unlock(hdev);
4806 }
4807
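/* EIR data is a sequence of { length, type, data } fields where the length
 * octet covers the type octet plus the data; a length of zero terminates
 * the significant part of the EIR block.
 */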
4808 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4809 {
4810         size_t parsed = 0;
4811
4812         while (parsed < eir_len) {
4813                 u8 field_len = eir[0];
4814
4815                 if (field_len == 0)
4816                         return parsed;
4817
4818                 parsed += field_len + 1;
4819                 eir += field_len + 1;
4820         }
4821
4822         return eir_len;
4823 }
4824
4825 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
4826                                             struct sk_buff *skb)
4827 {
4828         struct hci_ev_ext_inquiry_result *ev = edata;
4829         struct inquiry_data data;
4830         size_t eir_len;
4831         int i;
4832
4833         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
4834                              flex_array_size(ev, info, ev->num)))
4835                 return;
4836
4837         bt_dev_dbg(hdev, "num %d", ev->num);
4838
4839         if (!ev->num)
4840                 return;
4841
4842         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4843                 return;
4844
4845         hci_dev_lock(hdev);
4846
4847         for (i = 0; i < ev->num; i++) {
4848                 struct extended_inquiry_info *info = &ev->info[i];
4849                 u32 flags;
4850                 bool name_known;
4851
4852                 bacpy(&data.bdaddr, &info->bdaddr);
4853                 data.pscan_rep_mode     = info->pscan_rep_mode;
4854                 data.pscan_period_mode  = info->pscan_period_mode;
4855                 data.pscan_mode         = 0x00;
4856                 memcpy(data.dev_class, info->dev_class, 3);
4857                 data.clock_offset       = info->clock_offset;
4858                 data.rssi               = info->rssi;
4859                 data.ssp_mode           = 0x01;
4860
4861                 if (hci_dev_test_flag(hdev, HCI_MGMT))
4862                         name_known = eir_get_data(info->data,
4863                                                   sizeof(info->data),
4864                                                   EIR_NAME_COMPLETE, NULL);
4865                 else
4866                         name_known = true;
4867
4868                 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4869
4870                 eir_len = eir_get_length(info->data, sizeof(info->data));
4871
4872                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4873                                   info->dev_class, info->rssi,
4874                                   flags, info->data, eir_len, NULL, 0);
4875         }
4876
4877         hci_dev_unlock(hdev);
4878 }
4879
4880 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
4881                                          struct sk_buff *skb)
4882 {
4883         struct hci_ev_key_refresh_complete *ev = data;
4884         struct hci_conn *conn;
4885
4886         bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
4887                    __le16_to_cpu(ev->handle));
4888
4889         hci_dev_lock(hdev);
4890
4891         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4892         if (!conn)
4893                 goto unlock;
4894
4895         /* For BR/EDR the necessary steps are taken through the
4896          * auth_complete event.
4897          */
4898         if (conn->type != LE_LINK)
4899                 goto unlock;
4900
4901         if (!ev->status)
4902                 conn->sec_level = conn->pending_sec_level;
4903
4904         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4905
4906         if (ev->status && conn->state == BT_CONNECTED) {
4907                 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4908                 hci_conn_drop(conn);
4909                 goto unlock;
4910         }
4911
4912         if (conn->state == BT_CONFIG) {
4913                 if (!ev->status)
4914                         conn->state = BT_CONNECTED;
4915
4916                 hci_connect_cfm(conn, ev->status);
4917                 hci_conn_drop(conn);
4918         } else {
4919                 hci_auth_cfm(conn, ev->status);
4920
4921                 hci_conn_hold(conn);
4922                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4923                 hci_conn_drop(conn);
4924         }
4925
4926 unlock:
4927         hci_dev_unlock(hdev);
4928 }
4929
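/* HCI authentication requirement values encode the MITM requirement in
 * bit 0 (e.g. HCI_AT_NO_BONDING vs HCI_AT_NO_BONDING_MITM), which is why
 * the helper below ORs in or masks out bit 0 to request or drop MITM
 * protection.
 */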
4930 static u8 hci_get_auth_req(struct hci_conn *conn)
4931 {
4932         /* If remote requests no-bonding follow that lead */
4933         if (conn->remote_auth == HCI_AT_NO_BONDING ||
4934             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4935                 return conn->remote_auth | (conn->auth_type & 0x01);
4936
4937         /* If both remote and local have enough IO capabilities, require
4938          * MITM protection
4939          */
4940         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4941             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4942                 return conn->remote_auth | 0x01;
4943
4944         /* No MITM protection possible so ignore remote requirement */
4945         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4946 }
4947
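/* Compute the OOB_Data_Present value for the IO Capability Reply:
 * 0x00 = no OOB data, 0x01 = P-192 values present, 0x02 = P-256 values
 * present, 0x03 = both (the latter only via the stored data->present).
 */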
4948 static u8 bredr_oob_data_present(struct hci_conn *conn)
4949 {
4950         struct hci_dev *hdev = conn->hdev;
4951         struct oob_data *data;
4952
4953         data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4954         if (!data)
4955                 return 0x00;
4956
4957         if (bredr_sc_enabled(hdev)) {
4958                 /* When Secure Connections is enabled, just return
4959                  * the present value stored with the OOB data. The
4960                  * stored value contains the right present information.
4961                  * However, it can only be trusted when not in Secure
4962                  * Connections Only mode.
4963                  */
4964                 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4965                         return data->present;
4966
4967                 /* When Secure Connections Only mode is enabled, then
4968                  * the P-256 values are required. If they are not
4969                  * available, then do not declare that OOB data is
4970                  * present.
4971                  */
4972                 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4973                     !memcmp(data->hash256, ZERO_KEY, 16))
4974                         return 0x00;
4975
4976                 return 0x02;
4977         }
4978
4979         /* When Secure Connections is not enabled or not actually
4980          * supported by the hardware, then check if the P-192 data
4981          * values are present.
4982          */
4983         if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4984             !memcmp(data->hash192, ZERO_KEY, 16))
4985                 return 0x00;
4986
4987         return 0x01;
4988 }
4989
4990 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
4991                                     struct sk_buff *skb)
4992 {
4993         struct hci_ev_io_capa_request *ev = data;
4994         struct hci_conn *conn;
4995
4996         bt_dev_dbg(hdev, "");
4997
4998         hci_dev_lock(hdev);
4999
5000         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5001         if (!conn)
5002                 goto unlock;
5003
5004         hci_conn_hold(conn);
5005
5006         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5007                 goto unlock;
5008
5009         /* Allow pairing if we're bondable, if we are the initiator of
5010          * the pairing, or if the remote is not requesting bonding.
5011          */
5012         if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5013             test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5014             (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5015                 struct hci_cp_io_capability_reply cp;
5016
5017                 bacpy(&cp.bdaddr, &ev->bdaddr);
5018                 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
5019                  * as KeyboardDisplay is not defined for BR/EDR by the BT spec. */
5020                 cp.capability = (conn->io_capability == 0x04) ?
5021                                 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5022
5023                 /* If we are initiators, there is no remote information yet */
5024                 if (conn->remote_auth == 0xff) {
5025                         /* Request MITM protection if our IO caps allow it
5026                          * except for the no-bonding case.
5027                          */
5028                         if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5029                             conn->auth_type != HCI_AT_NO_BONDING)
5030                                 conn->auth_type |= 0x01;
5031                 } else {
5032                         conn->auth_type = hci_get_auth_req(conn);
5033                 }
5034
5035                 /* If we're not bondable, force one of the non-bondable
5036                  * authentication requirement values.
5037                  */
5038                 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5039                         conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5040
5041                 cp.authentication = conn->auth_type;
5042                 cp.oob_data = bredr_oob_data_present(conn);
5043
5044                 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5045                              sizeof(cp), &cp);
5046         } else {
5047                 struct hci_cp_io_capability_neg_reply cp;
5048
5049                 bacpy(&cp.bdaddr, &ev->bdaddr);
5050                 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5051
5052                 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5053                              sizeof(cp), &cp);
5054         }
5055
5056 unlock:
5057         hci_dev_unlock(hdev);
5058 }
5059
5060 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5061                                   struct sk_buff *skb)
5062 {
5063         struct hci_ev_io_capa_reply *ev = data;
5064         struct hci_conn *conn;
5065
5066         bt_dev_dbg(hdev, "");
5067
5068         hci_dev_lock(hdev);
5069
5070         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5071         if (!conn)
5072                 goto unlock;
5073
5074         conn->remote_cap = ev->capability;
5075         conn->remote_auth = ev->authentication;
5076
5077 unlock:
5078         hci_dev_unlock(hdev);
5079 }
5080
5081 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5082                                          struct sk_buff *skb)
5083 {
5084         struct hci_ev_user_confirm_req *ev = data;
5085         int loc_mitm, rem_mitm, confirm_hint = 0;
5086         struct hci_conn *conn;
5087
5088         bt_dev_dbg(hdev, "");
5089
5090         hci_dev_lock(hdev);
5091
5092         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5093                 goto unlock;
5094
5095         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5096         if (!conn)
5097                 goto unlock;
5098
5099         loc_mitm = (conn->auth_type & 0x01);
5100         rem_mitm = (conn->remote_auth & 0x01);
5101
5102         /* If we require MITM but the remote device can't provide that
5103          * (it has NoInputNoOutput) then reject the confirmation
5104          * request. We check the security level here since it doesn't
5105          * necessarily match conn->auth_type.
5106          */
5107         if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5108             conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5109                 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5110                 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5111                              sizeof(ev->bdaddr), &ev->bdaddr);
5112                 goto unlock;
5113         }
5114
5115         /* If no side requires MITM protection; auto-accept */
5116         if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5117             (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5118
5119                 /* If we're not the initiator, request authorization to
5120                  * proceed from user space (mgmt_user_confirm with
5121                  * confirm_hint set to 1). The exception is if neither
5122                  * side requires MITM or if the local IO capability is
5123                  * NoInputNoOutput, in which case we auto-accept.
5124                  */
5125                 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5126                     conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5127                     (loc_mitm || rem_mitm)) {
5128                         bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5129                         confirm_hint = 1;
5130                         goto confirm;
5131                 }
5132
5133                 /* If a link key already exists in the local host, leave
5134                  * the decision to user space since the remote device could
5135                  * be legitimate or malicious.
5136                  */
5137                 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5138                         bt_dev_dbg(hdev, "Local host already has link key");
5139                         confirm_hint = 1;
5140                         goto confirm;
5141                 }
5142
5143                 BT_DBG("Auto-accept of user confirmation with %ums delay",
5144                        hdev->auto_accept_delay);
5145
5146                 if (hdev->auto_accept_delay > 0) {
5147                         int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5148                         queue_delayed_work(conn->hdev->workqueue,
5149                                            &conn->auto_accept_work, delay);
5150                         goto unlock;
5151                 }
5152
5153                 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5154                              sizeof(ev->bdaddr), &ev->bdaddr);
5155                 goto unlock;
5156         }
5157
5158 confirm:
5159         mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5160                                   le32_to_cpu(ev->passkey), confirm_hint);
5161
5162 unlock:
5163         hci_dev_unlock(hdev);
5164 }
5165
5166 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5167                                          struct sk_buff *skb)
5168 {
5169         struct hci_ev_user_passkey_req *ev = data;
5170
5171         bt_dev_dbg(hdev, "");
5172
5173         if (hci_dev_test_flag(hdev, HCI_MGMT))
5174                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5175 }
5176
5177 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5178                                         struct sk_buff *skb)
5179 {
5180         struct hci_ev_user_passkey_notify *ev = data;
5181         struct hci_conn *conn;
5182
5183         bt_dev_dbg(hdev, "");
5184
5185         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5186         if (!conn)
5187                 return;
5188
5189         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5190         conn->passkey_entered = 0;
5191
5192         if (hci_dev_test_flag(hdev, HCI_MGMT))
5193                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5194                                          conn->dst_type, conn->passkey_notify,
5195                                          conn->passkey_entered);
5196 }
5197
5198 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5199                                     struct sk_buff *skb)
5200 {
5201         struct hci_ev_keypress_notify *ev = data;
5202         struct hci_conn *conn;
5203
5204         bt_dev_dbg(hdev, "");
5205
5206         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5207         if (!conn)
5208                 return;
5209
5210         switch (ev->type) {
5211         case HCI_KEYPRESS_STARTED:
5212                 conn->passkey_entered = 0;
5213                 return;
5214
5215         case HCI_KEYPRESS_ENTERED:
5216                 conn->passkey_entered++;
5217                 break;
5218
5219         case HCI_KEYPRESS_ERASED:
5220                 conn->passkey_entered--;
5221                 break;
5222
5223         case HCI_KEYPRESS_CLEARED:
5224                 conn->passkey_entered = 0;
5225                 break;
5226
5227         case HCI_KEYPRESS_COMPLETED:
5228                 return;
5229         }
5230
5231         if (hci_dev_test_flag(hdev, HCI_MGMT))
5232                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5233                                          conn->dst_type, conn->passkey_notify,
5234                                          conn->passkey_entered);
5235 }
5236
5237 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5238                                          struct sk_buff *skb)
5239 {
5240         struct hci_ev_simple_pair_complete *ev = data;
5241         struct hci_conn *conn;
5242
5243         bt_dev_dbg(hdev, "");
5244
5245         hci_dev_lock(hdev);
5246
5247         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5248         if (!conn)
5249                 goto unlock;
5250
5251         /* Reset the authentication requirement to unknown */
5252         conn->remote_auth = 0xff;
5253
5254         /* To avoid duplicate auth_failed events to user space we check
5255          * the HCI_CONN_AUTH_PEND flag, which will be set if we
5256          * initiated the authentication. A traditional auth_complete
5257          * event is always produced as initiator and is also mapped to
5258          * the mgmt_auth_failed event. */
5259         if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5260                 mgmt_auth_failed(conn, ev->status);
5261
5262         hci_conn_drop(conn);
5263
5264 unlock:
5265         hci_dev_unlock(hdev);
5266 }
5267
5268 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5269                                          struct sk_buff *skb)
5270 {
5271         struct hci_ev_remote_host_features *ev = data;
5272         struct inquiry_entry *ie;
5273         struct hci_conn *conn;
5274
5275         bt_dev_dbg(hdev, "");
5276
5277         hci_dev_lock(hdev);
5278
5279         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5280         if (conn)
5281                 memcpy(conn->features[1], ev->features, 8);
5282
5283         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5284         if (ie)
5285                 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5286
5287         hci_dev_unlock(hdev);
5288 }
5289
5290 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5291                                             struct sk_buff *skb)
5292 {
5293         struct hci_ev_remote_oob_data_request *ev = edata;
5294         struct oob_data *data;
5295
5296         bt_dev_dbg(hdev, "");
5297
5298         hci_dev_lock(hdev);
5299
5300         if (!hci_dev_test_flag(hdev, HCI_MGMT))
5301                 goto unlock;
5302
5303         data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5304         if (!data) {
5305                 struct hci_cp_remote_oob_data_neg_reply cp;
5306
5307                 bacpy(&cp.bdaddr, &ev->bdaddr);
5308                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5309                              sizeof(cp), &cp);
5310                 goto unlock;
5311         }
5312
5313         if (bredr_sc_enabled(hdev)) {
5314                 struct hci_cp_remote_oob_ext_data_reply cp;
5315
5316                 bacpy(&cp.bdaddr, &ev->bdaddr);
5317                 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5318                         memset(cp.hash192, 0, sizeof(cp.hash192));
5319                         memset(cp.rand192, 0, sizeof(cp.rand192));
5320                 } else {
5321                         memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5322                         memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5323                 }
5324                 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5325                 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5326
5327                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5328                              sizeof(cp), &cp);
5329         } else {
5330                 struct hci_cp_remote_oob_data_reply cp;
5331
5332                 bacpy(&cp.bdaddr, &ev->bdaddr);
5333                 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5334                 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5335
5336                 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5337                              sizeof(cp), &cp);
5338         }
5339
5340 unlock:
5341         hci_dev_unlock(hdev);
5342 }
5343
5344 #if IS_ENABLED(CONFIG_BT_HS)
5345 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5346                                   struct sk_buff *skb)
5347 {
5348         struct hci_ev_channel_selected *ev = data;
5349         struct hci_conn *hcon;
5350
5351         bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5352
5353         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5354         if (!hcon)
5355                 return;
5356
5357         amp_read_loc_assoc_final_data(hdev, hcon);
5358 }
5359
5360 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5361                                       struct sk_buff *skb)
5362 {
5363         struct hci_ev_phy_link_complete *ev = data;
5364         struct hci_conn *hcon, *bredr_hcon;
5365
5366         bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5367                    ev->status);
5368
5369         hci_dev_lock(hdev);
5370
5371         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5372         if (!hcon)
5373                 goto unlock;
5374
5375         if (!hcon->amp_mgr)
5376                 goto unlock;
5377
5378         if (ev->status) {
5379                 hci_conn_del(hcon);
5380                 goto unlock;
5381         }
5382
5383         bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5384
5385         hcon->state = BT_CONNECTED;
5386         bacpy(&hcon->dst, &bredr_hcon->dst);
5387
5388         hci_conn_hold(hcon);
5389         hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5390         hci_conn_drop(hcon);
5391
5392         hci_debugfs_create_conn(hcon);
5393         hci_conn_add_sysfs(hcon);
5394
5395         amp_physical_cfm(bredr_hcon, hcon);
5396
5397 unlock:
5398         hci_dev_unlock(hdev);
5399 }
5400
5401 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5402                                      struct sk_buff *skb)
5403 {
5404         struct hci_ev_logical_link_complete *ev = data;
5405         struct hci_conn *hcon;
5406         struct hci_chan *hchan;
5407         struct amp_mgr *mgr;
5408
5409         bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5410                    le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5411
5412         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5413         if (!hcon)
5414                 return;
5415
5416         /* Create AMP hchan */
5417         hchan = hci_chan_create(hcon);
5418         if (!hchan)
5419                 return;
5420
5421         hchan->handle = le16_to_cpu(ev->handle);
5422         hchan->amp = true;
5423
5424         BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5425
5426         mgr = hcon->amp_mgr;
5427         if (mgr && mgr->bredr_chan) {
5428                 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5429
5430                 l2cap_chan_lock(bredr_chan);
5431
5432                 bredr_chan->conn->mtu = hdev->block_mtu;
5433                 l2cap_logical_cfm(bredr_chan, hchan, 0);
5434                 hci_conn_hold(hcon);
5435
5436                 l2cap_chan_unlock(bredr_chan);
5437         }
5438 }
5439
5440 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5441                                              struct sk_buff *skb)
5442 {
5443         struct hci_ev_disconn_logical_link_complete *ev = data;
5444         struct hci_chan *hchan;
5445
5446         bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5447                    le16_to_cpu(ev->handle), ev->status);
5448
5449         if (ev->status)
5450                 return;
5451
5452         hci_dev_lock(hdev);
5453
5454         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5455         if (!hchan || !hchan->amp)
5456                 goto unlock;
5457
5458         amp_destroy_logical_link(hchan, ev->reason);
5459
5460 unlock:
5461         hci_dev_unlock(hdev);
5462 }
5463
5464 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5465                                              struct sk_buff *skb)
5466 {
5467         struct hci_ev_disconn_phy_link_complete *ev = data;
5468         struct hci_conn *hcon;
5469
5470         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5471
5472         if (ev->status)
5473                 return;
5474
5475         hci_dev_lock(hdev);
5476
5477         hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5478         if (hcon && hcon->type == AMP_LINK) {
5479                 hcon->state = BT_CLOSED;
5480                 hci_disconn_cfm(hcon, ev->reason);
5481                 hci_conn_del(hcon);
5482         }
5483
5484         hci_dev_unlock(hdev);
5485 }
5486 #endif
5487
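/* Fill in the initiator and responder address information of an LE
 * connection from the local point of view: for outgoing connections
 * (conn->out set) the peer is the responder, for incoming connections
 * the peer is the initiator.
 */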
5488 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5489                                 u8 bdaddr_type, bdaddr_t *local_rpa)
5490 {
5491         if (conn->out) {
5492                 conn->dst_type = bdaddr_type;
5493                 conn->resp_addr_type = bdaddr_type;
5494                 bacpy(&conn->resp_addr, bdaddr);
5495
5496                 /* If the controller has set a Local RPA then it must be
5497                  * used instead of hdev->rpa.
5498                  */
5499                 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5500                         conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5501                         bacpy(&conn->init_addr, local_rpa);
5502                 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5503                         conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5504                         bacpy(&conn->init_addr, &conn->hdev->rpa);
5505                 } else {
5506                         hci_copy_identity_address(conn->hdev, &conn->init_addr,
5507                                                   &conn->init_addr_type);
5508                 }
5509         } else {
5510                 conn->resp_addr_type = conn->hdev->adv_addr_type;
5511                 /* If the controller has set a Local RPA then it must be
5512                  * used instead of hdev->rpa.
5513                  */
5514                 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5515                         conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5516                         bacpy(&conn->resp_addr, local_rpa);
5517                 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5518                         /* In case of ext adv, resp_addr will be updated in
5519                          * Adv Terminated event.
5520                          */
5521                         if (!ext_adv_capable(conn->hdev))
5522                                 bacpy(&conn->resp_addr,
5523                                       &conn->hdev->random_addr);
5524                 } else {
5525                         bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5526                 }
5527
5528                 conn->init_addr_type = bdaddr_type;
5529                 bacpy(&conn->init_addr, bdaddr);
5530
5531                 /* For incoming connections, set the default minimum
5532                  * and maximum connection interval. They will be used
5533                  * to check if the parameters are in range and, if not,
5534                  * to trigger the connection update procedure.
5535                  */
5536                 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5537                 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5538         }
5539 }
5540
5541 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5542                                  bdaddr_t *bdaddr, u8 bdaddr_type,
5543                                  bdaddr_t *local_rpa, u8 role, u16 handle,
5544                                  u16 interval, u16 latency,
5545                                  u16 supervision_timeout)
5546 {
5547         struct hci_conn_params *params;
5548         struct hci_conn *conn;
5549         struct smp_irk *irk;
5550         u8 addr_type;
5551
5552         hci_dev_lock(hdev);
5553
5554         /* All controllers implicitly stop advertising in the event of a
5555          * connection, so ensure that the state bit is cleared.
5556          */
5557         hci_dev_clear_flag(hdev, HCI_LE_ADV);
5558
5559         conn = hci_lookup_le_connect(hdev);
5560         if (!conn) {
5561                 /* In case of error status and there is no connection pending
5562                  * just unlock as there is nothing to cleanup.
5563                  */
5564                 if (status)
5565                         goto unlock;
5566
5567                 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5568                 if (!conn) {
5569                         bt_dev_err(hdev, "no memory for new connection");
5570                         goto unlock;
5571                 }
5572
5573                 conn->dst_type = bdaddr_type;
5574
5575                 /* If we didn't have a hci_conn object previously
5576                  * but we're in central role, this must be something
5577                  * initiated using an accept list. Since accept list based
5578                  * connections are not "first class citizens" we don't
5579                  * have full tracking of them. Therefore, we go ahead
5580                  * with a "best effort" approach of determining the
5581                  * initiator address based on the HCI_PRIVACY flag.
5582                  */
5583                 if (conn->out) {
5584                         conn->resp_addr_type = bdaddr_type;
5585                         bacpy(&conn->resp_addr, bdaddr);
5586                         if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5587                                 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5588                                 bacpy(&conn->init_addr, &hdev->rpa);
5589                         } else {
5590                                 hci_copy_identity_address(hdev,
5591                                                           &conn->init_addr,
5592                                                           &conn->init_addr_type);
5593                         }
5594                 }
5595         } else {
5596                 cancel_delayed_work(&conn->le_conn_timeout);
5597         }
5598
5599         /* The HCI_LE_Connection_Complete event is only sent once per connection.
5600          * Processing it more than once per connection can corrupt kernel memory.
5601          *
5602          * As the connection handle is set here for the first time, it indicates
5603          * whether the connection is already set up.
5604          */
5605         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5606                 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5607                 goto unlock;
5608         }
5609
5610         le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5611
5612         /* Lookup the identity address from the stored connection
5613          * address and address type.
5614          *
5615          * When establishing connections to an identity address, the
5616          * connection procedure will store the resolvable random
5617          * address first. Now if it can be converted back into the
5618          * identity address, start using the identity address from
5619          * now on.
5620          */
5621         irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5622         if (irk) {
5623                 bacpy(&conn->dst, &irk->bdaddr);
5624                 conn->dst_type = irk->addr_type;
5625         }
5626
5627         conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5628
5629         if (handle > HCI_CONN_HANDLE_MAX) {
5630                 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
5631                            HCI_CONN_HANDLE_MAX);
5632                 status = HCI_ERROR_INVALID_PARAMETERS;
5633         }
5634
5635         /* All connection failure handling is taken care of by the
5636          * hci_conn_failed function which is triggered by the HCI
5637          * request completion callbacks used for connecting.
5638          */
5639         if (status)
5640                 goto unlock;
5641
5642         if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5643                 addr_type = BDADDR_LE_PUBLIC;
5644         else
5645                 addr_type = BDADDR_LE_RANDOM;
5646
5647         /* Drop the connection if the device is blocked */
5648         if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5649                 hci_conn_drop(conn);
5650                 goto unlock;
5651         }
5652
5653         if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5654                 mgmt_device_connected(hdev, conn, NULL, 0);
5655
5656         conn->sec_level = BT_SECURITY_LOW;
5657         conn->handle = handle;
5658         conn->state = BT_CONFIG;
5659
5660         /* Store the current advertising instance as the connection's
5661          * advertising instance when software rotation is in use so it
5662          * can be re-enabled when disconnected.
5663          */
5664         if (!ext_adv_capable(hdev))
5665                 conn->adv_instance = hdev->cur_adv_instance;
5666
5667         conn->le_conn_interval = interval;
5668         conn->le_conn_latency = latency;
5669         conn->le_supv_timeout = supervision_timeout;
5670
5671         hci_debugfs_create_conn(conn);
5672         hci_conn_add_sysfs(conn);
5673
5674         /* The remote features procedure is defined for the central
5675          * role only, so request the remote features only for an
5676          * initiated (outgoing) connection.
5677          *
5678          * If the local controller supports peripheral-initiated features
5679          * exchange, then requesting the remote features in peripheral
5680          * role is possible. Otherwise just transition into the
5681          * connected state without requesting the remote features.
5682          */
5683         if (conn->out ||
5684             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5685                 struct hci_cp_le_read_remote_features cp;
5686
5687                 cp.handle = __cpu_to_le16(conn->handle);
5688
5689                 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5690                              sizeof(cp), &cp);
5691
5692                 hci_conn_hold(conn);
5693         } else {
5694                 conn->state = BT_CONNECTED;
5695                 hci_connect_cfm(conn, status);
5696         }
5697
5698         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5699                                            conn->dst_type);
5700         if (params) {
5701                 list_del_init(&params->action);
5702                 if (params->conn) {
5703                         hci_conn_drop(params->conn);
5704                         hci_conn_put(params->conn);
5705                         params->conn = NULL;
5706                 }
5707         }
5708
5709 unlock:
5710         hci_update_passive_scan(hdev);
5711         hci_dev_unlock(hdev);
5712 }
5713
5714 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5715                                      struct sk_buff *skb)
5716 {
5717         struct hci_ev_le_conn_complete *ev = data;
5718
5719         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5720
5721         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5722                              NULL, ev->role, le16_to_cpu(ev->handle),
5723                              le16_to_cpu(ev->interval),
5724                              le16_to_cpu(ev->latency),
5725                              le16_to_cpu(ev->supervision_timeout));
5726 }
5727
5728 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5729                                          struct sk_buff *skb)
5730 {
5731         struct hci_ev_le_enh_conn_complete *ev = data;
5732
5733         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5734
5735         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5736                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5737                              le16_to_cpu(ev->interval),
5738                              le16_to_cpu(ev->latency),
5739                              le16_to_cpu(ev->supervision_timeout));
5740 }
5741
5742 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5743                                     struct sk_buff *skb)
5744 {
5745         struct hci_evt_le_ext_adv_set_term *ev = data;
5746         struct hci_conn *conn;
5747         struct adv_info *adv, *n;
5748
5749         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5750
5751         /* The Bluetooth Core 5.3 specification clearly states that this event
5752          * shall not be sent when the Host disables the advertising set. So in
5753          * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5754          *
5755          * When the Host disables an advertising set, all cleanup is done via
5756          * its command callback and does not need to be duplicated here.
5757          */
5758         if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5759                 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5760                 return;
5761         }
5762
5763         hci_dev_lock(hdev);
5764
5765         adv = hci_find_adv_instance(hdev, ev->handle);
5766
5767         if (ev->status) {
5768                 if (!adv)
5769                         goto unlock;
5770
5771                 /* Remove advertising as it has been terminated */
5772                 hci_remove_adv_instance(hdev, ev->handle);
5773                 mgmt_advertising_removed(NULL, hdev, ev->handle);
5774
5775                 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5776                         if (adv->enabled)
5777                                 goto unlock;
5778                 }
5779
5780                 /* We are no longer advertising, clear HCI_LE_ADV */
5781                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5782                 goto unlock;
5783         }
5784
5785         if (adv)
5786                 adv->enabled = false;
5787
5788         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5789         if (conn) {
5790                 /* Store handle in the connection so the correct advertising
5791                  * instance can be re-enabled when disconnected.
5792                  */
5793                 conn->adv_instance = ev->handle;
5794
5795                 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5796                     bacmp(&conn->resp_addr, BDADDR_ANY))
5797                         goto unlock;
5798
5799                 if (!ev->handle) {
5800                         bacpy(&conn->resp_addr, &hdev->random_addr);
5801                         goto unlock;
5802                 }
5803
5804                 if (adv)
5805                         bacpy(&conn->resp_addr, &adv->random_addr);
5806         }
5807
5808 unlock:
5809         hci_dev_unlock(hdev);
5810 }
5811
5812 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
5813                                             struct sk_buff *skb)
5814 {
5815         struct hci_ev_le_conn_update_complete *ev = data;
5816         struct hci_conn *conn;
5817
5818         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5819
5820         if (ev->status)
5821                 return;
5822
5823         hci_dev_lock(hdev);
5824
5825         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5826         if (conn) {
5827                 conn->le_conn_interval = le16_to_cpu(ev->interval);
5828                 conn->le_conn_latency = le16_to_cpu(ev->latency);
5829                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5830         }
5831
5832         hci_dev_unlock(hdev);
5833 }
5834
5835 /* This function requires the caller holds hdev->lock */
5836 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5837                                               bdaddr_t *addr,
5838                                               u8 addr_type, bool addr_resolved,
5839                                               u8 adv_type)
5840 {
5841         struct hci_conn *conn;
5842         struct hci_conn_params *params;
5843
5844         /* If the event is not connectable don't proceed further */
5845         if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5846                 return NULL;
5847
5848         /* Ignore if the device is blocked or hdev is suspended */
5849         if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
5850             hdev->suspended)
5851                 return NULL;
5852
5853         /* Most controllers will fail if we try to create new connections
5854          * while we have an existing one in peripheral role.
5855          */
5856         if (hdev->conn_hash.le_num_peripheral > 0 &&
5857             (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5858              !(hdev->le_states[3] & 0x10)))
5859                 return NULL;
5860
5861         /* If we're not connectable, only connect to devices that we have
5862          * in our pend_le_conns list.
5863          */
5864         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5865                                            addr_type);
5866         if (!params)
5867                 return NULL;
5868
5869         if (!params->explicit_connect) {
5870                 switch (params->auto_connect) {
5871                 case HCI_AUTO_CONN_DIRECT:
5872                         /* Only devices advertising with ADV_DIRECT_IND
5873                          * trigger a connection attempt. This allows
5874                          * incoming connections from peripheral devices.
5875                          */
5876                         if (adv_type != LE_ADV_DIRECT_IND)
5877                                 return NULL;
5878                         break;
5879                 case HCI_AUTO_CONN_ALWAYS:
5880                         /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5881                          * trigger a connection attempt. This means that
5882                          * incoming connections from peripheral devices are
5883                          * accepted and also that outgoing connections to
5884                          * peripheral devices are established when found.
5885                          */
5886                         break;
5887                 default:
5888                         return NULL;
5889                 }
5890         }
5891
5892         conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
5893                               BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
5894                               HCI_ROLE_MASTER);
5895         if (!IS_ERR(conn)) {
5896                 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5897                  * by the higher layer that tried to connect; if not, then
5898                  * store the pointer since we don't really have any
5899                  * other owner of the object besides the params that
5900                  * triggered it. This way we can abort the connection if
5901                  * the parameters get removed and keep the reference
5902                  * count consistent once the connection is established.
5903                  */
5904
5905                 if (!params->explicit_connect)
5906                         params->conn = hci_conn_get(conn);
5907
5908                 return conn;
5909         }
5910
5911         switch (PTR_ERR(conn)) {
5912         case -EBUSY:
5913                 /* If hci_connect() returns -EBUSY it means there is already
5914                  * an LE connection attempt going on. Since controllers don't
5915                  * support more than one connection attempt at a time, we
5916                  * don't consider this an error case.
5917                  */
5918                 break;
5919         default:
5920                 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5921                 return NULL;
5922         }
5923
5924         return NULL;
5925 }
5926
5927 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5928                                u8 bdaddr_type, bdaddr_t *direct_addr,
5929                                u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5930                                bool ext_adv)
5931 {
5932         struct discovery_state *d = &hdev->discovery;
5933         struct smp_irk *irk;
5934         struct hci_conn *conn;
5935         bool match, bdaddr_resolved;
5936         u32 flags;
5937         u8 *ptr;
5938
5939         switch (type) {
5940         case LE_ADV_IND:
5941         case LE_ADV_DIRECT_IND:
5942         case LE_ADV_SCAN_IND:
5943         case LE_ADV_NONCONN_IND:
5944         case LE_ADV_SCAN_RSP:
5945                 break;
5946         default:
5947                 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5948                                        "type: 0x%02x", type);
5949                 return;
5950         }
5951
5952         if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5953                 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5954                 return;
5955         }
5956
5957         /* Find the end of the data in case the report contains padded zero
5958          * bytes at the end causing an invalid length value.
5959          *
5960          * When data is NULL, len is 0 so there is no need for an extra ptr
5961          * check as 'ptr < data + 0' is already false in that case.
5962          */
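        /* For example, a 10 byte report whose first field has a length byte
         * of 3 followed by zero padding ends up being truncated to 4 bytes.
         */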
5963         for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5964                 if (ptr + 1 + *ptr > data + len)
5965                         break;
5966         }
5967
5968         /* Adjust for actual length. This handles the case when remote
5969          * device is advertising with incorrect data length.
5970          */
5971         len = ptr - data;
5972
5973         /* If the direct address is present, then this report is from
5974          * a LE Direct Advertising Report event. In that case it is
5975          * important to see if the address is matching the local
5976          * controller address.
5977          */
5978         if (direct_addr) {
5979                 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
5980                                                   &bdaddr_resolved);
5981
5982                 /* Only resolvable random addresses are valid for this
5983                  * kind of report and others can be ignored.
5984                  */
5985                 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5986                         return;
5987
5988                 /* If the controller is not using resolvable random
5989                  * addresses, then this report can be ignored.
5990                  */
5991                 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5992                         return;
5993
5994                 /* If the local IRK of the controller does not match
5995                  * with the resolvable random address provided, then
5996                  * this report can be ignored.
5997                  */
5998                 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5999                         return;
6000         }
6001
6002         /* Check if we need to convert to identity address */
6003         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6004         if (irk) {
6005                 bdaddr = &irk->bdaddr;
6006                 bdaddr_type = irk->addr_type;
6007         }
6008
6009         bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6010
6011         /* Check if we have been requested to connect to this device.
6012          *
6013          * direct_addr is set only for directed advertising reports (it is NULL
6014          * for advertising reports) and is already verified to be RPA above.
6015          */
6016         conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6017                                      type);
6018         if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
6019                 /* Store report for later inclusion by
6020                  * mgmt_device_connected
6021                  */
6022                 memcpy(conn->le_adv_data, data, len);
6023                 conn->le_adv_data_len = len;
6024         }
6025
6026         /* Passive scanning shouldn't trigger any device found events,
6027          * except for devices marked as CONN_REPORT for which we do send
6028          * device found events, or when advertisement monitoring is requested.
6029          */
6030         if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6031                 if (type == LE_ADV_DIRECT_IND)
6032                         return;
6033
6034                 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6035                                                bdaddr, bdaddr_type) &&
6036                     idr_is_empty(&hdev->adv_monitors_idr))
6037                         return;
6038
6039                 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6040                         flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6041                 else
6042                         flags = 0;
6043                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6044                                   rssi, flags, data, len, NULL, 0);
6045                 return;
6046         }
6047
6048         /* When receiving a non-connectable or scannable undirected
6049          * advertising report, this means that the remote device is
6050          * not connectable, so clearly indicate this in the
6051          * device found event.
6052          *
6053          * When receiving a scan response, there is no way to know
6054          * whether the remote device is connectable or not. However,
6055          * since scan responses are merged with a previously seen
6056          * advertising report, the flags field from that report
6057          * will be used.
6058          *
6059          * In the really unlikely case that a controller gets confused
6060          * and just sends a scan response event, it is marked as
6061          * not connectable as well.
6062          */
6063         if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
6064             type == LE_ADV_SCAN_RSP)
6065                 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6066         else
6067                 flags = 0;
6068
6069         /* If there's nothing pending, either store the data from this
6070          * event or send an immediate device found event if the data
6071          * should not be stored for later.
6072          */
6073         if (!ext_adv && !has_pending_adv_report(hdev)) {
6074                 /* If the report will trigger a SCAN_REQ store it for
6075                  * later merging.
6076                  */
6077                 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6078                         store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6079                                                  rssi, flags, data, len);
6080                         return;
6081                 }
6082
6083                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6084                                   rssi, flags, data, len, NULL, 0);
6085                 return;
6086         }
6087
6088         /* Check if the pending report is for the same device as the new one */
6089         match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6090                  bdaddr_type == d->last_adv_addr_type);
6091
6092         /* If the pending data doesn't match this report or this isn't a
6093          * scan response (e.g. we got a duplicate ADV_IND) then force
6094          * sending of the pending data.
6095          */
6096         if (type != LE_ADV_SCAN_RSP || !match) {
6097                 /* Send out whatever is in the cache, but skip duplicates */
6098                 if (!match)
6099                         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6100                                           d->last_adv_addr_type, NULL,
6101                                           d->last_adv_rssi, d->last_adv_flags,
6102                                           d->last_adv_data,
6103                                           d->last_adv_data_len, NULL, 0);
6104
6105                 /* If the new report will trigger a SCAN_REQ store it for
6106                  * later merging.
6107                  */
6108                 if (!ext_adv && (type == LE_ADV_IND ||
6109                                  type == LE_ADV_SCAN_IND)) {
6110                         store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6111                                                  rssi, flags, data, len);
6112                         return;
6113                 }
6114
6115                 /* The advertising reports cannot be merged, so clear
6116                  * the pending report and send out a device found event.
6117                  */
6118                 clear_pending_adv_report(hdev);
6119                 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6120                                   rssi, flags, data, len, NULL, 0);
6121                 return;
6122         }
6123
6124         /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6125          * the new event is a SCAN_RSP. We can therefore proceed with
6126          * sending a merged device found event.
6127          */
6128         mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6129                           d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6130                           d->last_adv_data, d->last_adv_data_len, data, len);
6131         clear_pending_adv_report(hdev);
6132 }
6133
6134 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6135                                   struct sk_buff *skb)
6136 {
6137         struct hci_ev_le_advertising_report *ev = data;
6138
6139         if (!ev->num)
6140                 return;
6141
6142         hci_dev_lock(hdev);
6143
6144         while (ev->num--) {
6145                 struct hci_ev_le_advertising_info *info;
6146                 s8 rssi;
6147
6148                 info = hci_le_ev_skb_pull(hdev, skb,
6149                                           HCI_EV_LE_ADVERTISING_REPORT,
6150                                           sizeof(*info));
6151                 if (!info)
6152                         break;
6153
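                 /* Each legacy report entry carries a trailing RSSI byte after
                  * the advertising data, hence the extra octet pulled here.
                  */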
6154                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6155                                         info->length + 1))
6156                         break;
6157
6158                 if (info->length <= HCI_MAX_AD_LENGTH) {
6159                         rssi = info->data[info->length];
6160                         process_adv_report(hdev, info->type, &info->bdaddr,
6161                                            info->bdaddr_type, NULL, 0, rssi,
6162                                            info->data, info->length, false);
6163                 } else {
6164                         bt_dev_err(hdev, "Dropping invalid advertising data");
6165                 }
6166         }
6167
6168         hci_dev_unlock(hdev);
6169 }
6170
6171 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6172 {
6173         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6174                 switch (evt_type) {
6175                 case LE_LEGACY_ADV_IND:
6176                         return LE_ADV_IND;
6177                 case LE_LEGACY_ADV_DIRECT_IND:
6178                         return LE_ADV_DIRECT_IND;
6179                 case LE_LEGACY_ADV_SCAN_IND:
6180                         return LE_ADV_SCAN_IND;
6181                 case LE_LEGACY_NONCONN_IND:
6182                         return LE_ADV_NONCONN_IND;
6183                 case LE_LEGACY_SCAN_RSP_ADV:
6184                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6185                         return LE_ADV_SCAN_RSP;
6186                 }
6187
6188                 goto invalid;
6189         }
6190
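         /* For extended (non-legacy) PDUs the event type is a bit field of PDU
          * properties (connectable, scannable, directed, scan response), so
          * map those property bits to the closest legacy advertising type.
          */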
6191         if (evt_type & LE_EXT_ADV_CONN_IND) {
6192                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6193                         return LE_ADV_DIRECT_IND;
6194
6195                 return LE_ADV_IND;
6196         }
6197
6198         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6199                 return LE_ADV_SCAN_RSP;
6200
6201         if (evt_type & LE_EXT_ADV_SCAN_IND)
6202                 return LE_ADV_SCAN_IND;
6203
6204         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6205             evt_type & LE_EXT_ADV_DIRECT_IND)
6206                 return LE_ADV_NONCONN_IND;
6207
6208 invalid:
6209         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6210                                evt_type);
6211
6212         return LE_ADV_INVALID;
6213 }
6214
6215 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6216                                       struct sk_buff *skb)
6217 {
6218         struct hci_ev_le_ext_adv_report *ev = data;
6219
6220         if (!ev->num)
6221                 return;
6222
6223         hci_dev_lock(hdev);
6224
6225         while (ev->num--) {
6226                 struct hci_ev_le_ext_adv_info *info;
6227                 u8 legacy_evt_type;
6228                 u16 evt_type;
6229
6230                 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6231                                           sizeof(*info));
6232                 if (!info)
6233                         break;
6234
6235                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6236                                         info->length))
6237                         break;
6238
6239                 evt_type = __le16_to_cpu(info->type);
6240                 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
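                 /* Reports whose type cannot be mapped to a legacy type have
                  * already been logged by ext_evt_type_to_legacy() and are
                  * skipped here.
                  */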
6241                 if (legacy_evt_type != LE_ADV_INVALID) {
6242                         process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6243                                            info->bdaddr_type, NULL, 0,
6244                                            info->rssi, info->data, info->length,
6245                                            !(evt_type & LE_EXT_ADV_LEGACY_PDU));
6246                 }
6247         }
6248
6249         hci_dev_unlock(hdev);
6250 }
6251
6252 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6253                                             struct sk_buff *skb)
6254 {
6255         struct hci_ev_le_remote_feat_complete *ev = data;
6256         struct hci_conn *conn;
6257
6258         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6259
6260         hci_dev_lock(hdev);
6261
6262         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6263         if (conn) {
6264                 if (!ev->status)
6265                         memcpy(conn->features[0], ev->features, 8);
6266
6267                 if (conn->state == BT_CONFIG) {
6268                         __u8 status;
6269
6270                         /* If the local controller supports peripheral-initiated
6271                          * features exchange, but the remote controller does
6272                          * not, then it is possible that the error code 0x1a
6273                          * for unsupported remote feature gets returned.
6274                          *
6275                          * In this specific case, allow the connection to
6276                          * transition into connected state and mark it as
6277                          * successful.
6278                          */
6279                         if (!conn->out && ev->status == 0x1a &&
6280                             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6281                                 status = 0x00;
6282                         else
6283                                 status = ev->status;
6284
6285                         conn->state = BT_CONNECTED;
6286                         hci_connect_cfm(conn, status);
6287                         hci_conn_drop(conn);
6288                 }
6289         }
6290
6291         hci_dev_unlock(hdev);
6292 }
6293
6294 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6295                                    struct sk_buff *skb)
6296 {
6297         struct hci_ev_le_ltk_req *ev = data;
6298         struct hci_cp_le_ltk_reply cp;
6299         struct hci_cp_le_ltk_neg_reply neg;
6300         struct hci_conn *conn;
6301         struct smp_ltk *ltk;
6302
6303         bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6304
6305         hci_dev_lock(hdev);
6306
6307         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6308         if (!conn)
6309                 goto not_found;
6310
6311         ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6312         if (!ltk)
6313                 goto not_found;
6314
6315         if (smp_ltk_is_sc(ltk)) {
6316                 /* With SC both EDiv and Rand are set to zero */
6317                 if (ev->ediv || ev->rand)
6318                         goto not_found;
6319         } else {
6320                 /* For non-SC keys check that EDiv and Rand match */
6321                 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6322                         goto not_found;
6323         }
6324
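         /* Only enc_size bytes of the stored key are significant; zero the
          * remainder of the LTK in the reply.
          */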
6325         memcpy(cp.ltk, ltk->val, ltk->enc_size);
6326         memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6327         cp.handle = cpu_to_le16(conn->handle);
6328
6329         conn->pending_sec_level = smp_ltk_sec_level(ltk);
6330
6331         conn->enc_key_size = ltk->enc_size;
6332
6333         hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6334
6335         /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6336          * temporary key used to encrypt a connection following
6337          * pairing. It is used during the Encrypted Session Setup to
6338          * distribute the keys. Later, security can be re-established
6339          * using a distributed LTK.
6340          */
6341         if (ltk->type == SMP_STK) {
6342                 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6343                 list_del_rcu(&ltk->list);
6344                 kfree_rcu(ltk, rcu);
6345         } else {
6346                 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6347         }
6348
6349         hci_dev_unlock(hdev);
6350
6351         return;
6352
6353 not_found:
6354         neg.handle = ev->handle;
6355         hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6356         hci_dev_unlock(hdev);
6357 }
6358
6359 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6360                                       u8 reason)
6361 {
6362         struct hci_cp_le_conn_param_req_neg_reply cp;
6363
6364         cp.handle = cpu_to_le16(handle);
6365         cp.reason = reason;
6366
6367         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6368                      &cp);
6369 }
6370
6371 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6372                                              struct sk_buff *skb)
6373 {
6374         struct hci_ev_le_remote_conn_param_req *ev = data;
6375         struct hci_cp_le_conn_param_req_reply cp;
6376         struct hci_conn *hcon;
6377         u16 handle, min, max, latency, timeout;
6378
6379         bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6380
6381         handle = le16_to_cpu(ev->handle);
6382         min = le16_to_cpu(ev->interval_min);
6383         max = le16_to_cpu(ev->interval_max);
6384         latency = le16_to_cpu(ev->latency);
6385         timeout = le16_to_cpu(ev->timeout);
6386
6387         hcon = hci_conn_hash_lookup_handle(hdev, handle);
6388         if (!hcon || hcon->state != BT_CONNECTED)
6389                 return send_conn_param_neg_reply(hdev, handle,
6390                                                  HCI_ERROR_UNKNOWN_CONN_ID);
6391
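         /* Reject parameter values that fall outside the ranges permitted by
          * the specification.
          */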
6392         if (hci_check_conn_params(min, max, latency, timeout))
6393                 return send_conn_param_neg_reply(hdev, handle,
6394                                                  HCI_ERROR_INVALID_LL_PARAMS);
6395
6396         if (hcon->role == HCI_ROLE_MASTER) {
6397                 struct hci_conn_params *params;
6398                 u8 store_hint;
6399
6400                 hci_dev_lock(hdev);
6401
6402                 params = hci_conn_params_lookup(hdev, &hcon->dst,
6403                                                 hcon->dst_type);
6404                 if (params) {
6405                         params->conn_min_interval = min;
6406                         params->conn_max_interval = max;
6407                         params->conn_latency = latency;
6408                         params->supervision_timeout = timeout;
6409                         store_hint = 0x01;
6410                 } else {
6411                         store_hint = 0x00;
6412                 }
6413
6414                 hci_dev_unlock(hdev);
6415
6416                 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6417                                     store_hint, min, max, latency, timeout);
6418         }
6419
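         /* Accept the request by echoing the proposed parameters back to the
          * controller.
          */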
6420         cp.handle = ev->handle;
6421         cp.interval_min = ev->interval_min;
6422         cp.interval_max = ev->interval_max;
6423         cp.latency = ev->latency;
6424         cp.timeout = ev->timeout;
6425         cp.min_ce_len = 0;
6426         cp.max_ce_len = 0;
6427
6428         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6429 }
6430
6431 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6432                                          struct sk_buff *skb)
6433 {
6434         struct hci_ev_le_direct_adv_report *ev = data;
6435         int i;
6436
6437         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6438                                 flex_array_size(ev, info, ev->num)))
6439                 return;
6440
6441         if (!ev->num)
6442                 return;
6443
6444         hci_dev_lock(hdev);
6445
6446         for (i = 0; i < ev->num; i++) {
6447                 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6448
6449                 process_adv_report(hdev, info->type, &info->bdaddr,
6450                                    info->bdaddr_type, &info->direct_addr,
6451                                    info->direct_addr_type, info->rssi, NULL, 0,
6452                                    false);
6453         }
6454
6455         hci_dev_unlock(hdev);
6456 }
6457
6458 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6459                                   struct sk_buff *skb)
6460 {
6461         struct hci_ev_le_phy_update_complete *ev = data;
6462         struct hci_conn *conn;
6463
6464         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6465
6466         if (ev->status)
6467                 return;
6468
6469         hci_dev_lock(hdev);
6470
6471         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6472         if (!conn)
6473                 goto unlock;
6474
6475         conn->le_tx_phy = ev->tx_phy;
6476         conn->le_rx_phy = ev->rx_phy;
6477
6478 unlock:
6479         hci_dev_unlock(hdev);
6480 }
6481
6482 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
6483 [_op] = { \
6484         .func = _func, \
6485         .min_len = _min_len, \
6486         .max_len = _max_len, \
6487 }
6488
6489 #define HCI_LE_EV(_op, _func, _len) \
6490         HCI_LE_EV_VL(_op, _func, _len, _len)
6491
6492 #define HCI_LE_EV_STATUS(_op, _func) \
6493         HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
6494
6495 /* Entries in this table shall be positioned according to the subevent opcode
6496  * they handle, so use of the macros above is recommended since they initialize
6497  * each entry at its proper index using designated initializers; that way,
6498  * subevents without a callback function can be omitted.
6499  */
6500 static const struct hci_le_ev {
6501         void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
6502         u16  min_len;
6503         u16  max_len;
6504 } hci_le_ev_table[U8_MAX + 1] = {
6505         /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
6506         HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
6507                   sizeof(struct hci_ev_le_conn_complete)),
6508         /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
6509         HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
6510                      sizeof(struct hci_ev_le_advertising_report),
6511                      HCI_MAX_EVENT_SIZE),
6512         /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
6513         HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
6514                   hci_le_conn_update_complete_evt,
6515                   sizeof(struct hci_ev_le_conn_update_complete)),
6516         /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
6517         HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
6518                   hci_le_remote_feat_complete_evt,
6519                   sizeof(struct hci_ev_le_remote_feat_complete)),
6520         /* [0x05 = HCI_EV_LE_LTK_REQ] */
6521         HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
6522                   sizeof(struct hci_ev_le_ltk_req)),
6523         /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
6524         HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
6525                   hci_le_remote_conn_param_req_evt,
6526                   sizeof(struct hci_ev_le_remote_conn_param_req)),
6527         /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
6528         HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
6529                   hci_le_enh_conn_complete_evt,
6530                   sizeof(struct hci_ev_le_enh_conn_complete)),
6531         /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
6532         HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
6533                      sizeof(struct hci_ev_le_direct_adv_report),
6534                      HCI_MAX_EVENT_SIZE),
6535         /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
6536         HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
6537                   sizeof(struct hci_ev_le_phy_update_complete)),
6538         /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
6539         HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
6540                      sizeof(struct hci_ev_le_ext_adv_report),
6541                      HCI_MAX_EVENT_SIZE),
6542         /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
6543         HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
6544                   sizeof(struct hci_evt_le_ext_adv_set_term)),
6545 };
6546
6547 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
6548                             struct sk_buff *skb, u16 *opcode, u8 *status,
6549                             hci_req_complete_t *req_complete,
6550                             hci_req_complete_skb_t *req_complete_skb)
6551 {
6552         struct hci_ev_le_meta *ev = data;
6553         const struct hci_le_ev *subev;
6554
6555         bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
6556
6557         /* Only match event if command OGF is for LE */
6558         if (hdev->sent_cmd &&
6559             hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
6560             hci_skb_event(hdev->sent_cmd) == ev->subevent) {
6561                 *opcode = hci_skb_opcode(hdev->sent_cmd);
6562                 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
6563                                      req_complete_skb);
6564         }
6565
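         /* The subevent code indexes directly into hci_le_ev_table; entries
          * without a handler were left zeroed by the designated initializers.
          */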
6566         subev = &hci_le_ev_table[ev->subevent];
6567         if (!subev->func)
6568                 return;
6569
6570         if (skb->len < subev->min_len) {
6571                 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
6572                            ev->subevent, skb->len, subev->min_len);
6573                 return;
6574         }
6575
6576         /* Just warn if the length is over max_len, since it may still be
6577          * possible to partially parse the event, so leave it to the
6578          * callback to decide if that is acceptable.
6579          */
6580         if (skb->len > subev->max_len)
6581                 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
6582                             ev->subevent, skb->len, subev->max_len);
6583
6584         data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
6585         if (!data)
6586                 return;
6587
6588         subev->func(hdev, data, skb);
6589 }
6590
6591 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6592                                  u8 event, struct sk_buff *skb)
6593 {
6594         struct hci_ev_cmd_complete *ev;
6595         struct hci_event_hdr *hdr;
6596
6597         if (!skb)
6598                 return false;
6599
6600         hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
6601         if (!hdr)
6602                 return false;
6603
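         /* If the request expected a specific event, it is enough to check
          * that the received event matches it.
          */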
6604         if (event) {
6605                 if (hdr->evt != event)
6606                         return false;
6607                 return true;
6608         }
6609
6610         /* Check if request ended in Command Status - no way to retrieve
6611          * any extra parameters in this case.
6612          */
6613         if (hdr->evt == HCI_EV_CMD_STATUS)
6614                 return false;
6615
6616         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6617                 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6618                            hdr->evt);
6619                 return false;
6620         }
6621
6622         ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
6623         if (!ev)
6624                 return false;
6625
6626         if (opcode != __le16_to_cpu(ev->opcode)) {
6627                 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
6628                        __le16_to_cpu(ev->opcode));
6629                 return false;
6630         }
6631
6632         return true;
6633 }
6634
6635 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6636                                   struct sk_buff *skb)
6637 {
6638         struct hci_ev_le_advertising_info *adv;
6639         struct hci_ev_le_direct_adv_info *direct_adv;
6640         struct hci_ev_le_ext_adv_info *ext_adv;
6641         const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6642         const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6643
6644         hci_dev_lock(hdev);
6645
6646         /* If we are currently suspended and this is the first BT event seen,
6647          * save the wake reason associated with the event.
6648          */
6649         if (!hdev->suspended || hdev->wake_reason)
6650                 goto unlock;
6651
6652         /* Default to remote wake. Values for wake_reason are documented in
6653          * the BlueZ mgmt API docs.
6654          */
6655         hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6656
6657         /* Once configured for remote wakeup, we should only wake up for
6658          * reconnections. It's useful to see which device is waking us up so
6659          * keep track of the bdaddr of the connection event that woke us up.
6660          */
6661         if (event == HCI_EV_CONN_REQUEST) {
6662                 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6663                 hdev->wake_addr_type = BDADDR_BREDR;
6664         } else if (event == HCI_EV_CONN_COMPLETE) {
6665                 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6666                 hdev->wake_addr_type = BDADDR_BREDR;
6667         } else if (event == HCI_EV_LE_META) {
6668                 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6669                 u8 subevent = le_ev->subevent;
6670                 u8 *ptr = &skb->data[sizeof(*le_ev)];
6671                 u8 num_reports = *ptr;
6672
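                 /* For advertising report subevents, the meta event header is
                  * followed by the number of reports and then the first report
                  * entry; take the peer address from that first entry.
                  */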
6673                 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6674                      subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6675                      subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6676                     num_reports) {
6677                         adv = (void *)(ptr + 1);
6678                         direct_adv = (void *)(ptr + 1);
6679                         ext_adv = (void *)(ptr + 1);
6680
6681                         switch (subevent) {
6682                         case HCI_EV_LE_ADVERTISING_REPORT:
6683                                 bacpy(&hdev->wake_addr, &adv->bdaddr);
6684                                 hdev->wake_addr_type = adv->bdaddr_type;
6685                                 break;
6686                         case HCI_EV_LE_DIRECT_ADV_REPORT:
6687                                 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6688                                 hdev->wake_addr_type = direct_adv->bdaddr_type;
6689                                 break;
6690                         case HCI_EV_LE_EXT_ADV_REPORT:
6691                                 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6692                                 hdev->wake_addr_type = ext_adv->bdaddr_type;
6693                                 break;
6694                         }
6695                 }
6696         } else {
6697                 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6698         }
6699
6700 unlock:
6701         hci_dev_unlock(hdev);
6702 }
6703
6704 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
6705 [_op] = { \
6706         .req = false, \
6707         .func = _func, \
6708         .min_len = _min_len, \
6709         .max_len = _max_len, \
6710 }
6711
6712 #define HCI_EV(_op, _func, _len) \
6713         HCI_EV_VL(_op, _func, _len, _len)
6714
6715 #define HCI_EV_STATUS(_op, _func) \
6716         HCI_EV(_op, _func, sizeof(struct hci_ev_status))
6717
6718 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
6719 [_op] = { \
6720         .req = true, \
6721         .func_req = _func, \
6722         .min_len = _min_len, \
6723         .max_len = _max_len, \
6724 }
6725
6726 #define HCI_EV_REQ(_op, _func, _len) \
6727         HCI_EV_REQ_VL(_op, _func, _len, _len)
6728
6729 /* Entries in this table shall be positioned according to the event opcode they
6730  * handle, so use of the macros above is recommended since they initialize each
6731  * entry at its proper index using designated initializers; that way, events
6732  * without a callback function don't need to be entered.
6733  */
6734 static const struct hci_ev {
6735         bool req;
6736         union {
6737                 void (*func)(struct hci_dev *hdev, void *data,
6738                              struct sk_buff *skb);
6739                 void (*func_req)(struct hci_dev *hdev, void *data,
6740                                  struct sk_buff *skb, u16 *opcode, u8 *status,
6741                                  hci_req_complete_t *req_complete,
6742                                  hci_req_complete_skb_t *req_complete_skb);
6743         };
6744         u16  min_len;
6745         u16  max_len;
6746 } hci_ev_table[U8_MAX + 1] = {
6747         /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
6748         HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
6749         /* [0x02 = HCI_EV_INQUIRY_RESULT] */
6750         HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
6751                   sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
6752         /* [0x03 = HCI_EV_CONN_COMPLETE] */
6753         HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
6754                sizeof(struct hci_ev_conn_complete)),
6755         /* [0x04 = HCI_EV_CONN_REQUEST] */
6756         HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
6757                sizeof(struct hci_ev_conn_request)),
6758         /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
6759         HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
6760                sizeof(struct hci_ev_disconn_complete)),
6761         /* [0x06 = HCI_EV_AUTH_COMPLETE] */
6762         HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
6763                sizeof(struct hci_ev_auth_complete)),
6764         /* [0x07 = HCI_EV_REMOTE_NAME] */
6765         HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
6766                sizeof(struct hci_ev_remote_name)),
6767         /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
6768         HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
6769                sizeof(struct hci_ev_encrypt_change)),
6770         /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
6771         HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
6772                hci_change_link_key_complete_evt,
6773                sizeof(struct hci_ev_change_link_key_complete)),
6774         /* [0x0b = HCI_EV_REMOTE_FEATURES] */
6775         HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
6776                sizeof(struct hci_ev_remote_features)),
6777         /* [0x0e = HCI_EV_CMD_COMPLETE] */
6778         HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
6779                       sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
6780         /* [0x0f = HCI_EV_CMD_STATUS] */
6781         HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
6782                    sizeof(struct hci_ev_cmd_status)),
6783         /* [0x10 = HCI_EV_HARDWARE_ERROR] */
6784         HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
6785                sizeof(struct hci_ev_hardware_error)),
6786         /* [0x12 = HCI_EV_ROLE_CHANGE] */
6787         HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
6788                sizeof(struct hci_ev_role_change)),
6789         /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
6790         HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
6791                   sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
6792         /* [0x14 = HCI_EV_MODE_CHANGE] */
6793         HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
6794                sizeof(struct hci_ev_mode_change)),
6795         /* [0x16 = HCI_EV_PIN_CODE_REQ] */
6796         HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
6797                sizeof(struct hci_ev_pin_code_req)),
6798         /* [0x17 = HCI_EV_LINK_KEY_REQ] */
6799         HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
6800                sizeof(struct hci_ev_link_key_req)),
6801         /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
6802         HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
6803                sizeof(struct hci_ev_link_key_notify)),
6804         /* [0x1c = HCI_EV_CLOCK_OFFSET] */
6805         HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
6806                sizeof(struct hci_ev_clock_offset)),
6807         /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
6808         HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
6809                sizeof(struct hci_ev_pkt_type_change)),
6810         /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
6811         HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
6812                sizeof(struct hci_ev_pscan_rep_mode)),
6813         /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
6814         HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
6815                   hci_inquiry_result_with_rssi_evt,
6816                   sizeof(struct hci_ev_inquiry_result_rssi),
6817                   HCI_MAX_EVENT_SIZE),
6818         /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
6819         HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
6820                sizeof(struct hci_ev_remote_ext_features)),
6821         /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
6822         HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
6823                sizeof(struct hci_ev_sync_conn_complete)),
6824         /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
6825         HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
6826                   hci_extended_inquiry_result_evt,
6827                   sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
6828         /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
6829         HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
6830                sizeof(struct hci_ev_key_refresh_complete)),
6831         /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
6832         HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
6833                sizeof(struct hci_ev_io_capa_request)),
6834         /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
6835         HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
6836                sizeof(struct hci_ev_io_capa_reply)),
6837         /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
6838         HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
6839                sizeof(struct hci_ev_user_confirm_req)),
6840         /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
6841         HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
6842                sizeof(struct hci_ev_user_passkey_req)),
6843         /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
6844         HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
6845                sizeof(struct hci_ev_remote_oob_data_request)),
6846         /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
6847         HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
6848                sizeof(struct hci_ev_simple_pair_complete)),
6849         /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
6850         HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
6851                sizeof(struct hci_ev_user_passkey_notify)),
6852         /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
6853         HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
6854                sizeof(struct hci_ev_keypress_notify)),
6855         /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
6856         HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
6857                sizeof(struct hci_ev_remote_host_features)),
6858         /* [0x3e = HCI_EV_LE_META] */
6859         HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
6860                       sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
6861 #if IS_ENABLED(CONFIG_BT_HS)
6862         /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
6863         HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
6864                sizeof(struct hci_ev_phy_link_complete)),
6865         /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
6866         HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
6867                sizeof(struct hci_ev_channel_selected)),
6868         /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
6869         HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
6870                hci_disconn_phylink_complete_evt,
6871                sizeof(struct hci_ev_disconn_phy_link_complete)),
6872         /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
6873         HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
6874                sizeof(struct hci_ev_logical_link_complete)),
6875         /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
6876         HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
6877                hci_disconn_loglink_complete_evt,
6878                sizeof(struct hci_ev_disconn_logical_link_complete)),
6879 #endif
6880         /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
6881         HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
6882                sizeof(struct hci_ev_num_comp_blocks)),
6883         /* [0xff = HCI_EV_VENDOR] */
6884         HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
6885 };
6886
6887 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
6888                            u16 *opcode, u8 *status,
6889                            hci_req_complete_t *req_complete,
6890                            hci_req_complete_skb_t *req_complete_skb)
6891 {
6892         const struct hci_ev *ev = &hci_ev_table[event];
6893         void *data;
6894
6895         if (!ev->func)
6896                 return;
6897
6898         if (skb->len < ev->min_len) {
6899                 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
6900                            event, skb->len, ev->min_len);
6901                 return;
6902         }
6903
6904         /* Just warn if the length is over max_len, since it may still be
6905          * possible to partially parse the event, so leave it to the
6906          * callback to decide if that is acceptable.
6907          */
6908         if (skb->len > ev->max_len)
6909                 bt_dev_warn_ratelimited(hdev,
6910                                         "unexpected event 0x%2.2x length: %u > %u",
6911                                         event, skb->len, ev->max_len);
6912
6913         data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
6914         if (!data)
6915                 return;
6916
6917         if (ev->req)
6918                 ev->func_req(hdev, data, skb, opcode, status, req_complete,
6919                              req_complete_skb);
6920         else
6921                 ev->func(hdev, data, skb);
6922 }
6923
6924 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6925 {
6926         struct hci_event_hdr *hdr = (void *) skb->data;
6927         hci_req_complete_t req_complete = NULL;
6928         hci_req_complete_skb_t req_complete_skb = NULL;
6929         struct sk_buff *orig_skb = NULL;
6930         u8 status = 0, event, req_evt = 0;
6931         u16 opcode = HCI_OP_NOP;
6932
6933         if (skb->len < sizeof(*hdr)) {
6934                 bt_dev_err(hdev, "Malformed HCI Event");
6935                 goto done;
6936         }
6937
6938         event = hdr->evt;
6939         if (!event) {
6940                 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
6941                             event);
6942                 goto done;
6943         }
6944
6945         /* Only match event if command OGF is not for LE */
6946         if (hdev->sent_cmd &&
6947             hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
6948             hci_skb_event(hdev->sent_cmd) == event) {
6949                 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
6950                                      status, &req_complete, &req_complete_skb);
6951                 req_evt = event;
6952         }
6953
6954         /* If it looks like we might end up having to call
6955          * req_complete_skb, store a pristine copy of the skb since the
6956          * various handlers may modify the original one through
6957          * skb_pull() calls, etc.
6958          */
6959         if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6960             event == HCI_EV_CMD_COMPLETE)
6961                 orig_skb = skb_clone(skb, GFP_KERNEL);
6962
6963         skb_pull(skb, HCI_EVENT_HDR_SIZE);
6964
6965         /* Store wake reason if we're suspended */
6966         hci_store_wake_reason(hdev, event, skb);
6967
6968         bt_dev_dbg(hdev, "event 0x%2.2x", event);
6969
6970         hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
6971                        &req_complete_skb);
6972
6973         if (req_complete) {
6974                 req_complete(hdev, status, opcode);
6975         } else if (req_complete_skb) {
6976                 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6977                         kfree_skb(orig_skb);
6978                         orig_skb = NULL;
6979                 }
6980                 req_complete_skb(hdev, status, opcode, orig_skb);
6981         }
6982
6983 done:
6984         kfree_skb(orig_skb);
6985         kfree_skb(skb);
6986         hdev->stat.evt_rx++;
6987 }