/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

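/* The hci_*_req helpers below each queue a single HCI command into a
 * request; they are executed synchronously via hci_req_sync() from the
 * legacy ioctl path in hci_dev_cmd() further down.
 */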
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_passive_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

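/* Drop every cached inquiry entry and reset the unknown and resolve
 * lists. The callers in this file wrap the flush in
 * hci_dev_lock()/hci_dev_unlock().
 */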
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

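/* Re-insert @ie into the resolve list, keeping the list ordered by
 * signal strength (smallest |RSSI| first) so that name resolution is
 * attempted for the strongest devices first; entries whose resolution
 * is already pending are skipped over and stay ahead of it.
 */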
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

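/* Add or refresh an inquiry result in the cache. Returns a bitmask of
 * MGMT_DEV_FOUND_* flags telling the caller whether the remote name
 * still needs confirmation and whether legacy pairing applies.
 */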
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

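/* hci_inquiry() backs the HCIINQUIRY ioctl on raw HCI sockets. A
 * minimal illustrative userspace sketch (an assumption-laden example,
 * not part of this file), using the standard UAPI definitions and an
 * fd from socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },  -- GIAC 0x9e8b33
 *			.length  = 8,      -- units of 1.28 s
 *			.num_rsp = 0,      -- 0 = unlimited, capped at 255
 *		},
 *	};
 *
 *	if (ioctl(fd, HCIINQUIRY, &buf) == 0)
 *		-- buf.ir.num_rsp inquiry_info entries follow buf.ir
 */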
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        ret = hci_dev_open_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */
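/* hci_dev_open(), hci_dev_close(), hci_dev_reset() and
 * hci_dev_reset_stat() back the HCIDEVUP, HCIDEVDOWN, HCIDEVRESET and
 * HCIDEVRESTAT socket ioctls dispatched from hci_sock.c.
 */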

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as a user channel. Trying to bring them up as normal
         * devices will result in a failure. Only user channel operation
         * is possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
        int err;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        err = hci_dev_close_sync(hdev);

        hci_req_sync_unlock(hdev);

        return err;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        cancel_work_sync(&hdev->power_on);
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
        int ret;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Cancel these to avoid queueing non-chained pending work */
        hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
        /* Wait for
         *
         *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
         *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
         *
         * inside RCU section to see the flag or complete scheduling.
         */
        synchronize_rcu();
        /* Explicitly cancel works in case scheduled after setting the flag. */
        cancel_delayed_work(&hdev->cmd_timer);
        cancel_delayed_work(&hdev->ncmd_timer);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;
        hdev->le_cnt = 0;
        hdev->iso_cnt = 0;

        ret = hci_reset_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        err = hci_dev_do_reset(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                ret = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                ret = -EOPNOTSUPP;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

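/* Mirror a raw HCISETSCAN write into the mgmt HCI_CONNECTABLE and
 * HCI_DISCOVERABLE flags so that the management interface stays
 * consistent with scan state changed outside of it.
 */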
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
        bool conn_changed, discov_changed;

        BT_DBG("%s scan 0x%02x", hdev->name, scan);

        if ((scan & SCAN_PAGE))
                conn_changed = !hci_dev_test_and_set_flag(hdev,
                                                          HCI_CONNECTABLE);
        else
                conn_changed = hci_dev_test_and_clear_flag(hdev,
                                                           HCI_CONNECTABLE);

        if ((scan & SCAN_INQUIRY)) {
                discov_changed = !hci_dev_test_and_set_flag(hdev,
                                                            HCI_DISCOVERABLE);
        } else {
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                discov_changed = hci_dev_test_and_clear_flag(hdev,
                                                             HCI_DISCOVERABLE);
        }

        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        if (conn_changed || discov_changed) {
                /* In case this was disabled through mgmt */
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                        hci_update_adv_data(hdev, hdev->cur_adv_instance);

                mgmt_new_settings(hdev);
        }
}

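/* hci_dev_cmd() backs the HCISET* configuration ioctls. Illustrative
 * userspace sketch (an assumption, not part of this file) that enables
 * page and inquiry scan on hci0 through a raw HCI socket fd:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(fd, HCISETSCAN, &dr);
 */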
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT, NULL);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                /* Ensure that the connectable and discoverable states
                 * get correctly modified as this was a non-mgmt change.
                 */
                if (!err)
                        hci_update_passive_scan_state(hdev, dr.dev_opt);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                if (hdev->pkt_type == (__u16) dr.dev_opt)
                        break;

                hdev->pkt_type = (__u16) dr.dev_opt;
                mgmt_phy_configuration_changed(hdev, NULL);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                unsigned long flags = hdev->flags;

                /* When the auto-off is configured it means the transport
                 * is running, but in that case still indicate that the
                 * device is actually down.
                 */
                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                        flags &= ~BIT(HCI_UP);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        unsigned long flags;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* When the auto-off is configured it means the transport
         * is running, but in that case still indicate that the
         * device is actually down.
         */
        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                flags = hdev->flags & ~BIT(HCI_UP);
        else
                flags = hdev->flags;

        strscpy(di.name, hdev->name, sizeof(di.name));
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags    = flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

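/* rfkill callback: blocking marks the device HCI_RFKILLED and closes it
 * unless it is still going through setup or config; devices owned by a
 * user channel refuse the change with -EBUSY. Unblocking only clears
 * the flag, it does not power the device back up.
 */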
static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (blocked) {
                hci_dev_set_flag(hdev, HCI_RFKILLED);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG))
                        hci_dev_do_close(hdev);
        } else {
                hci_dev_clear_flag(hdev, HCI_RFKILLED);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_UP, &hdev->flags) &&
            hci_dev_test_flag(hdev, HCI_MGMT) &&
            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                cancel_delayed_work(&hdev->power_off);
                err = hci_powered_update_sync(hdev);
                mgmt_power_on(hdev, err);
                return;
        }

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                hci_dev_lock(hdev);
                mgmt_set_powered_failed(hdev, err);
                hci_dev_unlock(hdev);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
            (hdev->dev_type == HCI_PRIMARY &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
                hci_dev_do_close(hdev);
        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
                /* For unconfigured devices, set the HCI_RAW flag
                 * so that userspace can easily identify them.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        set_bit(HCI_RAW, &hdev->flags);

                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send the Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
                mgmt_index_added(hdev);
        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
                /* When the controller is now configured, then it
                 * is important to clear the HCI_RAW flag.
                 */
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        clear_bit(HCI_RAW, &hdev->flags);

                /* Powering on the controller with HCI_CONFIG set only
                 * happens with the transition from unconfigured to
                 * configured. This will send the Index Added event.
                 */
                mgmt_index_added(hdev);
        }
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

        hci_dev_hold(hdev);
        BT_DBG("%s", hdev->name);

        if (hdev->hw_error)
                hdev->hw_error(hdev, hdev->hw_error_code);
        else
                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

        if (!hci_dev_do_close(hdev))
                hci_dev_do_open(hdev);

        hci_dev_put(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
        struct link_key *key, *tmp;

        list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
        }
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
        struct blocked_key *b, *tmp;

        list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
                list_del_rcu(&b->list);
                kfree_rcu(b, rcu);
        }
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
        bool blocked = false;
        struct blocked_key *b;

        rcu_read_lock();
        list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
                if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
                        blocked = true;
                        break;
                }
        }

        rcu_read_unlock();
        return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) == 0) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev,
                                               HCI_BLOCKED_KEY_TYPE_LINKKEY,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "Link key blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

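/* Decide whether a new BR/EDR link key should be stored persistently.
 * The rules below follow from the key type and the bonding requirements
 * (auth_type/remote_auth) negotiated by both sides during pairing.
 */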
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* BR/EDR key derived using SC from an LE link */
        if (conn->type == LE_LINK)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

static u8 ltk_role(u8 type)
{
        if (type == SMP_LTK)
                return HCI_ROLE_MASTER;

        return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 addr_type, u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
                        continue;

                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "LTK blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

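/* Look up an IRK by Resolvable Private Address. The cached RPA of each
 * IRK is checked first; failing that, the RPA is resolved against every
 * stored IRK via smp_irk_matches() and the match is cached for next
 * time.
 */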
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        irk_to_return = irk;
                        goto done;
                }
        }

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        bacpy(&irk->rpa, rpa);
                        irk_to_return = irk;
                        goto done;
                }
        }

done:
        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        /* Identity Address must be public or static random */
        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (addr_type == irk->addr_type &&
                    bacmp(bdaddr, &irk->bdaddr) == 0) {
                        irk_to_return = irk;
                        goto done;
                }
        }

done:

        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
                                  bdaddr_t *bdaddr, u8 *val, u8 type,
                                  u8 pin_len, bool *persistent)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (persistent)
                *persistent = hci_persistent_key(hdev, conn, type,
                                                 old_key_type);

        return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 type, u8 authenticated,
                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
        struct smp_ltk *key, *old_key;
        u8 role = ltk_role(type);

        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->rand = rand;
        key->enc_size = enc_size;
        key->type = type;

        return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
        struct smp_irk *irk;

        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
        if (!irk) {
                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
                if (!irk)
                        return NULL;

                bacpy(&irk->bdaddr, bdaddr);
                irk->addr_type = addr_type;

                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
        }

        memcpy(irk->val, val, 16);
        bacpy(&irk->rpa, rpa);

        return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct smp_ltk *k, *tmp;
        int removed = 0;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
                removed++;
        }

        return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct smp_ltk *k;
        struct smp_irk *irk;
        u8 addr_type;

        if (type == BDADDR_BREDR) {
                if (hci_find_link_key(hdev, bdaddr))
                        return true;
                return false;
        }

        /* Convert to HCI addr type which struct smp_ltk uses */
        if (type == BDADDR_LE_PUBLIC)
                addr_type = ADDR_LE_DEV_PUBLIC;
        else
                addr_type = ADDR_LE_DEV_RANDOM;

        irk = hci_get_irk(hdev, bdaddr, addr_type);
        if (irk) {
                bdaddr = &irk->bdaddr;
                addr_type = irk->addr_type;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->req_skb) {
                u16 opcode = hci_skb_opcode(hdev->req_skb);

                bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

                hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
        } else {
                bt_dev_err(hdev, "command tx timeout");
        }

        if (hdev->cmd_timeout)
                hdev->cmd_timeout(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            ncmd_timer.work);

        bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

        /* During HCI_INIT phase no events can be injected if the ncmd timer
         * triggers since the procedure has its own timeout handling.
         */
        if (test_bit(HCI_INIT, &hdev->flags))
                return;

        /* This is an irrecoverable state, inject hardware error event */
        hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list) {
                if (bacmp(bdaddr, &data->bdaddr) != 0)
                        continue;
                if (data->bdaddr_type != bdaddr_type)
                        continue;
                return data;
        }

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 bdaddr_type)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

        list_del(&data->list);
        kfree(data);

        return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }
}

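/* Store remote OOB pairing data for @bdaddr. data->present encodes
 * which hash/randomizer pairs are valid: 0x01 = P-192 only,
 * 0x02 = P-256 only, 0x03 = both.
 */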
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
                            u8 *hash256, u8 *rand256)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                data->bdaddr_type = bdaddr_type;
                list_add(&data->list, &hdev->remote_oob_data);
        }

        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x03;
        } else {
                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x02;
                else
                        data->present = 0x00;
        }

        if (hash256 && rand256) {
                memcpy(data->hash256, hash256, sizeof(data->hash256));
                memcpy(data->rand256, rand256, sizeof(data->rand256));
        } else {
                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
                if (hash192 && rand192)
                        data->present = 0x01;
        }

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
                if (adv_instance->instance == instance)
                        return adv_instance;
        }

        return NULL;
}

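/* Return the advertising instance that follows @instance, wrapping
 * around to the first entry when @instance is the last one.
 */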
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *cur_instance;

        cur_instance = hci_find_adv_instance(hdev, instance);
        if (!cur_instance)
                return NULL;

        if (cur_instance == list_last_entry(&hdev->adv_instances,
                                            struct adv_info, list))
                return list_first_entry(&hdev->adv_instances,
                                        struct adv_info, list);
        else
                return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        BT_DBG("%s removing %d", hdev->name, instance);

        if (hdev->cur_adv_instance == instance) {
                if (hdev->adv_instance_timeout) {
                        cancel_delayed_work(&hdev->adv_instance_expire);
                        hdev->adv_instance_timeout = 0;
                }
                hdev->cur_adv_instance = 0x00;
        }

        cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

        list_del(&adv_instance->list);
        kfree(adv_instance);

        hdev->adv_instance_cnt--;

        return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
        struct adv_info *adv_instance, *n;

        list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
                adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
        struct adv_info *adv_instance, *n;

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }

        list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
                cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
                list_del(&adv_instance->list);
                kfree(adv_instance);
        }

        hdev->adv_instance_cnt = 0;
        hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
        struct adv_info *adv_instance = container_of(work, struct adv_info,
                                                     rpa_expired_cb.work);

        BT_DBG("");

        adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
                                      u32 flags, u16 adv_data_len, u8 *adv_data,
                                      u16 scan_rsp_len, u8 *scan_rsp_data,
                                      u16 timeout, u16 duration, s8 tx_power,
                                      u32 min_interval, u32 max_interval,
                                      u8 mesh_handle)
{
        struct adv_info *adv;

        adv = hci_find_adv_instance(hdev, instance);
        if (adv) {
                memset(adv->adv_data, 0, sizeof(adv->adv_data));
                memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
                memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
        } else {
                if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
                    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
                        return ERR_PTR(-EOVERFLOW);

                adv = kzalloc(sizeof(*adv), GFP_KERNEL);
                if (!adv)
                        return ERR_PTR(-ENOMEM);

                adv->pending = true;
                adv->instance = instance;
                list_add(&adv->list, &hdev->adv_instances);
                hdev->adv_instance_cnt++;
        }

        adv->flags = flags;
        adv->min_interval = min_interval;
        adv->max_interval = max_interval;
        adv->tx_power = tx_power;
        /* Defining a mesh_handle changes the timing units to ms,
         * rather than seconds, and ties the instance to the requested
         * mesh_tx queue.
         */
        adv->mesh = mesh_handle;

        hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1756                                   scan_rsp_len, scan_rsp_data);
1757
1758         adv->timeout = timeout;
1759         adv->remaining_time = timeout;
1760
1761         if (duration == 0)
1762                 adv->duration = hdev->def_multi_adv_rotation_duration;
1763         else
1764                 adv->duration = duration;
1765
1766         INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1767
1768         BT_DBG("%s for instance %d", hdev->name, instance);
1769
1770         return adv;
1771 }
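
/* Illustrative caller, not in the kernel tree: register instance 0x01 with
 * placeholder advertising data, a 60 second timeout and the default
 * rotation duration (duration == 0). Assumes hdev->lock is held, as
 * required above.
 */
static int example_add_instance(struct hci_dev *hdev, u8 *ad, u16 ad_len)
{
        struct adv_info *adv;

        adv = hci_add_adv_instance(hdev, 0x01, 0, ad_len, ad, 0, NULL,
                                   60, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
                                   hdev->le_adv_min_interval,
                                   hdev->le_adv_max_interval, 0);

        return PTR_ERR_OR_ZERO(adv);
}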
1772
1773 /* This function requires the caller holds hdev->lock */
1774 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1775                                       u32 flags, u8 data_len, u8 *data,
1776                                       u32 min_interval, u32 max_interval)
1777 {
1778         struct adv_info *adv;
1779
1780         adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1781                                    0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1782                                    min_interval, max_interval, 0);
1783         if (IS_ERR(adv))
1784                 return adv;
1785
1786         adv->periodic = true;
1787         adv->per_adv_data_len = data_len;
1788
1789         if (data)
1790                 memcpy(adv->per_adv_data, data, data_len);
1791
1792         return adv;
1793 }
1794
1795 /* This function requires the caller holds hdev->lock */
1796 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1797                               u16 adv_data_len, u8 *adv_data,
1798                               u16 scan_rsp_len, u8 *scan_rsp_data)
1799 {
1800         struct adv_info *adv;
1801
1802         adv = hci_find_adv_instance(hdev, instance);
1803
1804         /* If the advertisement doesn't exist, we can't modify its data */
1805         if (!adv)
1806                 return -ENOENT;
1807
1808         if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1809                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1810                 memcpy(adv->adv_data, adv_data, adv_data_len);
1811                 adv->adv_data_len = adv_data_len;
1812                 adv->adv_data_changed = true;
1813         }
1814
1815         if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1816                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1817                 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1818                 adv->scan_rsp_len = scan_rsp_len;
1819                 adv->scan_rsp_changed = true;
1820         }
1821
1822         /* Mark as changed if there are flags which would affect it */
1823         if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1824             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1825                 adv->scan_rsp_changed = true;
1826
1827         return 0;
1828 }
1829
1830 /* This function requires the caller holds hdev->lock */
1831 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1832 {
1833         u32 flags;
1834         struct adv_info *adv;
1835
1836         if (instance == 0x00) {
1837                 /* Instance 0 always manages the "Tx Power" and "Flags"
1838                  * fields
1839                  */
1840                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1841
1842                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1843                  * corresponds to the "connectable" instance flag.
1844                  */
1845                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1846                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1847
1848                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1849                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1850                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1851                         flags |= MGMT_ADV_FLAG_DISCOV;
1852
1853                 return flags;
1854         }
1855
1856         adv = hci_find_adv_instance(hdev, instance);
1857
1858         /* Return 0 when we got an invalid instance identifier. */
1859         if (!adv)
1860                 return 0;
1861
1862         return adv->flags;
1863 }
1864
1865 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1866 {
1867         struct adv_info *adv;
1868
1869         /* Instance 0x00 always sets the local name */
1870         if (instance == 0x00)
1871                 return true;
1872
1873         adv = hci_find_adv_instance(hdev, instance);
1874         if (!adv)
1875                 return false;
1876
1877         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1878             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1879                 return true;
1880
1881         return adv->scan_rsp_len ? true : false;
1882 }
1883
1884 /* This function requires the caller holds hdev->lock */
1885 void hci_adv_monitors_clear(struct hci_dev *hdev)
1886 {
1887         struct adv_monitor *monitor;
1888         int handle;
1889
1890         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1891                 hci_free_adv_monitor(hdev, monitor);
1892
1893         idr_destroy(&hdev->adv_monitors_idr);
1894 }
1895
1896 /* Frees the monitor structure and does some bookkeeping.
1897  * This function requires the caller holds hdev->lock.
1898  */
1899 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1900 {
1901         struct adv_pattern *pattern;
1902         struct adv_pattern *tmp;
1903
1904         if (!monitor)
1905                 return;
1906
1907         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1908                 list_del(&pattern->list);
1909                 kfree(pattern);
1910         }
1911
1912         if (monitor->handle)
1913                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1914
1915         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1916                 hdev->adv_monitors_cnt--;
1917                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1918         }
1919
1920         kfree(monitor);
1921 }
1922
1923 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1924  * also attempts to forward the request to the controller.
1925  * This function requires the caller holds hci_req_sync_lock.
1926  */
1927 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1928 {
1929         int min, max, handle;
1930         int status = 0;
1931
1932         if (!monitor)
1933                 return -EINVAL;
1934
1935         hci_dev_lock(hdev);
1936
1937         min = HCI_MIN_ADV_MONITOR_HANDLE;
1938         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1939         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1940                            GFP_KERNEL);
1941
1942         hci_dev_unlock(hdev);
1943
1944         if (handle < 0)
1945                 return handle;
1946
1947         monitor->handle = handle;
1948
1949         if (!hdev_is_powered(hdev))
1950                 return status;
1951
1952         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1953         case HCI_ADV_MONITOR_EXT_NONE:
1954                 bt_dev_dbg(hdev, "add monitor %d status %d",
1955                            monitor->handle, status);
1956                 /* Message was not forwarded to controller - not an error */
1957                 break;
1958
1959         case HCI_ADV_MONITOR_EXT_MSFT:
1960                 status = msft_add_monitor_pattern(hdev, monitor);
1961                 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1962                            handle, status);
1963                 break;
1964         }
1965
1966         return status;
1967 }
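
/* Illustrative caller, not in the kernel tree: hand a populated monitor to
 * hci_add_adv_monitor() and release it on failure, roughly what the
 * management interface does. Assumes hci_req_sync_lock is held, as
 * required above.
 */
static int example_register_monitor(struct hci_dev *hdev,
                                    struct adv_monitor *monitor)
{
        int err = hci_add_adv_monitor(hdev, monitor);

        if (err) {
                hci_dev_lock(hdev);
                hci_free_adv_monitor(hdev, monitor);
                hci_dev_unlock(hdev);
        }

        return err;
}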
1968
1969 /* Attempts to tell the controller to remove the monitor, then frees it.
1970  * If the controller somehow has no corresponding handle, remove it anyway.
1971  * This function requires the caller holds hci_req_sync_lock.
1972  */
1973 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1974                                   struct adv_monitor *monitor)
1975 {
1976         int status = 0;
1977         int handle;
1978
1979         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1980         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1981                 bt_dev_dbg(hdev, "remove monitor %d status %d",
1982                            monitor->handle, status);
1983                 goto free_monitor;
1984
1985         case HCI_ADV_MONITOR_EXT_MSFT:
1986                 handle = monitor->handle;
1987                 status = msft_remove_monitor(hdev, monitor);
1988                 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1989                            handle, status);
1990                 break;
1991         }
1992
1993         /* In case no matching handle is registered, just free the monitor */
1994         if (status == -ENOENT)
1995                 goto free_monitor;
1996
1997         return status;
1998
1999 free_monitor:
2000         if (status == -ENOENT)
2001                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2002                             monitor->handle);
2003         hci_free_adv_monitor(hdev, monitor);
2004
2005         return status;
2006 }
2007
2008 /* This function requires the caller holds hci_req_sync_lock */
2009 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2010 {
2011         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2012
2013         if (!monitor)
2014                 return -EINVAL;
2015
2016         return hci_remove_adv_monitor(hdev, monitor);
2017 }
2018
2019 /* This function requires the caller holds hci_req_sync_lock */
2020 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2021 {
2022         struct adv_monitor *monitor;
2023         int idr_next_id = 0;
2024         int status = 0;
2025
2026         while (1) {
2027                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2028                 if (!monitor)
2029                         break;
2030
2031                 status = hci_remove_adv_monitor(hdev, monitor);
2032                 if (status)
2033                         return status;
2034
2035                 idr_next_id++;
2036         }
2037
2038         return status;
2039 }
2040
2041 /* This function requires the caller holds hdev->lock */
2042 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2043 {
2044         return !idr_is_empty(&hdev->adv_monitors_idr);
2045 }
2046
2047 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2048 {
2049         if (msft_monitor_supported(hdev))
2050                 return HCI_ADV_MONITOR_EXT_MSFT;
2051
2052         return HCI_ADV_MONITOR_EXT_NONE;
2053 }
2054
2055 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2056                                          bdaddr_t *bdaddr, u8 type)
2057 {
2058         struct bdaddr_list *b;
2059
2060         list_for_each_entry(b, bdaddr_list, list) {
2061                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2062                         return b;
2063         }
2064
2065         return NULL;
2066 }
2067
2068 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2069                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2070                                 u8 type)
2071 {
2072         struct bdaddr_list_with_irk *b;
2073
2074         list_for_each_entry(b, bdaddr_list, list) {
2075                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2076                         return b;
2077         }
2078
2079         return NULL;
2080 }
2081
2082 struct bdaddr_list_with_flags *
2083 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2084                                   bdaddr_t *bdaddr, u8 type)
2085 {
2086         struct bdaddr_list_with_flags *b;
2087
2088         list_for_each_entry(b, bdaddr_list, list) {
2089                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2090                         return b;
2091         }
2092
2093         return NULL;
2094 }
2095
2096 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2097 {
2098         struct bdaddr_list *b, *n;
2099
2100         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2101                 list_del(&b->list);
2102                 kfree(b);
2103         }
2104 }
2105
2106 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2107 {
2108         struct bdaddr_list *entry;
2109
2110         if (!bacmp(bdaddr, BDADDR_ANY))
2111                 return -EBADF;
2112
2113         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2114                 return -EEXIST;
2115
2116         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2117         if (!entry)
2118                 return -ENOMEM;
2119
2120         bacpy(&entry->bdaddr, bdaddr);
2121         entry->bdaddr_type = type;
2122
2123         list_add(&entry->list, list);
2124
2125         return 0;
2126 }
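
/* Illustrative sketch, not in the kernel tree: exercise the bdaddr list
 * helpers above with a placeholder address. A duplicate add returns
 * -EEXIST; BDADDR_ANY is rejected with -EBADF.
 */
static void example_bdaddr_list(void)
{
        LIST_HEAD(allow);
        bdaddr_t peer = { .b = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };

        if (!hci_bdaddr_list_add(&allow, &peer, BDADDR_LE_PUBLIC)) {
                WARN_ON(hci_bdaddr_list_add(&allow, &peer,
                                            BDADDR_LE_PUBLIC) != -EEXIST);
                hci_bdaddr_list_del(&allow, &peer, BDADDR_LE_PUBLIC);
        }

        hci_bdaddr_list_clear(&allow);
}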
2127
2128 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2129                                         u8 type, u8 *peer_irk, u8 *local_irk)
2130 {
2131         struct bdaddr_list_with_irk *entry;
2132
2133         if (!bacmp(bdaddr, BDADDR_ANY))
2134                 return -EBADF;
2135
2136         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2137                 return -EEXIST;
2138
2139         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2140         if (!entry)
2141                 return -ENOMEM;
2142
2143         bacpy(&entry->bdaddr, bdaddr);
2144         entry->bdaddr_type = type;
2145
2146         if (peer_irk)
2147                 memcpy(entry->peer_irk, peer_irk, 16);
2148
2149         if (local_irk)
2150                 memcpy(entry->local_irk, local_irk, 16);
2151
2152         list_add(&entry->list, list);
2153
2154         return 0;
2155 }
2156
2157 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2158                                    u8 type, u32 flags)
2159 {
2160         struct bdaddr_list_with_flags *entry;
2161
2162         if (!bacmp(bdaddr, BDADDR_ANY))
2163                 return -EBADF;
2164
2165         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2166                 return -EEXIST;
2167
2168         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2169         if (!entry)
2170                 return -ENOMEM;
2171
2172         bacpy(&entry->bdaddr, bdaddr);
2173         entry->bdaddr_type = type;
2174         entry->flags = flags;
2175
2176         list_add(&entry->list, list);
2177
2178         return 0;
2179 }
2180
2181 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2182 {
2183         struct bdaddr_list *entry;
2184
2185         if (!bacmp(bdaddr, BDADDR_ANY)) {
2186                 hci_bdaddr_list_clear(list);
2187                 return 0;
2188         }
2189
2190         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2191         if (!entry)
2192                 return -ENOENT;
2193
2194         list_del(&entry->list);
2195         kfree(entry);
2196
2197         return 0;
2198 }
2199
2200 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2201                                                         u8 type)
2202 {
2203         struct bdaddr_list_with_irk *entry;
2204
2205         if (!bacmp(bdaddr, BDADDR_ANY)) {
2206                 hci_bdaddr_list_clear(list);
2207                 return 0;
2208         }
2209
2210         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2211         if (!entry)
2212                 return -ENOENT;
2213
2214         list_del(&entry->list);
2215         kfree(entry);
2216
2217         return 0;
2218 }
2219
2220 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2221                                    u8 type)
2222 {
2223         struct bdaddr_list_with_flags *entry;
2224
2225         if (!bacmp(bdaddr, BDADDR_ANY)) {
2226                 hci_bdaddr_list_clear(list);
2227                 return 0;
2228         }
2229
2230         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2231         if (!entry)
2232                 return -ENOENT;
2233
2234         list_del(&entry->list);
2235         kfree(entry);
2236
2237         return 0;
2238 }
2239
2240 /* This function requires the caller holds hdev->lock */
2241 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2242                                                bdaddr_t *addr, u8 addr_type)
2243 {
2244         struct hci_conn_params *params;
2245
2246         list_for_each_entry(params, &hdev->le_conn_params, list) {
2247                 if (bacmp(&params->addr, addr) == 0 &&
2248                     params->addr_type == addr_type) {
2249                         return params;
2250                 }
2251         }
2252
2253         return NULL;
2254 }
2255
2256 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2257 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2258                                                   bdaddr_t *addr, u8 addr_type)
2259 {
2260         struct hci_conn_params *param;
2261
2262         rcu_read_lock();
2263
2264         list_for_each_entry_rcu(param, list, action) {
2265                 if (bacmp(&param->addr, addr) == 0 &&
2266                     param->addr_type == addr_type) {
2267                         rcu_read_unlock();
2268                         return param;
2269                 }
2270         }
2271
2272         rcu_read_unlock();
2273
2274         return NULL;
2275 }
2276
2277 /* This function requires the caller holds hdev->lock */
2278 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2279 {
2280         if (list_empty(&param->action))
2281                 return;
2282
2283         list_del_rcu(&param->action);
2284         synchronize_rcu();
2285         INIT_LIST_HEAD(&param->action);
2286 }
2287
2288 /* This function requires the caller holds hdev->lock */
2289 void hci_pend_le_list_add(struct hci_conn_params *param,
2290                           struct list_head *list)
2291 {
2292         list_add_rcu(&param->action, list);
2293 }
2294
2295 /* This function requires the caller holds hdev->lock */
2296 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2297                                             bdaddr_t *addr, u8 addr_type)
2298 {
2299         struct hci_conn_params *params;
2300
2301         params = hci_conn_params_lookup(hdev, addr, addr_type);
2302         if (params)
2303                 return params;
2304
2305         params = kzalloc(sizeof(*params), GFP_KERNEL);
2306         if (!params) {
2307                 bt_dev_err(hdev, "out of memory");
2308                 return NULL;
2309         }
2310
2311         bacpy(&params->addr, addr);
2312         params->addr_type = addr_type;
2313
2314         list_add(&params->list, &hdev->le_conn_params);
2315         INIT_LIST_HEAD(&params->action);
2316
2317         params->conn_min_interval = hdev->le_conn_min_interval;
2318         params->conn_max_interval = hdev->le_conn_max_interval;
2319         params->conn_latency = hdev->le_conn_latency;
2320         params->supervision_timeout = hdev->le_supv_timeout;
2321         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2322
2323         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2324
2325         return params;
2326 }
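
/* Illustrative sketch, not in the kernel tree: create (or look up) the LE
 * connection parameters for a peer and request autoconnection, roughly
 * what the management Add Device command does. Assumes hdev->lock is
 * held, as required above.
 */
static int example_enable_autoconnect(struct hci_dev *hdev,
                                      bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        params = hci_conn_params_add(hdev, addr, addr_type);
        if (!params)
                return -ENOMEM;

        params->auto_connect = HCI_AUTO_CONN_ALWAYS;
        hci_pend_le_list_add(params, &hdev->pend_le_conns);

        return 0;
}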
2327
2328 void hci_conn_params_free(struct hci_conn_params *params)
2329 {
2330         hci_pend_le_list_del_init(params);
2331
2332         if (params->conn) {
2333                 hci_conn_drop(params->conn);
2334                 hci_conn_put(params->conn);
2335         }
2336
2337         list_del(&params->list);
2338         kfree(params);
2339 }
2340
2341 /* This function requires the caller holds hdev->lock */
2342 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2343 {
2344         struct hci_conn_params *params;
2345
2346         params = hci_conn_params_lookup(hdev, addr, addr_type);
2347         if (!params)
2348                 return;
2349
2350         hci_conn_params_free(params);
2351
2352         hci_update_passive_scan(hdev);
2353
2354         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2355 }
2356
2357 /* This function requires the caller holds hdev->lock */
2358 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2359 {
2360         struct hci_conn_params *params, *tmp;
2361
2362         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2363                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2364                         continue;
2365
2366                 /* If we are trying to establish a one-time connection to a
2367                  * disabled device, leave the params but mark them as explicit.
2368                  */
2369                 if (params->explicit_connect) {
2370                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2371                         continue;
2372                 }
2373
2374                 hci_conn_params_free(params);
2375         }
2376
2377         BT_DBG("All LE disabled connection parameters were removed");
2378 }
2379
2380 /* This function requires the caller holds hdev->lock */
2381 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2382 {
2383         struct hci_conn_params *params, *tmp;
2384
2385         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2386                 hci_conn_params_free(params);
2387
2388         BT_DBG("All LE connection parameters were removed");
2389 }
2390
2391 /* Copy the Identity Address of the controller.
2392  *
2393  * If the controller has a public BD_ADDR, then by default use that one.
2394  * If this is an LE-only controller without a public address, default to
2395  * the static random address.
2396  *
2397  * For debugging purposes it is possible to force controllers with a
2398  * public address to use the static random address instead.
2399  *
2400  * In case BR/EDR has been disabled on a dual-mode controller and
2401  * userspace has configured a static address, then that address
2402  * becomes the identity address instead of the public BR/EDR address.
2403  */
2404 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2405                                u8 *bdaddr_type)
2406 {
2407         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2408             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2409             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2410              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2411                 bacpy(bdaddr, &hdev->static_addr);
2412                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2413         } else {
2414                 bacpy(bdaddr, &hdev->bdaddr);
2415                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2416         }
2417 }
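
/* Illustrative sketch, not in the kernel tree: fetch and log the identity
 * address selected by the rules described above.
 */
static void example_log_identity(struct hci_dev *hdev)
{
        bdaddr_t bdaddr;
        u8 bdaddr_type;

        hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
        bt_dev_dbg(hdev, "identity %pMR (type %u)", &bdaddr, bdaddr_type);
}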
2418
2419 static void hci_clear_wake_reason(struct hci_dev *hdev)
2420 {
2421         hci_dev_lock(hdev);
2422
2423         hdev->wake_reason = 0;
2424         bacpy(&hdev->wake_addr, BDADDR_ANY);
2425         hdev->wake_addr_type = 0;
2426
2427         hci_dev_unlock(hdev);
2428 }
2429
2430 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2431                                 void *data)
2432 {
2433         struct hci_dev *hdev =
2434                 container_of(nb, struct hci_dev, suspend_notifier);
2435         int ret = 0;
2436
2437         /* Userspace has full control of this device. Do nothing. */
2438         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2439                 return NOTIFY_DONE;
2440
2441         /* Hold the device to avoid a potential race with hci_unregister_dev(). */
2442         hci_dev_hold(hdev);
2443
2444         if (action == PM_SUSPEND_PREPARE)
2445                 ret = hci_suspend_dev(hdev);
2446         else if (action == PM_POST_SUSPEND)
2447                 ret = hci_resume_dev(hdev);
2448
2449         if (ret)
2450                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2451                            action, ret);
2452
2453         hci_dev_put(hdev);
2454         return NOTIFY_DONE;
2455 }
2456
2457 /* Alloc HCI device */
2458 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2459 {
2460         struct hci_dev *hdev;
2461         unsigned int alloc_size;
2462
2463         alloc_size = sizeof(*hdev);
2464         if (sizeof_priv) {
2465                 /* FIXME: may need explicit alignment? */
2466                 alloc_size += sizeof_priv;
2467         }
2468
2469         hdev = kzalloc(alloc_size, GFP_KERNEL);
2470         if (!hdev)
2471                 return NULL;
2472
2473         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2474         hdev->esco_type = (ESCO_HV1);
2475         hdev->link_mode = (HCI_LM_ACCEPT);
2476         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2477         hdev->io_capability = 0x03;     /* No Input No Output */
2478         hdev->manufacturer = 0xffff;    /* Default to internal use */
2479         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2480         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2481         hdev->adv_instance_cnt = 0;
2482         hdev->cur_adv_instance = 0x00;
2483         hdev->adv_instance_timeout = 0;
2484
2485         hdev->advmon_allowlist_duration = 300;
2486         hdev->advmon_no_filter_duration = 500;
2487         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
2488
2489         hdev->sniff_max_interval = 800;
2490         hdev->sniff_min_interval = 80;
2491
2492         hdev->le_adv_channel_map = 0x07;
2493         hdev->le_adv_min_interval = 0x0800;
2494         hdev->le_adv_max_interval = 0x0800;
2495         hdev->le_scan_interval = 0x0060;
2496         hdev->le_scan_window = 0x0030;
2497         hdev->le_scan_int_suspend = 0x0400;
2498         hdev->le_scan_window_suspend = 0x0012;
2499         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2500         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2501         hdev->le_scan_int_adv_monitor = 0x0060;
2502         hdev->le_scan_window_adv_monitor = 0x0030;
2503         hdev->le_scan_int_connect = 0x0060;
2504         hdev->le_scan_window_connect = 0x0060;
2505         hdev->le_conn_min_interval = 0x0018;
2506         hdev->le_conn_max_interval = 0x0028;
2507         hdev->le_conn_latency = 0x0000;
2508         hdev->le_supv_timeout = 0x002a;
2509         hdev->le_def_tx_len = 0x001b;
2510         hdev->le_def_tx_time = 0x0148;
2511         hdev->le_max_tx_len = 0x001b;
2512         hdev->le_max_tx_time = 0x0148;
2513         hdev->le_max_rx_len = 0x001b;
2514         hdev->le_max_rx_time = 0x0148;
2515         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2516         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2517         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2518         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2519         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2520         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2521         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2522         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2523         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2524
2525         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2526         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2527         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2528         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2529         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2530         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2531
2532         /* default 1.28 sec page scan */
2533         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2534         hdev->def_page_scan_int = 0x0800;
2535         hdev->def_page_scan_window = 0x0012;
2536
2537         mutex_init(&hdev->lock);
2538         mutex_init(&hdev->req_lock);
2539
2540         ida_init(&hdev->unset_handle_ida);
2541
2542         INIT_LIST_HEAD(&hdev->mesh_pending);
2543         INIT_LIST_HEAD(&hdev->mgmt_pending);
2544         INIT_LIST_HEAD(&hdev->reject_list);
2545         INIT_LIST_HEAD(&hdev->accept_list);
2546         INIT_LIST_HEAD(&hdev->uuids);
2547         INIT_LIST_HEAD(&hdev->link_keys);
2548         INIT_LIST_HEAD(&hdev->long_term_keys);
2549         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2550         INIT_LIST_HEAD(&hdev->remote_oob_data);
2551         INIT_LIST_HEAD(&hdev->le_accept_list);
2552         INIT_LIST_HEAD(&hdev->le_resolv_list);
2553         INIT_LIST_HEAD(&hdev->le_conn_params);
2554         INIT_LIST_HEAD(&hdev->pend_le_conns);
2555         INIT_LIST_HEAD(&hdev->pend_le_reports);
2556         INIT_LIST_HEAD(&hdev->conn_hash.list);
2557         INIT_LIST_HEAD(&hdev->adv_instances);
2558         INIT_LIST_HEAD(&hdev->blocked_keys);
2559         INIT_LIST_HEAD(&hdev->monitored_devices);
2560
2561         INIT_LIST_HEAD(&hdev->local_codecs);
2562         INIT_WORK(&hdev->rx_work, hci_rx_work);
2563         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2564         INIT_WORK(&hdev->tx_work, hci_tx_work);
2565         INIT_WORK(&hdev->power_on, hci_power_on);
2566         INIT_WORK(&hdev->error_reset, hci_error_reset);
2567
2568         hci_cmd_sync_init(hdev);
2569
2570         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2571
2572         skb_queue_head_init(&hdev->rx_q);
2573         skb_queue_head_init(&hdev->cmd_q);
2574         skb_queue_head_init(&hdev->raw_q);
2575
2576         init_waitqueue_head(&hdev->req_wait_q);
2577
2578         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2579         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2580
2581         hci_devcd_setup(hdev);
2582         hci_request_setup(hdev);
2583
2584         hci_init_sysfs(hdev);
2585         discovery_init(hdev);
2586
2587         return hdev;
2588 }
2589 EXPORT_SYMBOL(hci_alloc_dev_priv);
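
/* Illustrative sketch, not in the kernel tree: a driver allocating private
 * data alongside the hci_dev and retrieving it again with hci_get_priv()
 * from hci_core.h. struct example_data is a placeholder.
 */
struct example_data {
        int irq;
};

static struct hci_dev *example_alloc(void)
{
        struct hci_dev *hdev;
        struct example_data *priv;

        hdev = hci_alloc_dev_priv(sizeof(struct example_data));
        if (!hdev)
                return NULL;

        priv = hci_get_priv(hdev);
        priv->irq = -1;         /* not wired up yet */

        return hdev;
}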
2590
2591 /* Free HCI device */
2592 void hci_free_dev(struct hci_dev *hdev)
2593 {
2594         /* Will be freed via the device release callback. */
2595         put_device(&hdev->dev);
2596 }
2597 EXPORT_SYMBOL(hci_free_dev);
2598
2599 /* Register HCI device */
2600 int hci_register_dev(struct hci_dev *hdev)
2601 {
2602         int id, error;
2603
2604         if (!hdev->open || !hdev->close || !hdev->send)
2605                 return -EINVAL;
2606
2607         /* Do not allow HCI_AMP devices to register at index 0,
2608          * so the index can be used as the AMP controller ID.
2609          */
2610         switch (hdev->dev_type) {
2611         case HCI_PRIMARY:
2612                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2613                 break;
2614         case HCI_AMP:
2615                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2616                 break;
2617         default:
2618                 return -EINVAL;
2619         }
2620
2621         if (id < 0)
2622                 return id;
2623
2624         error = dev_set_name(&hdev->dev, "hci%u", id);
2625         if (error) {
2626                 ida_simple_remove(&hci_index_ida, id);
                return error;
        }
2627
2628         hdev->name = dev_name(&hdev->dev);
2629         hdev->id = id;
2630
2631         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2632
2633         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2634         if (!hdev->workqueue) {
2635                 error = -ENOMEM;
2636                 goto err;
2637         }
2638
2639         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2640                                                       hdev->name);
2641         if (!hdev->req_workqueue) {
2642                 destroy_workqueue(hdev->workqueue);
2643                 error = -ENOMEM;
2644                 goto err;
2645         }
2646
2647         if (!IS_ERR_OR_NULL(bt_debugfs))
2648                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2649
2650         error = device_add(&hdev->dev);
2651         if (error < 0)
2652                 goto err_wqueue;
2653
2654         hci_leds_init(hdev);
2655
2656         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2657                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2658                                     hdev);
2659         if (hdev->rfkill) {
2660                 if (rfkill_register(hdev->rfkill) < 0) {
2661                         rfkill_destroy(hdev->rfkill);
2662                         hdev->rfkill = NULL;
2663                 }
2664         }
2665
2666         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2667                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2668
2669         hci_dev_set_flag(hdev, HCI_SETUP);
2670         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2671
2672         if (hdev->dev_type == HCI_PRIMARY) {
2673                 /* Assume BR/EDR support until proven otherwise (such as
2674                  * through reading supported features during init).
2675                  */
2676                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2677         }
2678
2679         write_lock(&hci_dev_list_lock);
2680         list_add(&hdev->list, &hci_dev_list);
2681         write_unlock(&hci_dev_list_lock);
2682
2683         /* Devices that are marked for raw-only usage are unconfigured
2684          * and should not be included in normal operation.
2685          */
2686         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2687                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2688
2689         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2690          * callback.
2691          */
2692         if (hdev->wakeup)
2693                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2694
2695         hci_sock_dev_event(hdev, HCI_DEV_REG);
2696         hci_dev_hold(hdev);
2697
2698         error = hci_register_suspend_notifier(hdev);
2699         if (error)
2700                 BT_WARN("register suspend notifier failed: %d", error);
2701
2702         queue_work(hdev->req_workqueue, &hdev->power_on);
2703
2704         idr_init(&hdev->adv_monitors_idr);
2705         msft_register(hdev);
2706
2707         return id;
2708
2709 err_wqueue:
2710         debugfs_remove_recursive(hdev->debugfs);
2711         destroy_workqueue(hdev->workqueue);
2712         destroy_workqueue(hdev->req_workqueue);
2713 err:
2714         ida_simple_remove(&hci_index_ida, hdev->id);
2715
2716         return error;
2717 }
2718 EXPORT_SYMBOL(hci_register_dev);
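
/* Illustrative sketch, not in the kernel tree: the minimum a transport
 * driver must provide before hci_register_dev() will accept the device.
 * All example_* names are placeholders.
 */
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb);         /* a real driver hands this to hardware */
        return 0;
}

static int example_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        err = hci_register_dev(hdev);
        if (err < 0)
                hci_free_dev(hdev);

        return err < 0 ? err : 0;
}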
2719
2720 /* Unregister HCI device */
2721 void hci_unregister_dev(struct hci_dev *hdev)
2722 {
2723         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2724
2725         mutex_lock(&hdev->unregister_lock);
2726         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2727         mutex_unlock(&hdev->unregister_lock);
2728
2729         write_lock(&hci_dev_list_lock);
2730         list_del(&hdev->list);
2731         write_unlock(&hci_dev_list_lock);
2732
2733         cancel_work_sync(&hdev->power_on);
2734
2735         hci_cmd_sync_clear(hdev);
2736
2737         hci_unregister_suspend_notifier(hdev);
2738
2739         msft_unregister(hdev);
2740
2741         hci_dev_do_close(hdev);
2742
2743         if (!test_bit(HCI_INIT, &hdev->flags) &&
2744             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2745             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2746                 hci_dev_lock(hdev);
2747                 mgmt_index_removed(hdev);
2748                 hci_dev_unlock(hdev);
2749         }
2750
2751         /* mgmt_index_removed should take care of emptying the
2752          * pending list */
2753         BUG_ON(!list_empty(&hdev->mgmt_pending));
2754
2755         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2756
2757         if (hdev->rfkill) {
2758                 rfkill_unregister(hdev->rfkill);
2759                 rfkill_destroy(hdev->rfkill);
2760         }
2761
2762         device_del(&hdev->dev);
2763         /* Actual cleanup is deferred until hci_release_dev(). */
2764         hci_dev_put(hdev);
2765 }
2766 EXPORT_SYMBOL(hci_unregister_dev);
2767
2768 /* Release HCI device */
2769 void hci_release_dev(struct hci_dev *hdev)
2770 {
2771         debugfs_remove_recursive(hdev->debugfs);
2772         kfree_const(hdev->hw_info);
2773         kfree_const(hdev->fw_info);
2774
2775         destroy_workqueue(hdev->workqueue);
2776         destroy_workqueue(hdev->req_workqueue);
2777
2778         hci_dev_lock(hdev);
2779         hci_bdaddr_list_clear(&hdev->reject_list);
2780         hci_bdaddr_list_clear(&hdev->accept_list);
2781         hci_uuids_clear(hdev);
2782         hci_link_keys_clear(hdev);
2783         hci_smp_ltks_clear(hdev);
2784         hci_smp_irks_clear(hdev);
2785         hci_remote_oob_data_clear(hdev);
2786         hci_adv_instances_clear(hdev);
2787         hci_adv_monitors_clear(hdev);
2788         hci_bdaddr_list_clear(&hdev->le_accept_list);
2789         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2790         hci_conn_params_clear_all(hdev);
2791         hci_discovery_filter_clear(hdev);
2792         hci_blocked_keys_clear(hdev);
2793         hci_codec_list_clear(&hdev->local_codecs);
2794         hci_dev_unlock(hdev);
2795
2796         ida_destroy(&hdev->unset_handle_ida);
2797         ida_simple_remove(&hci_index_ida, hdev->id);
2798         kfree_skb(hdev->sent_cmd);
2799         kfree_skb(hdev->req_skb);
2800         kfree_skb(hdev->recv_event);
2801         kfree(hdev);
2802 }
2803 EXPORT_SYMBOL(hci_release_dev);
2804
2805 int hci_register_suspend_notifier(struct hci_dev *hdev)
2806 {
2807         int ret = 0;
2808
2809         if (!hdev->suspend_notifier.notifier_call &&
2810             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2811                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2812                 ret = register_pm_notifier(&hdev->suspend_notifier);
2813         }
2814
2815         return ret;
2816 }
2817
2818 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2819 {
2820         int ret = 0;
2821
2822         if (hdev->suspend_notifier.notifier_call) {
2823                 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2824                 if (!ret)
2825                         hdev->suspend_notifier.notifier_call = NULL;
2826         }
2827
2828         return ret;
2829 }
2830
2831 /* Cancel ongoing command synchronously:
2832  *
2833  * - Cancel command timer
2834  * - Reset command counter
2835  * - Cancel command request
2836  */
2837 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2838 {
2839         bt_dev_dbg(hdev, "err 0x%2.2x", err);
2840
2841         cancel_delayed_work_sync(&hdev->cmd_timer);
2842         cancel_delayed_work_sync(&hdev->ncmd_timer);
2843         atomic_set(&hdev->cmd_cnt, 1);
2844
2845         hci_cmd_sync_cancel_sync(hdev, err);
2846 }
2847
2848 /* Suspend HCI device */
2849 int hci_suspend_dev(struct hci_dev *hdev)
2850 {
2851         int ret;
2852
2853         bt_dev_dbg(hdev, "");
2854
2855         /* Suspend should only act when the device is powered. */
2856         if (!hdev_is_powered(hdev) ||
2857             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2858                 return 0;
2859
2860         /* If powering down don't attempt to suspend */
2861         if (mgmt_powering_down(hdev))
2862                 return 0;
2863
2864         /* Cancel potentially blocking sync operation before suspend */
2865         hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2866
2867         hci_req_sync_lock(hdev);
2868         ret = hci_suspend_sync(hdev);
2869         hci_req_sync_unlock(hdev);
2870
2871         hci_clear_wake_reason(hdev);
2872         mgmt_suspending(hdev, hdev->suspend_state);
2873
2874         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2875         return ret;
2876 }
2877 EXPORT_SYMBOL(hci_suspend_dev);
2878
2879 /* Resume HCI device */
2880 int hci_resume_dev(struct hci_dev *hdev)
2881 {
2882         int ret;
2883
2884         bt_dev_dbg(hdev, "");
2885
2886         /* Resume should only act when the device is powered. */
2887         if (!hdev_is_powered(hdev) ||
2888             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2889                 return 0;
2890
2891         /* If powering down don't attempt to resume */
2892         if (mgmt_powering_down(hdev))
2893                 return 0;
2894
2895         hci_req_sync_lock(hdev);
2896         ret = hci_resume_sync(hdev);
2897         hci_req_sync_unlock(hdev);
2898
2899         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2900                       hdev->wake_addr_type);
2901
2902         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2903         return ret;
2904 }
2905 EXPORT_SYMBOL(hci_resume_dev);
2906
2907 /* Reset HCI device */
2908 int hci_reset_dev(struct hci_dev *hdev)
2909 {
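        /* A 3-byte synthetic HCI event: event code HCI_EV_HARDWARE_ERROR,
         * parameter total length 0x01, hardware code 0x00.
         */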
2910         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2911         struct sk_buff *skb;
2912
2913         skb = bt_skb_alloc(3, GFP_ATOMIC);
2914         if (!skb)
2915                 return -ENOMEM;
2916
2917         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2918         skb_put_data(skb, hw_err, 3);
2919
2920         bt_dev_err(hdev, "Injecting HCI hardware error event");
2921
2922         /* Send Hardware Error to upper stack */
2923         return hci_recv_frame(hdev, skb);
2924 }
2925 EXPORT_SYMBOL(hci_reset_dev);
2926
2927 /* Receive frame from HCI drivers */
2928 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2929 {
2930         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2931                       !test_bit(HCI_INIT, &hdev->flags))) {
2932                 kfree_skb(skb);
2933                 return -ENXIO;
2934         }
2935
2936         switch (hci_skb_pkt_type(skb)) {
2937         case HCI_EVENT_PKT:
2938                 break;
2939         case HCI_ACLDATA_PKT:
2940                 /* Detect if ISO packet has been sent as ACL */
2941                 if (hci_conn_num(hdev, ISO_LINK)) {
2942                         __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2943                         __u8 type;
2944
2945                         type = hci_conn_lookup_type(hdev, hci_handle(handle));
2946                         if (type == ISO_LINK)
2947                                 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2948                 }
2949                 break;
2950         case HCI_SCODATA_PKT:
2951                 break;
2952         case HCI_ISODATA_PKT:
2953                 break;
2954         default:
2955                 kfree_skb(skb);
2956                 return -EINVAL;
2957         }
2958
2959         /* Incoming skb */
2960         bt_cb(skb)->incoming = 1;
2961
2962         /* Time stamp */
2963         __net_timestamp(skb);
2964
2965         skb_queue_tail(&hdev->rx_q, skb);
2966         queue_work(hdev->workqueue, &hdev->rx_work);
2967
2968         return 0;
2969 }
2970 EXPORT_SYMBOL(hci_recv_frame);
2971
2972 /* Receive diagnostic message from HCI drivers */
2973 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2974 {
2975         /* Mark as diagnostic packet */
2976         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2977
2978         /* Time stamp */
2979         __net_timestamp(skb);
2980
2981         skb_queue_tail(&hdev->rx_q, skb);
2982         queue_work(hdev->workqueue, &hdev->rx_work);
2983
2984         return 0;
2985 }
2986 EXPORT_SYMBOL(hci_recv_diag);
2987
2988 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2989 {
2990         va_list vargs;
2991
2992         va_start(vargs, fmt);
2993         kfree_const(hdev->hw_info);
2994         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2995         va_end(vargs);
2996 }
2997 EXPORT_SYMBOL(hci_set_hw_info);
2998
2999 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3000 {
3001         va_list vargs;
3002
3003         va_start(vargs, fmt);
3004         kfree_const(hdev->fw_info);
3005         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3006         va_end(vargs);
3007 }
3008 EXPORT_SYMBOL(hci_set_fw_info);
3009
3010 /* ---- Interface to upper protocols ---- */
3011
3012 int hci_register_cb(struct hci_cb *cb)
3013 {
3014         BT_DBG("%p name %s", cb, cb->name);
3015
3016         mutex_lock(&hci_cb_list_lock);
3017         list_add_tail(&cb->list, &hci_cb_list);
3018         mutex_unlock(&hci_cb_list_lock);
3019
3020         return 0;
3021 }
3022 EXPORT_SYMBOL(hci_register_cb);
3023
3024 int hci_unregister_cb(struct hci_cb *cb)
3025 {
3026         BT_DBG("%p name %s", cb, cb->name);
3027
3028         mutex_lock(&hci_cb_list_lock);
3029         list_del(&cb->list);
3030         mutex_unlock(&hci_cb_list_lock);
3031
3032         return 0;
3033 }
3034 EXPORT_SYMBOL(hci_unregister_cb);
3035
3036 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3037 {
3038         int err;
3039
3040         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3041                skb->len);
3042
3043         /* Time stamp */
3044         __net_timestamp(skb);
3045
3046         /* Send copy to monitor */
3047         hci_send_to_monitor(hdev, skb);
3048
3049         if (atomic_read(&hdev->promisc)) {
3050                 /* Send copy to the sockets */
3051                 hci_send_to_sock(hdev, skb);
3052         }
3053
3054         /* Get rid of the skb owner prior to sending to the driver. */
3055         skb_orphan(skb);
3056
3057         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3058                 kfree_skb(skb);
3059                 return -EINVAL;
3060         }
3061
3062         err = hdev->send(hdev, skb);
3063         if (err < 0) {
3064                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3065                 kfree_skb(skb);
3066                 return err;
3067         }
3068
3069         return 0;
3070 }
3071
3072 /* Send HCI command */
3073 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3074                  const void *param)
3075 {
3076         struct sk_buff *skb;
3077
3078         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3079
3080         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3081         if (!skb) {
3082                 bt_dev_err(hdev, "no memory for command");
3083                 return -ENOMEM;
3084         }
3085
3086         /* Stand-alone HCI commands must be flagged as
3087          * single-command requests.
3088          */
3089         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3090
3091         skb_queue_tail(&hdev->cmd_q, skb);
3092         queue_work(hdev->workqueue, &hdev->cmd_work);
3093
3094         return 0;
3095 }
3096
3097 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3098                    const void *param)
3099 {
3100         struct sk_buff *skb;
3101
3102         if (hci_opcode_ogf(opcode) != 0x3f) {
3103                 /* A controller receiving a command shall respond with either
3104                  * a Command Status Event or a Command Complete Event.
3105                  * Therefore, all standard HCI commands must be sent via the
3106                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3107                  * Some vendors do not comply with this rule for vendor-specific
3108                  * commands and do not return any event. We want to support
3109                  * unresponded commands for such cases only.
3110                  */
3111                 bt_dev_err(hdev, "unresponded command not supported");
3112                 return -EINVAL;
3113         }
3114
3115         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3116         if (!skb) {
3117                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3118                            opcode);
3119                 return -ENOMEM;
3120         }
3121
3122         hci_send_frame(hdev, skb);
3123
3124         return 0;
3125 }
3126 EXPORT_SYMBOL(__hci_cmd_send);
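
/* Illustrative sketch, not in the kernel tree: a fire-and-forget vendor
 * command. OGF 0x3f is mandated above; the OCF 0x0001 and payload are
 * placeholders and entirely vendor specific.
 */
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
        u8 param[] = { 0x01 };

        return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
                              sizeof(param), param);
}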
3127
3128 /* Get data from the previously sent command */
3129 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3130 {
3131         struct hci_command_hdr *hdr;
3132
3133         if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3134                 return NULL;
3135
3136         hdr = (void *)skb->data;
3137
3138         if (hdr->opcode != cpu_to_le16(opcode))
3139                 return NULL;
3140
3141         return skb->data + HCI_COMMAND_HDR_SIZE;
3142 }
3143
3144 /* Get data from the previously sent command */
3145 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3146 {
3147         void *data;
3148
3149         /* Check if opcode matches last sent command */
3150         data = hci_cmd_data(hdev->sent_cmd, opcode);
3151         if (!data)
3152                 /* Check if opcode matches last request */
3153                 data = hci_cmd_data(hdev->req_skb, opcode);
3154
3155         return data;
3156 }
3157
3158 /* Get data from last received event */
3159 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3160 {
3161         struct hci_event_hdr *hdr;
3162         int offset;
3163
3164         if (!hdev->recv_event)
3165                 return NULL;
3166
3167         hdr = (void *)hdev->recv_event->data;
3168         offset = sizeof(*hdr);
3169
3170         if (hdr->evt != event) {
3171                 /* For an LE meta event, check whether the subevent matches */
3172                 if (hdr->evt == HCI_EV_LE_META) {
3173                         struct hci_ev_le_meta *ev;
3174
3175                         ev = (void *)hdev->recv_event->data + offset;
3176                         offset += sizeof(*ev);
3177                         if (ev->subevent == event)
3178                                 goto found;
3179                 }
3180                 return NULL;
3181         }
3182
3183 found:
3184         bt_dev_dbg(hdev, "event 0x%2.2x", event);
3185
3186         return hdev->recv_event->data + offset;
3187 }
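
/* Illustrative sketch, not in the kernel tree: peek at the most recently
 * received LE Connection Complete subevent, exercising the LE meta event
 * handling above.
 */
static void example_peek_le_conn_complete(struct hci_dev *hdev)
{
        struct hci_ev_le_conn_complete *ev;

        ev = hci_recv_event_data(hdev, HCI_EV_LE_CONN_COMPLETE);
        if (ev)
                bt_dev_dbg(hdev, "LE conn complete status 0x%2.2x",
                           ev->status);
}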
3188
3189 /* Send ACL data */
3190 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3191 {
3192         struct hci_acl_hdr *hdr;
3193         int len = skb->len;
3194
3195         skb_push(skb, HCI_ACL_HDR_SIZE);
3196         skb_reset_transport_header(skb);
3197         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3198         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3199         hdr->dlen   = cpu_to_le16(len);
3200 }
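
/* Worked example of the packing above: the connection handle occupies
 * bits 0-11 and the packet boundary/broadcast flags bits 12-15, so
 * hci_handle_pack(0x002a, ACL_START) yields 0x202a (ACL_START is 0x02).
 */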
3201
3202 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3203                           struct sk_buff *skb, __u16 flags)
3204 {
3205         struct hci_conn *conn = chan->conn;
3206         struct hci_dev *hdev = conn->hdev;
3207         struct sk_buff *list;
3208
3209         skb->len = skb_headlen(skb);
3210         skb->data_len = 0;
3211
3212         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3213
3214         switch (hdev->dev_type) {
3215         case HCI_PRIMARY:
3216                 hci_add_acl_hdr(skb, conn->handle, flags);
3217                 break;
3218         case HCI_AMP:
3219                 hci_add_acl_hdr(skb, chan->handle, flags);
3220                 break;
3221         default:
3222                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3223                 return;
3224         }
3225
3226         list = skb_shinfo(skb)->frag_list;
3227         if (!list) {
3228                 /* Non fragmented */
3229                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3230
3231                 skb_queue_tail(queue, skb);
3232         } else {
3233                 /* Fragmented */
3234                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3235
3236                 skb_shinfo(skb)->frag_list = NULL;
3237
3238                 /* Queue all fragments atomically. We need to use spin_lock_bh
3239                  * here because of 6LoWPAN links, as there this function is
3240                  * called from softirq and using normal spin lock could cause
3241                  * deadlocks.
3242                  */
3243                 spin_lock_bh(&queue->lock);
3244
3245                 __skb_queue_tail(queue, skb);
3246
3247                 flags &= ~ACL_START;
3248                 flags |= ACL_CONT;
3249                 do {
3250                         skb = list; list = list->next;
3251
3252                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3253                         hci_add_acl_hdr(skb, conn->handle, flags);
3254
3255                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3256
3257                         __skb_queue_tail(queue, skb);
3258                 } while (list);
3259
3260                 spin_unlock_bh(&queue->lock);
3261         }
3262 }
3263
3264 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3265 {
3266         struct hci_dev *hdev = chan->conn->hdev;
3267
3268         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3269
3270         hci_queue_acl(chan, &chan->data_q, skb, flags);
3271
3272         queue_work(hdev->workqueue, &hdev->tx_work);
3273 }
3274
3275 /* Send SCO data */
3276 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3277 {
3278         struct hci_dev *hdev = conn->hdev;
3279         struct hci_sco_hdr hdr;
3280
3281         BT_DBG("%s len %d", hdev->name, skb->len);
3282
3283         hdr.handle = cpu_to_le16(conn->handle);
3284         hdr.dlen   = skb->len;
3285
3286         skb_push(skb, HCI_SCO_HDR_SIZE);
3287         skb_reset_transport_header(skb);
3288         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3289
3290         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3291
3292         skb_queue_tail(&conn->data_q, skb);
3293         queue_work(hdev->workqueue, &hdev->tx_work);
3294 }
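
/* Note, assuming struct hci_sco_hdr from <net/bluetooth/hci.h>: the SCO
 * header carries its length in a single byte (__u8 dlen), so a SCO
 * payload can be at most 255 bytes and callers are expected to queue
 * frames no larger than the negotiated (e)SCO MTU.
 */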
3295
3296 /* Send ISO data */
3297 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3298 {
3299         struct hci_iso_hdr *hdr;
3300         int len = skb->len;
3301
3302         skb_push(skb, HCI_ISO_HDR_SIZE);
3303         skb_reset_transport_header(skb);
3304         hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3305         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3306         hdr->dlen   = cpu_to_le16(len);
3307 }
3308
3309 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3310                           struct sk_buff *skb)
3311 {
3312         struct hci_dev *hdev = conn->hdev;
3313         struct sk_buff *list;
3314         __u16 flags;
3315
3316         skb->len = skb_headlen(skb);
3317         skb->data_len = 0;
3318
3319         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3320
3321         list = skb_shinfo(skb)->frag_list;
3322
3323         flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3324         hci_add_iso_hdr(skb, conn->handle, flags);
3325
3326         if (!list) {
3327                 /* Non-fragmented */
3328                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3329
3330                 skb_queue_tail(queue, skb);
3331         } else {
3332                 /* Fragmented */
3333                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3334
3335                 skb_shinfo(skb)->frag_list = NULL;
3336
3337                 __skb_queue_tail(queue, skb);
3338
3339                 do {
3340                         skb = list; list = list->next;
3341
3342                         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3343                         flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3344                                                    0x00);
3345                         hci_add_iso_hdr(skb, conn->handle, flags);
3346
3347                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3348
3349                         __skb_queue_tail(queue, skb);
3350                 } while (list);
3351         }
3352 }
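
/* A minimal sketch of the packet-boundary flags hci_queue_iso()
 * produces, assuming the ISO_* PB definitions from
 * <net/bluetooth/hci.h>: an unfragmented SDU is tagged ISO_SINGLE,
 * while a fragmented one goes out as ISO_START, ISO_CONT..., ISO_END.
 * The helper below is illustrative only.
 */
static inline void iso_pb_flags_sketch(void)
{
	__u16 first = hci_iso_flags_pack(ISO_START, 0x00);
	__u16 mid   = hci_iso_flags_pack(ISO_CONT, 0x00);
	__u16 last  = hci_iso_flags_pack(ISO_END, 0x00);

	/* no timestamp (second argument 0x00), as in the code above */
	(void)first; (void)mid; (void)last;
}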
3353
3354 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3355 {
3356         struct hci_dev *hdev = conn->hdev;
3357
3358         BT_DBG("%s len %d", hdev->name, skb->len);
3359
3360         hci_queue_iso(conn, &conn->data_q, skb);
3361
3362         queue_work(hdev->workqueue, &hdev->tx_work);
3363 }
3364
3365 /* ---- HCI TX task (outgoing data) ---- */
3366
3367 /* HCI Connection scheduler */
3368 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3369 {
3370         struct hci_dev *hdev;
3371         int cnt, q;
3372
3373         if (!conn) {
3374                 *quote = 0;
3375                 return;
3376         }
3377
3378         hdev = conn->hdev;
3379
3380         switch (conn->type) {
3381         case ACL_LINK:
3382                 cnt = hdev->acl_cnt;
3383                 break;
3384         case AMP_LINK:
3385                 cnt = hdev->block_cnt;
3386                 break;
3387         case SCO_LINK:
3388         case ESCO_LINK:
3389                 cnt = hdev->sco_cnt;
3390                 break;
3391         case LE_LINK:
3392                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3393                 break;
3394         case ISO_LINK:
3395                 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3396                         hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3397                 break;
3398         default:
3399                 cnt = 0;
3400                 bt_dev_err(hdev, "unknown link type %d", conn->type);
3401         }
3402
3403         q = cnt / num;
3404         *quote = q ? q : 1;
3405 }
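
/* A worked example of the fair-share quota above, with illustrative
 * numbers: 8 free ACL buffers (cnt) shared by 3 connections that have
 * queued data (num) gives each connection a quota of 8 / 3 = 2 frames
 * per scheduling pass; when cnt < num the integer division yields 0
 * and the quota is clamped to 1 so no connection is starved.
 */
static inline int quote_sketch(void)
{
	int cnt = 8, num = 3;	/* illustrative numbers only */
	int q = cnt / num;	/* integer division: 2 */

	return q ? q : 1;
}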
3406
3407 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3408                                      int *quote)
3409 {
3410         struct hci_conn_hash *h = &hdev->conn_hash;
3411         struct hci_conn *conn = NULL, *c;
3412         unsigned int num = 0, min = ~0;
3413
3414         /* We don't have to lock the device here. Connections are always
3415          * added and removed with the TX task disabled. */
3416
3417         rcu_read_lock();
3418
3419         list_for_each_entry_rcu(c, &h->list, list) {
3420                 if (c->type != type || skb_queue_empty(&c->data_q))
3421                         continue;
3422
3423                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3424                         continue;
3425
3426                 num++;
3427
3428                 if (c->sent < min) {
3429                         min  = c->sent;
3430                         conn = c;
3431                 }
3432
3433                 if (hci_conn_num(hdev, type) == num)
3434                         break;
3435         }
3436
3437         rcu_read_unlock();
3438
3439         hci_quote_sent(conn, num, quote);
3440
3441         BT_DBG("conn %p quote %d", conn, *quote);
3442         return conn;
3443 }
3444
3445 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3446 {
3447         struct hci_conn_hash *h = &hdev->conn_hash;
3448         struct hci_conn *c;
3449
3450         bt_dev_err(hdev, "link tx timeout");
3451
3452         rcu_read_lock();
3453
3454         /* Kill stalled connections */
3455         list_for_each_entry_rcu(c, &h->list, list) {
3456                 if (c->type == type && c->sent) {
3457                         bt_dev_err(hdev, "killing stalled connection %pMR",
3458                                    &c->dst);
3459                         /* hci_disconnect might sleep, so, we have to release
3460                          * the RCU read lock before calling it.
3461                          */
3462                         rcu_read_unlock();
3463                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3464                         rcu_read_lock();
3465                 }
3466         }
3467
3468         rcu_read_unlock();
3469 }
3470
3471 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3472                                       int *quote)
3473 {
3474         struct hci_conn_hash *h = &hdev->conn_hash;
3475         struct hci_chan *chan = NULL;
3476         unsigned int num = 0, min = ~0, cur_prio = 0;
3477         struct hci_conn *conn;
3478         int conn_num = 0;
3479
3480         BT_DBG("%s", hdev->name);
3481
3482         rcu_read_lock();
3483
3484         list_for_each_entry_rcu(conn, &h->list, list) {
3485                 struct hci_chan *tmp;
3486
3487                 if (conn->type != type)
3488                         continue;
3489
3490                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3491                         continue;
3492
3493                 conn_num++;
3494
3495                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3496                         struct sk_buff *skb;
3497
3498                         if (skb_queue_empty(&tmp->data_q))
3499                                 continue;
3500
3501                         skb = skb_peek(&tmp->data_q);
3502                         if (skb->priority < cur_prio)
3503                                 continue;
3504
3505                         if (skb->priority > cur_prio) {
3506                                 num = 0;
3507                                 min = ~0;
3508                                 cur_prio = skb->priority;
3509                         }
3510
3511                         num++;
3512
3513                         if (conn->sent < min) {
3514                                 min  = conn->sent;
3515                                 chan = tmp;
3516                         }
3517                 }
3518
3519                 if (hci_conn_num(hdev, type) == conn_num)
3520                         break;
3521         }
3522
3523         rcu_read_unlock();
3524
3525         if (!chan)
3526                 return NULL;
3527
3528         hci_quote_sent(chan->conn, num, quote);
3529
3530         BT_DBG("chan %p quote %d", chan, *quote);
3531         return chan;
3532 }
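
/* The selection rule above, in short: among channels whose head skb
 * carries the highest priority, pick the one on the connection with
 * the fewest in-flight frames (conn->sent). A hypothetical state, for
 * illustration only:
 *
 *   chan A: head prio 5, conn->sent 3
 *   chan B: head prio 7, conn->sent 9
 *   chan C: head prio 7, conn->sent 2   <-- chosen: max prio, min sent
 */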
3533
3534 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3535 {
3536         struct hci_conn_hash *h = &hdev->conn_hash;
3537         struct hci_conn *conn;
3538         int num = 0;
3539
3540         BT_DBG("%s", hdev->name);
3541
3542         rcu_read_lock();
3543
3544         list_for_each_entry_rcu(conn, &h->list, list) {
3545                 struct hci_chan *chan;
3546
3547                 if (conn->type != type)
3548                         continue;
3549
3550                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3551                         continue;
3552
3553                 num++;
3554
3555                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3556                         struct sk_buff *skb;
3557
3558                         if (chan->sent) {
3559                                 chan->sent = 0;
3560                                 continue;
3561                         }
3562
3563                         if (skb_queue_empty(&chan->data_q))
3564                                 continue;
3565
3566                         skb = skb_peek(&chan->data_q);
3567                         if (skb->priority >= HCI_PRIO_MAX - 1)
3568                                 continue;
3569
3570                         skb->priority = HCI_PRIO_MAX - 1;
3571
3572                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3573                                skb->priority);
3574                 }
3575
3576                 if (hci_conn_num(hdev, type) == num)
3577                         break;
3578         }
3579
3580         rcu_read_unlock();
3581
3582 }
3583
3584 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3585 {
3586         /* Calculate the number of blocks used by this packet */
3587         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3588 }
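
/* A worked example with illustrative sizes: an skb of 680 bytes
 * (4-byte ACL header already pushed plus 676 bytes of payload) on a
 * controller reporting block_len = 339 needs
 * DIV_ROUND_UP(676, 339) = 2 data blocks.
 */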
3589
3590 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3591 {
3592         unsigned long last_tx;
3593
3594         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3595                 return;
3596
3597         switch (type) {
3598         case LE_LINK:
3599                 last_tx = hdev->le_last_tx;
3600                 break;
3601         default:
3602                 last_tx = hdev->acl_last_tx;
3603                 break;
3604         }
3605
3606         /* The TX timeout must be longer than the maximum link supervision
3607          * timeout (40.9 seconds).
3608          */
3609         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3610                 hci_link_tx_to(hdev, type);
3611 }
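
/* A minimal sketch of the stall test above, assuming HCI_ACL_TX_TIMEOUT
 * (45 seconds in hci_core.h, comfortably above the 40.9 second
 * supervision maximum): a link type counts as stalled only when the
 * controller reports no free buffers *and* the last TX is older than
 * the timeout; time_after() copes with jiffies wraparound. The helper
 * is illustrative only.
 */
static inline bool tx_stalled_sketch(unsigned long last_tx,
				     unsigned int cnt)
{
	return !cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT);
}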
3612
3613 /* Schedule SCO */
3614 static void hci_sched_sco(struct hci_dev *hdev)
3615 {
3616         struct hci_conn *conn;
3617         struct sk_buff *skb;
3618         int quote;
3619
3620         BT_DBG("%s", hdev->name);
3621
3622         if (!hci_conn_num(hdev, SCO_LINK))
3623                 return;
3624
3625         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3626                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3627                         BT_DBG("skb %p len %d", skb, skb->len);
3628                         hci_send_frame(hdev, skb);
3629
3630                         conn->sent++;
3631                         if (conn->sent == ~0)
3632                                 conn->sent = 0;
3633                 }
3634         }
3635 }
3636
3637 static void hci_sched_esco(struct hci_dev *hdev)
3638 {
3639         struct hci_conn *conn;
3640         struct sk_buff *skb;
3641         int quote;
3642
3643         BT_DBG("%s", hdev->name);
3644
3645         if (!hci_conn_num(hdev, ESCO_LINK))
3646                 return;
3647
3648         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3649                                                      &quote))) {
3650                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3651                         BT_DBG("skb %p len %d", skb, skb->len);
3652                         hci_send_frame(hdev, skb);
3653
3654                         conn->sent++;
3655                         if (conn->sent == ~0)
3656                                 conn->sent = 0;
3657                 }
3658         }
3659 }
3660
3661 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3662 {
3663         unsigned int cnt = hdev->acl_cnt;
3664         struct hci_chan *chan;
3665         struct sk_buff *skb;
3666         int quote;
3667
3668         __check_timeout(hdev, cnt, ACL_LINK);
3669
3670         while (hdev->acl_cnt &&
3671                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3672                 u32 priority = (skb_peek(&chan->data_q))->priority;
3673                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3674                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3675                                skb->len, skb->priority);
3676
3677                         /* Stop if priority has changed */
3678                         if (skb->priority < priority)
3679                                 break;
3680
3681                         skb = skb_dequeue(&chan->data_q);
3682
3683                         hci_conn_enter_active_mode(chan->conn,
3684                                                    bt_cb(skb)->force_active);
3685
3686                         hci_send_frame(hdev, skb);
3687                         hdev->acl_last_tx = jiffies;
3688
3689                         hdev->acl_cnt--;
3690                         chan->sent++;
3691                         chan->conn->sent++;
3692
3693                         /* Send pending SCO packets right away */
3694                         hci_sched_sco(hdev);
3695                         hci_sched_esco(hdev);
3696                 }
3697         }
3698
3699         if (cnt != hdev->acl_cnt)
3700                 hci_prio_recalculate(hdev, ACL_LINK);
3701 }
3702
3703 static void hci_sched_acl_blk(struct hci_dev *hdev)
3704 {
3705         unsigned int cnt = hdev->block_cnt;
3706         struct hci_chan *chan;
3707         struct sk_buff *skb;
3708         int quote;
3709         u8 type;
3710
3711         BT_DBG("%s", hdev->name);
3712
3713         if (hdev->dev_type == HCI_AMP)
3714                 type = AMP_LINK;
3715         else
3716                 type = ACL_LINK;
3717
3718         __check_timeout(hdev, cnt, type);
3719
3720         while (hdev->block_cnt > 0 &&
3721                (chan = hci_chan_sent(hdev, type, &quote))) {
3722                 u32 priority = (skb_peek(&chan->data_q))->priority;
3723                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3724                         int blocks;
3725
3726                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3727                                skb->len, skb->priority);
3728
3729                         /* Stop if priority has changed */
3730                         if (skb->priority < priority)
3731                                 break;
3732
3733                         skb = skb_dequeue(&chan->data_q);
3734
3735                         blocks = __get_blocks(hdev, skb);
3736                         if (blocks > hdev->block_cnt)
3737                                 return;
3738
3739                         hci_conn_enter_active_mode(chan->conn,
3740                                                    bt_cb(skb)->force_active);
3741
3742                         hci_send_frame(hdev, skb);
3743                         hdev->acl_last_tx = jiffies;
3744
3745                         hdev->block_cnt -= blocks;
3746                         quote -= blocks;
3747
3748                         chan->sent += blocks;
3749                         chan->conn->sent += blocks;
3750                 }
3751         }
3752
3753         if (cnt != hdev->block_cnt)
3754                 hci_prio_recalculate(hdev, type);
3755 }
3756
3757 static void hci_sched_acl(struct hci_dev *hdev)
3758 {
3759         BT_DBG("%s", hdev->name);
3760
3761         /* No ACL links on the BR/EDR controller, nothing to schedule */
3762         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3763                 return;
3764
3765         /* No AMP links on the AMP controller, nothing to schedule */
3766         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3767                 return;
3768
3769         switch (hdev->flow_ctl_mode) {
3770         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3771                 hci_sched_acl_pkt(hdev);
3772                 break;
3773
3774         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3775                 hci_sched_acl_blk(hdev);
3776                 break;
3777         }
3778 }
3779
3780 static void hci_sched_le(struct hci_dev *hdev)
3781 {
3782         struct hci_chan *chan;
3783         struct sk_buff *skb;
3784         int quote, cnt, tmp;
3785
3786         BT_DBG("%s", hdev->name);
3787
3788         if (!hci_conn_num(hdev, LE_LINK))
3789                 return;
3790
3791         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3792
3793         __check_timeout(hdev, cnt, LE_LINK);
3794
3795         tmp = cnt;
3796         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3797                 u32 priority = (skb_peek(&chan->data_q))->priority;
3798                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3799                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3800                                skb->len, skb->priority);
3801
3802                         /* Stop if priority has changed */
3803                         if (skb->priority < priority)
3804                                 break;
3805
3806                         skb = skb_dequeue(&chan->data_q);
3807
3808                         hci_send_frame(hdev, skb);
3809                         hdev->le_last_tx = jiffies;
3810
3811                         cnt--;
3812                         chan->sent++;
3813                         chan->conn->sent++;
3814
3815                         /* Send pending SCO packets right away */
3816                         hci_sched_sco(hdev);
3817                         hci_sched_esco(hdev);
3818                 }
3819         }
3820
3821         if (hdev->le_pkts)
3822                 hdev->le_cnt = cnt;
3823         else
3824                 hdev->acl_cnt = cnt;
3825
3826         if (cnt != tmp)
3827                 hci_prio_recalculate(hdev, LE_LINK);
3828 }
3829
3830 /* Schedule CIS */
3831 static void hci_sched_iso(struct hci_dev *hdev)
3832 {
3833         struct hci_conn *conn;
3834         struct sk_buff *skb;
3835         int quote, *cnt;
3836
3837         BT_DBG("%s", hdev->name);
3838
3839         if (!hci_conn_num(hdev, ISO_LINK))
3840                 return;
3841
3842         cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3843                 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3844         while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3845                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3846                         BT_DBG("skb %p len %d", skb, skb->len);
3847                         hci_send_frame(hdev, skb);
3848
3849                         conn->sent++;
3850                         if (conn->sent == ~0)
3851                                 conn->sent = 0;
3852                         (*cnt)--;
3853                 }
3854         }
3855 }
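
/* Buffer-pool fallback used above, mirroring hci_quote_sent():
 * controllers without dedicated ISO buffers (iso_pkts == 0) borrow the
 * LE pool, and those without a separate LE pool fall back to the
 * shared ACL pool; cnt points into whichever pool was chosen so the
 * decrement lands in the right place.
 */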
3856
3857 static void hci_tx_work(struct work_struct *work)
3858 {
3859         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3860         struct sk_buff *skb;
3861
3862         BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3863                hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3864
3865         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3866                 /* Schedule the queues and push frames to the HCI driver */
3867                 hci_sched_sco(hdev);
3868                 hci_sched_esco(hdev);
3869                 hci_sched_iso(hdev);
3870                 hci_sched_acl(hdev);
3871                 hci_sched_le(hdev);
3872         }
3873
3874         /* Drain any queued raw (unknown type) packets */
3875         while ((skb = skb_dequeue(&hdev->raw_q)))
3876                 hci_send_frame(hdev, skb);
3877 }
3878
3879 /* ----- HCI RX task (incoming data processing) ----- */
3880
3881 /* ACL data packet */
3882 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3883 {
3884         struct hci_acl_hdr *hdr = (void *) skb->data;
3885         struct hci_conn *conn;
3886         __u16 handle, flags;
3887
3888         skb_pull(skb, HCI_ACL_HDR_SIZE);
3889
3890         handle = __le16_to_cpu(hdr->handle);
3891         flags  = hci_flags(handle);
3892         handle = hci_handle(handle);
3893
3894         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3895                handle, flags);
3896
3897         hdev->stat.acl_rx++;
3898
3899         hci_dev_lock(hdev);
3900         conn = hci_conn_hash_lookup_handle(hdev, handle);
3901         hci_dev_unlock(hdev);
3902
3903         if (conn) {
3904                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3905
3906                 /* Send to upper protocol */
3907                 l2cap_recv_acldata(conn, skb, flags);
3908                 return;
3909         } else {
3910                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3911                            handle);
3912         }
3913
3914         kfree_skb(skb);
3915 }
3916
3917 /* SCO data packet */
3918 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3919 {
3920         struct hci_sco_hdr *hdr = (void *) skb->data;
3921         struct hci_conn *conn;
3922         __u16 handle, flags;
3923
3924         skb_pull(skb, HCI_SCO_HDR_SIZE);
3925
3926         handle = __le16_to_cpu(hdr->handle);
3927         flags  = hci_flags(handle);
3928         handle = hci_handle(handle);
3929
3930         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3931                handle, flags);
3932
3933         hdev->stat.sco_rx++;
3934
3935         hci_dev_lock(hdev);
3936         conn = hci_conn_hash_lookup_handle(hdev, handle);
3937         hci_dev_unlock(hdev);
3938
3939         if (conn) {
3940                 /* Send to upper protocol */
3941                 hci_skb_pkt_status(skb) = flags & 0x03;
3942                 sco_recv_scodata(conn, skb);
3943                 return;
3944         } else {
3945                 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3946                                        handle);
3947         }
3948
3949         kfree_skb(skb);
3950 }
3951
3952 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3953 {
3954         struct hci_iso_hdr *hdr;
3955         struct hci_conn *conn;
3956         __u16 handle, flags;
3957
3958         hdr = skb_pull_data(skb, sizeof(*hdr));
3959         if (!hdr) {
3960                 bt_dev_err(hdev, "ISO packet too small");
3961                 goto drop;
3962         }
3963
3964         handle = __le16_to_cpu(hdr->handle);
3965         flags  = hci_flags(handle);
3966         handle = hci_handle(handle);
3967
3968         bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3969                    handle, flags);
3970
3971         hci_dev_lock(hdev);
3972         conn = hci_conn_hash_lookup_handle(hdev, handle);
3973         hci_dev_unlock(hdev);
3974
3975         if (!conn) {
3976                 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3977                            handle);
3978                 goto drop;
3979         }
3980
3981         /* Send to upper protocol */
3982         iso_recv(conn, skb, flags);
3983         return;
3984
3985 drop:
3986         kfree_skb(skb);
3987 }
3988
3989 static bool hci_req_is_complete(struct hci_dev *hdev)
3990 {
3991         struct sk_buff *skb;
3992
3993         skb = skb_peek(&hdev->cmd_q);
3994         if (!skb)
3995                 return true;
3996
3997         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3998 }
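
/* Requests are delimited in the command queue by the HCI_REQ_START
 * flag on their first command, so "the current request is complete"
 * reduces to "the next queued command starts a new request". A
 * hypothetical queue, for illustration:
 *
 *   cmd0 [HCI_REQ_START]  cmd1  cmd2 | cmd3 [HCI_REQ_START]  cmd4
 *   \-------- request A --------/     \------- request B ------/
 */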
3999
4000 static void hci_resend_last(struct hci_dev *hdev)
4001 {
4002         struct hci_command_hdr *sent;
4003         struct sk_buff *skb;
4004         u16 opcode;
4005
4006         if (!hdev->sent_cmd)
4007                 return;
4008
4009         sent = (void *) hdev->sent_cmd->data;
4010         opcode = __le16_to_cpu(sent->opcode);
4011         if (opcode == HCI_OP_RESET)
4012                 return;
4013
4014         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4015         if (!skb)
4016                 return;
4017
4018         skb_queue_head(&hdev->cmd_q, skb);
4019         queue_work(hdev->workqueue, &hdev->cmd_work);
4020 }
4021
4022 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4023                           hci_req_complete_t *req_complete,
4024                           hci_req_complete_skb_t *req_complete_skb)
4025 {
4026         struct sk_buff *skb;
4027         unsigned long flags;
4028
4029         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4030
4031         /* If the completed command doesn't match the last one that was
4032          * sent, it needs special handling.
4033          */
4034         if (!hci_sent_cmd_data(hdev, opcode)) {
4035                 /* Some CSR-based controllers generate a spontaneous
4036                  * Reset Complete event during init, so any pending
4037                  * command will never be completed. In such a case we
4038                  * need to resend whatever the last sent command
4039                  * was.
4040                  */
4041                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4042                         hci_resend_last(hdev);
4043
4044                 return;
4045         }
4046
4047         /* If we reach this point, this event matches the last command sent */
4048         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4049
4050         /* If the command succeeded and there are still more commands in
4051          * this request, the request is not yet complete.
4052          */
4053         if (!status && !hci_req_is_complete(hdev))
4054                 return;
4055
4056         skb = hdev->req_skb;
4057
4058         /* If this was the last command in a request, the complete
4059          * callback is found in hdev->req_skb instead of the
4060          * command queue (hdev->cmd_q).
4061          */
4062         if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
4063                 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4064                 return;
4065         }
4066
4067         if (skb && bt_cb(skb)->hci.req_complete) {
4068                 *req_complete = bt_cb(skb)->hci.req_complete;
4069                 return;
4070         }
4071
4072         /* Remove all pending commands belonging to this request */
4073         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4074         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4075                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4076                         __skb_queue_head(&hdev->cmd_q, skb);
4077                         break;
4078                 }
4079
4080                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4081                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4082                 else
4083                         *req_complete = bt_cb(skb)->hci.req_complete;
4084                 dev_kfree_skb_irq(skb);
4085         }
4086         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4087 }
4088
4089 static void hci_rx_work(struct work_struct *work)
4090 {
4091         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4092         struct sk_buff *skb;
4093
4094         BT_DBG("%s", hdev->name);
4095
4096         /* The kcov_remote functions are used to collect packet-parsing
4097          * coverage information in this background thread and to associate
4098          * that coverage with the syscall thread that originally injected
4099          * the packet. This helps with fuzzing the kernel.
4100          */
4101         for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4102                 kcov_remote_start_common(skb_get_kcov_handle(skb));
4103
4104                 /* Send copy to monitor */
4105                 hci_send_to_monitor(hdev, skb);
4106
4107                 if (atomic_read(&hdev->promisc)) {
4108                         /* Send copy to the sockets */
4109                         hci_send_to_sock(hdev, skb);
4110                 }
4111
4112                 /* If the device has been opened in HCI_USER_CHANNEL,
4113                  * userspace has exclusive access to the device.
4114                  * While the device is in HCI_INIT, we still need to
4115                  * pass the packets on to the driver in order
4116                  * to complete its setup().
4117                  */
4118                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4119                     !test_bit(HCI_INIT, &hdev->flags)) {
4120                         kfree_skb(skb);
4121                         continue;
4122                 }
4123
4124                 if (test_bit(HCI_INIT, &hdev->flags)) {
4125                         /* Don't process data packets in this state. */
4126                         switch (hci_skb_pkt_type(skb)) {
4127                         case HCI_ACLDATA_PKT:
4128                         case HCI_SCODATA_PKT:
4129                         case HCI_ISODATA_PKT:
4130                                 kfree_skb(skb);
4131                                 continue;
4132                         }
4133                 }
4134
4135                 /* Process frame */
4136                 switch (hci_skb_pkt_type(skb)) {
4137                 case HCI_EVENT_PKT:
4138                         BT_DBG("%s Event packet", hdev->name);
4139                         hci_event_packet(hdev, skb);
4140                         break;
4141
4142                 case HCI_ACLDATA_PKT:
4143                         BT_DBG("%s ACL data packet", hdev->name);
4144                         hci_acldata_packet(hdev, skb);
4145                         break;
4146
4147                 case HCI_SCODATA_PKT:
4148                         BT_DBG("%s SCO data packet", hdev->name);
4149                         hci_scodata_packet(hdev, skb);
4150                         break;
4151
4152                 case HCI_ISODATA_PKT:
4153                         BT_DBG("%s ISO data packet", hdev->name);
4154                         hci_isodata_packet(hdev, skb);
4155                         break;
4156
4157                 default:
4158                         kfree_skb(skb);
4159                         break;
4160                 }
4161         }
4162 }
4163
4164 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4165 {
4166         int err;
4167
4168         bt_dev_dbg(hdev, "skb %p", skb);
4169
4170         kfree_skb(hdev->sent_cmd);
4171
4172         hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4173         if (!hdev->sent_cmd) {
4174                 skb_queue_head(&hdev->cmd_q, skb);
4175                 queue_work(hdev->workqueue, &hdev->cmd_work);
4176                 return;
4177         }
4178
4179         err = hci_send_frame(hdev, skb);
4180         if (err < 0) {
4181                 hci_cmd_sync_cancel_sync(hdev, -err);
4182                 return;
4183         }
4184
4185         if (hci_req_status_pend(hdev) &&
4186             !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4187                 kfree_skb(hdev->req_skb);
4188                 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4189         }
4190
4191         atomic_dec(&hdev->cmd_cnt);
4192 }
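
/* Command flow control, for context: hdev->cmd_cnt tracks how many
 * commands the controller is currently willing to accept (replenished
 * via the Num_HCI_Command_Packets field of Command Complete/Status
 * events), which is why it is decremented here and checked by
 * hci_cmd_work() below before dequeuing the next command.
 */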
4193
4194 static void hci_cmd_work(struct work_struct *work)
4195 {
4196         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4197         struct sk_buff *skb;
4198
4199         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4200                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4201
4202         /* Send queued commands */
4203         if (atomic_read(&hdev->cmd_cnt)) {
4204                 skb = skb_dequeue(&hdev->cmd_q);
4205                 if (!skb)
4206                         return;
4207
4208                 hci_send_cmd_sync(hdev, skb);
4209
4210                 rcu_read_lock();
4211                 if (test_bit(HCI_RESET, &hdev->flags) ||
4212                     hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4213                         cancel_delayed_work(&hdev->cmd_timer);
4214                 else
4215                         queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4216                                            HCI_CMD_TIMEOUT);
4217                 rcu_read_unlock();
4218         }
4219 }