GNU Linux-libre 5.15.137-gnu
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or is in user channel operation, there is no need
         * for the vendor callback. Instead, just store the desired value;
         * the setting will be programmed when the controller is powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}
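
/* Illustrative usage of the two entries above from userspace (a sketch,
 * assuming debugfs is mounted at /sys/kernel/debug and the controller
 * is hci0):
 *
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	# cat /sys/kernel/debug/bluetooth/hci0/vendor_diag
 *
 * Writing "Y" or "N" goes through the write handlers above; reading
 * returns the current flag state as "Y" or "N".
 */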

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}
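
/* Note on the hdev->commands[] checks used throughout these init
 * requests: the array holds the 64-octet Supported Commands bitmask
 * returned by HCI_OP_READ_LOCAL_COMMANDS, so commands[n] & (1 << m)
 * tests octet n, bit m of that table (e.g. commands[14] & 0x20 above
 * is octet 14, bit 5). The octet/bit assignments come from the
 * Supported Commands table in the Bluetooth Core Specification.
 */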

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
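
/* The 0x7d00 value above is expressed in baseband slots of 0.625 ms
 * each: 0x7d00 = 32000 slots, and 32000 * 0.625 ms = 20 seconds,
 * hence the "~20 secs" connection accept timeout.
 */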

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
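
/* The events[] array above is the 8-octet HCI event mask in
 * little-endian octet order: events[n] bit m corresponds to event mask
 * bit 8 * n + m. For example, events[4] |= 0x02 sets bit 33, which is
 * Inquiry Result with RSSI in the Set Event Mask command definition of
 * the Core Specification.
 */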

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force max_page to
                 * at least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Peripheral Broadcast central role is supported
         * enable all necessary events for it.
         */
        if (lmp_cpb_central_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Peripheral Page Response Timeout */
                events[2] |= 0x20;      /* CPB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Peripheral Broadcast peripheral role is supported
         * enable all necessary events for it.
         */
        if (lmp_cpb_peripheral_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CPB Receive */
                events[2] |= 0x04;      /* CPB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        if (hdev->commands[18] & 0x04 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports LL Privacy feature, enable
                 * the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI specification forbids mixing legacy and
                         * extended advertising commands, and
                         * READ_ADV_TX_POWER is a legacy command. Do not
                         * issue it when extended advertising is supported;
                         * otherwise the controller would return
                         * COMMAND_DISALLOWED for the extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if ((hdev->commands[38] & 0x80) &&
                    !test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) {
                        /* Read LE Min/Max Tx Power */
                        hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE Accept List Size */
                        hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE Accept List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->commands[35] & 0x04) {
                        __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

                        /* Set RPA timeout */
                        hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
                                    &rpa_timeout);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}
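
/* The LE event mask built above uses the same octet/bit layout as the
 * classic event mask, and each mask bit k enables the LE meta event
 * with subevent code k + 1: events[0] |= 0x01 (bit 0) is LE Connection
 * Complete (subevent 0x01), events[2] |= 0x02 (bit 17) is LE
 * Advertising Set Terminated (subevent 0x12), and so on.
 */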

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers claim to support deleting stored link keys,
         * but actually don't. The quirk lets a driver simply disable
         * this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Read local pairing options if the HCI command is supported */
        if (hdev->commands[41] & 0x08)
                hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set erroneous data reporting if supported to the wideband speech
         * setting value
         */
        if (hdev->commands[18] & 0x08 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
                bool enabled = hci_dev_test_flag(hdev,
                                                 HCI_WIDEBAND_SPEECH_ENABLED);

                if (enabled !=
                    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
                        struct hci_cp_write_def_err_data_reporting cp;

                        cp.err_data_reporting = enabled ?
                                                ERR_DATA_REPORTING_ENABLED :
                                                ERR_DATA_REPORTING_DISABLED;

                        hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                                    sizeof(cp), &cp);
                }
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;
        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}
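
/* The scan value passed in opt is the Write Scan Enable bitmask:
 * 0x00 disables scanning, 0x01 enables inquiry scan only, 0x02 page
 * scan only, and 0x03 both (SCAN_DISABLED, SCAN_INQUIRY and SCAN_PAGE
 * in the HCI definitions).
 */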

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
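
/* Illustrative caller pattern (a sketch): every successful
 * hci_dev_get() must be balanced with hci_dev_put() once the
 * reference is no longer needed:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		...use hdev...
 *		hci_dev_put(hdev);
 *	}
 */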

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
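
/* The walk above re-inserts the entry so that cache->resolve stays
 * ordered by ascending |RSSI| (entries already pending name resolution
 * are left in place at the front). Since inquiry RSSI values are
 * typically negative dBm, a smaller |RSSI| means a stronger signal, so
 * the strongest responders get their names resolved first.
 */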

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}
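
/* inquiry_cache_dump() walks the cache without taking any locks
 * itself, so callers are expected to hold hci_dev_lock() around it,
 * as hci_inquiry() below does.
 */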

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }
        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so fill a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
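
/* Illustrative userspace invocation (a sketch; HCIINQUIRY is the ioctl
 * that reaches this function, dd is an open HCI socket, and error
 * handling is omitted; the 255-entry sizing matches the cap above):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .flags = IREQ_CACHE_FLUSH,
 *			  .lap = { 0x33, 0x8b, 0x9e },
 *			  .length = 8, .num_rsp = 0 } };
 *
 *	ioctl(dd, HCIINQUIRY, &buf);
 *
 * On return, buf.ir.num_rsp holds the number of inquiry_info entries
 * copied back after the request structure.
 */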

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for an HCI device from
 *                                     a firmware node property.
 * @hdev:       The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables but were never updated by the firmware.
 * For example, the DTS could define 'local-bd-address' with an all-zero
 * value.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
        struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
        bdaddr_t ba;
        int ret;

        ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
                                            (u8 *)&ba, sizeof(ba));
        if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
                return;

        bacpy(&hdev->public_addr, &ba);
}
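
/* Example firmware node carrying the property (an illustrative
 * devicetree fragment; the six bytes are the BD_ADDR in little-endian
 * order, per the Bluetooth controller bindings):
 *
 *	&bluetooth {
 *		local-bd-address = [ 55 44 33 22 11 00 ];
 *	};
 *
 * An all-zero value here would be rejected by the check above.
 */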

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP) ||
            test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
                bool invalid_bdaddr;

                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set the quirk to mark the
                 * BD_ADDR invalid before creating the HCI device or in
                 * its setup callback.
                 */
                invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
                                          &hdev->quirks);

                if (ret)
                        goto setup_failed;

                if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
                        if (!bacmp(&hdev->public_addr, BDADDR_ANY))
                                hci_dev_get_bd_addr_from_property(hdev);

                        if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                            hdev->set_bdaddr) {
                                ret = hdev->set_bdaddr(hdev,
                                                       &hdev->public_addr);

                                /* If setting of the BD_ADDR from the device
                                 * property succeeds, then treat the address
                                 * as valid even if the invalid BD_ADDR
                                 * quirk indicates otherwise.
                                 */
                                if (!ret)
                                        invalid_bdaddr = false;
                        }
                }

setup_failed:
                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * For the invalid BD_ADDR quirk it is possible that
                 * it becomes a valid address if the bootloader does
                 * provide it (see above).
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    invalid_bdaddr)
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
1555                  * the Read Local Version Information command.
1556                  *
1557                  * If the set_bdaddr driver callback is provided, then
1558                  * also the original Bluetooth public device address
1559                  * will be read using the Read BD Address command.
1560                  */
1561                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1562                         ret = __hci_unconf_init(hdev);
1563         }
1564
1565         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1566                 /* If public address change is configured, ensure that
1567                  * the address gets programmed. If the driver does not
1568                  * support changing the public address, fail the power
1569                  * on procedure.
1570                  */
1571                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1572                     hdev->set_bdaddr)
1573                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1574                 else
1575                         ret = -EADDRNOTAVAIL;
1576         }
1577
1578         if (!ret) {
1579                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1580                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1581                         ret = __hci_init(hdev);
1582                         if (!ret && hdev->post_init)
1583                                 ret = hdev->post_init(hdev);
1584                 }
1585         }
1586
1587         /* If the HCI Reset command is clearing all diagnostic settings,
1588          * then they need to be reprogrammed after the init procedure
1589          * completed.
1590          */
1591         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1592             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1593             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1594                 ret = hdev->set_diag(hdev, true);
1595
1596         msft_do_open(hdev);
1597         aosp_do_open(hdev);
1598
1599         clear_bit(HCI_INIT, &hdev->flags);
1600
1601         if (!ret) {
1602                 hci_dev_hold(hdev);
1603                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1604                 hci_adv_instances_set_rpa_expired(hdev, true);
1605                 set_bit(HCI_UP, &hdev->flags);
1606                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1607                 hci_leds_update_powered(hdev, true);
1608                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1609                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1610                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1611                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1612                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1613                     hdev->dev_type == HCI_PRIMARY) {
1614                         ret = __hci_req_hci_power_on(hdev);
1615                         mgmt_power_on(hdev, ret);
1616                 }
1617         } else {
1618                 /* Init failed, cleanup */
1619                 flush_work(&hdev->tx_work);
1620
1621                 /* Since hci_rx_work() may queue new cmd_work, it should
1622                  * be flushed first to avoid an unexpected call of
1623                  * hci_cmd_work().
1624                  */
1625                 flush_work(&hdev->rx_work);
1626                 flush_work(&hdev->cmd_work);
1627
1628                 skb_queue_purge(&hdev->cmd_q);
1629                 skb_queue_purge(&hdev->rx_q);
1630
1631                 if (hdev->flush)
1632                         hdev->flush(hdev);
1633
1634                 if (hdev->sent_cmd) {
1635                         cancel_delayed_work_sync(&hdev->cmd_timer);
1636                         kfree_skb(hdev->sent_cmd);
1637                         hdev->sent_cmd = NULL;
1638                 }
1639
1640                 clear_bit(HCI_RUNNING, &hdev->flags);
1641                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1642
1643                 hdev->close(hdev);
1644                 hdev->flags &= BIT(HCI_RAW);
1645         }
1646
1647 done:
1648         hci_req_sync_unlock(hdev);
1649         return ret;
1650 }
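
/* Editor's sketch (not part of this file): how a transport driver typically
 * opts into the BD_ADDR handling above. foo_probe() and foo_set_bdaddr()
 * are hypothetical; the quirk bits, SET_HCIDEV_DEV() and the set_bdaddr
 * hook are the real interfaces used here. The parent device set via
 * SET_HCIDEV_DEV() is where the fwnode lookup for 'local-bd-address'
 * happens. A real driver also fills in the open/close/send callbacks.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct hci_dev *hdev = hci_alloc_dev();
 *
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		SET_HCIDEV_DEV(hdev, &pdev->dev);
 *		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
 *		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
 *		hdev->set_bdaddr = foo_set_bdaddr;
 *
 *		return hci_register_dev(hdev);
 *	}
 */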
1651
1652 /* ---- HCI ioctl helpers ---- */
1653
1654 int hci_dev_open(__u16 dev)
1655 {
1656         struct hci_dev *hdev;
1657         int err;
1658
1659         hdev = hci_dev_get(dev);
1660         if (!hdev)
1661                 return -ENODEV;
1662
1663         /* Devices that are marked as unconfigured can only be powered
1664          * up as user channel. Trying to bring them up as normal devices
1665          * will result in a failure. Only user channel operation is
1666          * possible.
1667          *
1668          * When this function is called for a user channel, the flag
1669          * HCI_USER_CHANNEL will be set first before attempting to
1670          * open the device.
1671          */
1672         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1673             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1674                 err = -EOPNOTSUPP;
1675                 goto done;
1676         }
1677
1678         /* We need to ensure that no other power on/off work is pending
1679          * before proceeding to call hci_dev_do_open. This is
1680          * particularly important if the setup procedure has not yet
1681          * completed.
1682          */
1683         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1684                 cancel_delayed_work(&hdev->power_off);
1685
1686         /* After this call it is guaranteed that the setup procedure
1687          * has finished. This means that error conditions like RFKILL
1688          * or no valid public or static random address apply.
1689          */
1690         flush_workqueue(hdev->req_workqueue);
1691
1692         /* For controllers not using the management interface and that
1693          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1694          * so that pairing works for them. Once the management interface
1695          * is in use this bit will be cleared again and userspace has
1696          * to explicitly enable it.
1697          */
1698         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1699             !hci_dev_test_flag(hdev, HCI_MGMT))
1700                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1701
1702         err = hci_dev_do_open(hdev);
1703
1704 done:
1705         hci_dev_put(hdev);
1706         return err;
1707 }
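
/* Editor's sketch: the legacy userspace path that reaches this function,
 * for illustration only. HCIDEVUP (and HCIDEVDOWN, which ends up in
 * hci_dev_close() below) take the device index as the ioctl argument:
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(sk, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("hci0 up");
 *
 * Errors such as -ERFKILL, -EADDRNOTAVAIL and -EOPNOTSUPP from
 * hci_dev_do_open() surface to the caller as the corresponding errno.
 */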
1708
1709 /* This function requires the caller holds hdev->lock */
1710 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1711 {
1712         struct hci_conn_params *p;
1713
1714         list_for_each_entry(p, &hdev->le_conn_params, list) {
1715                 if (p->conn) {
1716                         hci_conn_drop(p->conn);
1717                         hci_conn_put(p->conn);
1718                         p->conn = NULL;
1719                 }
1720                 list_del_init(&p->action);
1721         }
1722
1723         BT_DBG("All LE pending actions cleared");
1724 }
1725
1726 int hci_dev_do_close(struct hci_dev *hdev)
1727 {
1728         bool auto_off;
1729         int err = 0;
1730
1731         BT_DBG("%s %p", hdev->name, hdev);
1732
1733         cancel_delayed_work(&hdev->power_off);
1734         cancel_delayed_work(&hdev->ncmd_timer);
1735
1736         hci_request_cancel_all(hdev);
1737         hci_req_sync_lock(hdev);
1738
1739         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1740             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1741             test_bit(HCI_UP, &hdev->flags)) {
1742                 /* Execute vendor specific shutdown routine */
1743                 if (hdev->shutdown)
1744                         err = hdev->shutdown(hdev);
1745         }
1746
1747         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1748                 cancel_delayed_work_sync(&hdev->cmd_timer);
1749                 hci_req_sync_unlock(hdev);
1750                 return err;
1751         }
1752
1753         hci_leds_update_powered(hdev, false);
1754
1755         /* Flush RX and TX works */
1756         flush_work(&hdev->tx_work);
1757         flush_work(&hdev->rx_work);
1758
1759         if (hdev->discov_timeout > 0) {
1760                 hdev->discov_timeout = 0;
1761                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1762                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1763         }
1764
1765         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1766                 cancel_delayed_work(&hdev->service_cache);
1767
1768         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1769                 struct adv_info *adv_instance;
1770
1771                 cancel_delayed_work_sync(&hdev->rpa_expired);
1772
1773                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1774                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1775         }
1776
1777         /* Avoid potential lockdep warnings from the *_flush() calls by
1778          * ensuring the workqueue is empty up front.
1779          */
1780         drain_workqueue(hdev->workqueue);
1781
1782         hci_dev_lock(hdev);
1783
1784         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1785
1786         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1787
1788         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1789             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1790             hci_dev_test_flag(hdev, HCI_MGMT))
1791                 __mgmt_power_off(hdev);
1792
1793         hci_inquiry_cache_flush(hdev);
1794         hci_pend_le_actions_clear(hdev);
1795         hci_conn_hash_flush(hdev);
1796         hci_dev_unlock(hdev);
1797
1798         smp_unregister(hdev);
1799
1800         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1801
1802         aosp_do_close(hdev);
1803         msft_do_close(hdev);
1804
1805         if (hdev->flush)
1806                 hdev->flush(hdev);
1807
1808         /* Reset device */
1809         skb_queue_purge(&hdev->cmd_q);
1810         atomic_set(&hdev->cmd_cnt, 1);
1811         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1812             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1813                 set_bit(HCI_INIT, &hdev->flags);
1814                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1815                 clear_bit(HCI_INIT, &hdev->flags);
1816         }
1817
1818         /* flush cmd  work */
1819         flush_work(&hdev->cmd_work);
1820
1821         /* Drop queues */
1822         skb_queue_purge(&hdev->rx_q);
1823         skb_queue_purge(&hdev->cmd_q);
1824         skb_queue_purge(&hdev->raw_q);
1825
1826         /* Drop last sent command */
1827         if (hdev->sent_cmd) {
1828                 cancel_delayed_work_sync(&hdev->cmd_timer);
1829                 kfree_skb(hdev->sent_cmd);
1830                 hdev->sent_cmd = NULL;
1831         }
1832
1833         clear_bit(HCI_RUNNING, &hdev->flags);
1834         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1835
1836         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1837                 wake_up(&hdev->suspend_wait_q);
1838
1839         /* After this point our queues are empty
1840          * and no tasks are scheduled. */
1841         hdev->close(hdev);
1842
1843         /* Clear flags */
1844         hdev->flags &= BIT(HCI_RAW);
1845         hci_dev_clear_volatile_flags(hdev);
1846
1847         /* Controller radio is available but is currently powered down */
1848         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1849
1850         memset(hdev->eir, 0, sizeof(hdev->eir));
1851         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1852         bacpy(&hdev->random_addr, BDADDR_ANY);
1853
1854         hci_req_sync_unlock(hdev);
1855
1856         hci_dev_put(hdev);
1857         return err;
1858 }
1859
1860 int hci_dev_close(__u16 dev)
1861 {
1862         struct hci_dev *hdev;
1863         int err;
1864
1865         hdev = hci_dev_get(dev);
1866         if (!hdev)
1867                 return -ENODEV;
1868
1869         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1870                 err = -EBUSY;
1871                 goto done;
1872         }
1873
1874         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1875                 cancel_delayed_work(&hdev->power_off);
1876
1877         err = hci_dev_do_close(hdev);
1878
1879 done:
1880         hci_dev_put(hdev);
1881         return err;
1882 }
1883
1884 static int hci_dev_do_reset(struct hci_dev *hdev)
1885 {
1886         int ret;
1887
1888         BT_DBG("%s %p", hdev->name, hdev);
1889
1890         hci_req_sync_lock(hdev);
1891
1892         /* Drop queues */
1893         skb_queue_purge(&hdev->rx_q);
1894         skb_queue_purge(&hdev->cmd_q);
1895
1896         /* Avoid potential lockdep warnings from the *_flush() calls by
1897          * ensuring the workqueue is empty up front.
1898          */
1899         drain_workqueue(hdev->workqueue);
1900
1901         hci_dev_lock(hdev);
1902         hci_inquiry_cache_flush(hdev);
1903         hci_conn_hash_flush(hdev);
1904         hci_dev_unlock(hdev);
1905
1906         if (hdev->flush)
1907                 hdev->flush(hdev);
1908
1909         atomic_set(&hdev->cmd_cnt, 1);
1910         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1911
1912         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1913
1914         hci_req_sync_unlock(hdev);
1915         return ret;
1916 }
1917
1918 int hci_dev_reset(__u16 dev)
1919 {
1920         struct hci_dev *hdev;
1921         int err;
1922
1923         hdev = hci_dev_get(dev);
1924         if (!hdev)
1925                 return -ENODEV;
1926
1927         if (!test_bit(HCI_UP, &hdev->flags)) {
1928                 err = -ENETDOWN;
1929                 goto done;
1930         }
1931
1932         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1933                 err = -EBUSY;
1934                 goto done;
1935         }
1936
1937         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1938                 err = -EOPNOTSUPP;
1939                 goto done;
1940         }
1941
1942         err = hci_dev_do_reset(hdev);
1943
1944 done:
1945         hci_dev_put(hdev);
1946         return err;
1947 }
1948
1949 int hci_dev_reset_stat(__u16 dev)
1950 {
1951         struct hci_dev *hdev;
1952         int ret = 0;
1953
1954         hdev = hci_dev_get(dev);
1955         if (!hdev)
1956                 return -ENODEV;
1957
1958         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1959                 ret = -EBUSY;
1960                 goto done;
1961         }
1962
1963         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1964                 ret = -EOPNOTSUPP;
1965                 goto done;
1966         }
1967
1968         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1969
1970 done:
1971         hci_dev_put(hdev);
1972         return ret;
1973 }
1974
1975 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1976 {
1977         bool conn_changed, discov_changed;
1978
1979         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1980
1981         if ((scan & SCAN_PAGE))
1982                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1983                                                           HCI_CONNECTABLE);
1984         else
1985                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1986                                                            HCI_CONNECTABLE);
1987
1988         if ((scan & SCAN_INQUIRY)) {
1989                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1990                                                             HCI_DISCOVERABLE);
1991         } else {
1992                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1993                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1994                                                              HCI_DISCOVERABLE);
1995         }
1996
1997         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1998                 return;
1999
2000         if (conn_changed || discov_changed) {
2001                 /* In case this was disabled through mgmt */
2002                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2003
2004                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2005                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
2006
2007                 mgmt_new_settings(hdev);
2008         }
2009 }
2010
2011 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2012 {
2013         struct hci_dev *hdev;
2014         struct hci_dev_req dr;
2015         int err = 0;
2016
2017         if (copy_from_user(&dr, arg, sizeof(dr)))
2018                 return -EFAULT;
2019
2020         hdev = hci_dev_get(dr.dev_id);
2021         if (!hdev)
2022                 return -ENODEV;
2023
2024         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2025                 err = -EBUSY;
2026                 goto done;
2027         }
2028
2029         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2030                 err = -EOPNOTSUPP;
2031                 goto done;
2032         }
2033
2034         if (hdev->dev_type != HCI_PRIMARY) {
2035                 err = -EOPNOTSUPP;
2036                 goto done;
2037         }
2038
2039         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2040                 err = -EOPNOTSUPP;
2041                 goto done;
2042         }
2043
2044         switch (cmd) {
2045         case HCISETAUTH:
2046                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2047                                    HCI_INIT_TIMEOUT, NULL);
2048                 break;
2049
2050         case HCISETENCRYPT:
2051                 if (!lmp_encrypt_capable(hdev)) {
2052                         err = -EOPNOTSUPP;
2053                         break;
2054                 }
2055
2056                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2057                         /* Auth must be enabled first */
2058                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2059                                            HCI_INIT_TIMEOUT, NULL);
2060                         if (err)
2061                                 break;
2062                 }
2063
2064                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2065                                    HCI_INIT_TIMEOUT, NULL);
2066                 break;
2067
2068         case HCISETSCAN:
2069                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2070                                    HCI_INIT_TIMEOUT, NULL);
2071
2072                 /* Ensure that the connectable and discoverable states
2073                  * get correctly modified as this was a non-mgmt change.
2074                  */
2075                 if (!err)
2076                         hci_update_scan_state(hdev, dr.dev_opt);
2077                 break;
2078
2079         case HCISETLINKPOL:
2080                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2081                                    HCI_INIT_TIMEOUT, NULL);
2082                 break;
2083
2084         case HCISETLINKMODE:
2085                 hdev->link_mode = ((__u16) dr.dev_opt) &
2086                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2087                 break;
2088
2089         case HCISETPTYPE:
2090                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2091                         break;
2092
2093                 hdev->pkt_type = (__u16) dr.dev_opt;
2094                 mgmt_phy_configuration_changed(hdev, NULL);
2095                 break;
2096
2097         case HCISETACLMTU:
2098                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2099                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2100                 break;
2101
2102         case HCISETSCOMTU:
2103                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2104                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2105                 break;
2106
2107         default:
2108                 err = -EINVAL;
2109                 break;
2110         }
2111
2112 done:
2113         hci_dev_put(hdev);
2114         return err;
2115 }
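
/* Editor's sketch: driving one of the ioctls above from userspace, e.g.
 * making hci0 connectable and discoverable the legacy (non-mgmt) way.
 * Illustrative only:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(sk, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 *
 * hci_update_scan_state() then mirrors the change into the HCI_CONNECTABLE
 * and HCI_DISCOVERABLE flags so mgmt stays in sync.
 */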
2116
2117 int hci_get_dev_list(void __user *arg)
2118 {
2119         struct hci_dev *hdev;
2120         struct hci_dev_list_req *dl;
2121         struct hci_dev_req *dr;
2122         int n = 0, size, err;
2123         __u16 dev_num;
2124
2125         if (get_user(dev_num, (__u16 __user *) arg))
2126                 return -EFAULT;
2127
2128         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2129                 return -EINVAL;
2130
2131         size = sizeof(*dl) + dev_num * sizeof(*dr);
2132
2133         dl = kzalloc(size, GFP_KERNEL);
2134         if (!dl)
2135                 return -ENOMEM;
2136
2137         dr = dl->dev_req;
2138
2139         read_lock(&hci_dev_list_lock);
2140         list_for_each_entry(hdev, &hci_dev_list, list) {
2141                 unsigned long flags = hdev->flags;
2142
2143                 /* When the auto-off is configured it means the transport
2144                  * is running, but in that case still indicate that the
2145                  * device is actually down.
2146                  */
2147                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2148                         flags &= ~BIT(HCI_UP);
2149
2150                 (dr + n)->dev_id  = hdev->id;
2151                 (dr + n)->dev_opt = flags;
2152
2153                 if (++n >= dev_num)
2154                         break;
2155         }
2156         read_unlock(&hci_dev_list_lock);
2157
2158         dl->dev_num = n;
2159         size = sizeof(*dl) + n * sizeof(*dr);
2160
2161         err = copy_to_user(arg, dl, size);
2162         kfree(dl);
2163
2164         return err ? -EFAULT : 0;
2165 }
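
/* Editor's sketch of the matching userspace call, sized here for up to 16
 * controllers (illustrative only):
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *
 *	if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u\n", dl->dev_req[i].dev_id);
 *
 *	free(dl);
 */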
2166
2167 int hci_get_dev_info(void __user *arg)
2168 {
2169         struct hci_dev *hdev;
2170         struct hci_dev_info di;
2171         unsigned long flags;
2172         int err = 0;
2173
2174         if (copy_from_user(&di, arg, sizeof(di)))
2175                 return -EFAULT;
2176
2177         hdev = hci_dev_get(di.dev_id);
2178         if (!hdev)
2179                 return -ENODEV;
2180
2181         /* When the auto-off is configured it means the transport
2182          * is running, but in that case still indicate that the
2183          * device is actually down.
2184          */
2185         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2186                 flags = hdev->flags & ~BIT(HCI_UP);
2187         else
2188                 flags = hdev->flags;
2189
2190         strcpy(di.name, hdev->name);
2191         di.bdaddr   = hdev->bdaddr;
2192         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2193         di.flags    = flags;
2194         di.pkt_type = hdev->pkt_type;
2195         if (lmp_bredr_capable(hdev)) {
2196                 di.acl_mtu  = hdev->acl_mtu;
2197                 di.acl_pkts = hdev->acl_pkts;
2198                 di.sco_mtu  = hdev->sco_mtu;
2199                 di.sco_pkts = hdev->sco_pkts;
2200         } else {
2201                 di.acl_mtu  = hdev->le_mtu;
2202                 di.acl_pkts = hdev->le_pkts;
2203                 di.sco_mtu  = 0;
2204                 di.sco_pkts = 0;
2205         }
2206         di.link_policy = hdev->link_policy;
2207         di.link_mode   = hdev->link_mode;
2208
2209         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2210         memcpy(&di.features, &hdev->features, sizeof(di.features));
2211
2212         if (copy_to_user(arg, &di, sizeof(di)))
2213                 err = -EFAULT;
2214
2215         hci_dev_put(hdev);
2216
2217         return err;
2218 }
2219
2220 /* ---- Interface to HCI drivers ---- */
2221
2222 static int hci_rfkill_set_block(void *data, bool blocked)
2223 {
2224         struct hci_dev *hdev = data;
2225
2226         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2227
2228         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2229                 return -EBUSY;
2230
2231         if (blocked) {
2232                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2233                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2234                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2235                         hci_dev_do_close(hdev);
2236         } else {
2237                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2238         }
2239
2240         return 0;
2241 }
2242
2243 static const struct rfkill_ops hci_rfkill_ops = {
2244         .set_block = hci_rfkill_set_block,
2245 };
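
/* Editor's note: these ops are hooked up during hci_register_dev() later in
 * this file, roughly as sketched below, so that a soft block from the
 * rfkill subsystem sets HCI_RFKILLED and closes the device:
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *	if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *		rfkill_destroy(hdev->rfkill);
 *		hdev->rfkill = NULL;
 *	}
 */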
2246
2247 static void hci_power_on(struct work_struct *work)
2248 {
2249         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2250         int err;
2251
2252         BT_DBG("%s", hdev->name);
2253
2254         if (test_bit(HCI_UP, &hdev->flags) &&
2255             hci_dev_test_flag(hdev, HCI_MGMT) &&
2256             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2257                 cancel_delayed_work(&hdev->power_off);
2258                 hci_req_sync_lock(hdev);
2259                 err = __hci_req_hci_power_on(hdev);
2260                 hci_req_sync_unlock(hdev);
2261                 mgmt_power_on(hdev, err);
2262                 return;
2263         }
2264
2265         err = hci_dev_do_open(hdev);
2266         if (err < 0) {
2267                 hci_dev_lock(hdev);
2268                 mgmt_set_powered_failed(hdev, err);
2269                 hci_dev_unlock(hdev);
2270                 return;
2271         }
2272
2273         /* During the HCI setup phase, a few error conditions are
2274          * ignored and they need to be checked now. If they are still
2275          * valid, it is important to turn the device back off.
2276          */
2277         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2278             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2279             (hdev->dev_type == HCI_PRIMARY &&
2280              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2281              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2282                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2283                 hci_dev_do_close(hdev);
2284         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2285                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2286                                    HCI_AUTO_OFF_TIMEOUT);
2287         }
2288
2289         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2290                 /* For unconfigured devices, set the HCI_RAW flag
2291                  * so that userspace can easily identify them.
2292                  */
2293                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2294                         set_bit(HCI_RAW, &hdev->flags);
2295
2296                 /* For fully configured devices, this will send
2297                  * the Index Added event. For unconfigured devices,
2298                  * it will send the Unconfigured Index Added event.
2299                  *
2300                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2301                  * and no event will be sent.
2302                  */
2303                 mgmt_index_added(hdev);
2304         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2305                 /* Now that the controller is configured, it is
2306                  * important to clear the HCI_RAW flag.
2307                  */
2308                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2309                         clear_bit(HCI_RAW, &hdev->flags);
2310
2311                 /* Powering on the controller with HCI_CONFIG set only
2312                  * happens with the transition from unconfigured to
2313                  * configured. This will send the Index Added event.
2314                  */
2315                 mgmt_index_added(hdev);
2316         }
2317 }
2318
2319 static void hci_power_off(struct work_struct *work)
2320 {
2321         struct hci_dev *hdev = container_of(work, struct hci_dev,
2322                                             power_off.work);
2323
2324         BT_DBG("%s", hdev->name);
2325
2326         hci_dev_do_close(hdev);
2327 }
2328
2329 static void hci_error_reset(struct work_struct *work)
2330 {
2331         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2332
2333         BT_DBG("%s", hdev->name);
2334
2335         if (hdev->hw_error)
2336                 hdev->hw_error(hdev, hdev->hw_error_code);
2337         else
2338                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2339
2340         if (hci_dev_do_close(hdev))
2341                 return;
2342
2343         hci_dev_do_open(hdev);
2344 }
2345
2346 void hci_uuids_clear(struct hci_dev *hdev)
2347 {
2348         struct bt_uuid *uuid, *tmp;
2349
2350         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2351                 list_del(&uuid->list);
2352                 kfree(uuid);
2353         }
2354 }
2355
2356 void hci_link_keys_clear(struct hci_dev *hdev)
2357 {
2358         struct link_key *key, *tmp;
2359
2360         list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
2361                 list_del_rcu(&key->list);
2362                 kfree_rcu(key, rcu);
2363         }
2364 }
2365
2366 void hci_smp_ltks_clear(struct hci_dev *hdev)
2367 {
2368         struct smp_ltk *k, *tmp;
2369
2370         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2371                 list_del_rcu(&k->list);
2372                 kfree_rcu(k, rcu);
2373         }
2374 }
2375
2376 void hci_smp_irks_clear(struct hci_dev *hdev)
2377 {
2378         struct smp_irk *k, *tmp;
2379
2380         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2381                 list_del_rcu(&k->list);
2382                 kfree_rcu(k, rcu);
2383         }
2384 }
2385
2386 void hci_blocked_keys_clear(struct hci_dev *hdev)
2387 {
2388         struct blocked_key *b, *tmp;
2389
2390         list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
2391                 list_del_rcu(&b->list);
2392                 kfree_rcu(b, rcu);
2393         }
2394 }
2395
2396 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2397 {
2398         bool blocked = false;
2399         struct blocked_key *b;
2400
2401         rcu_read_lock();
2402         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2403                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2404                         blocked = true;
2405                         break;
2406                 }
2407         }
2408
2409         rcu_read_unlock();
2410         return blocked;
2411 }
2412
2413 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2414 {
2415         struct link_key *k;
2416
2417         rcu_read_lock();
2418         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2419                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2420                         rcu_read_unlock();
2421
2422                         if (hci_is_blocked_key(hdev,
2423                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2424                                                k->val)) {
2425                                 bt_dev_warn_ratelimited(hdev,
2426                                                         "Link key blocked for %pMR",
2427                                                         &k->bdaddr);
2428                                 return NULL;
2429                         }
2430
2431                         return k;
2432                 }
2433         }
2434         rcu_read_unlock();
2435
2436         return NULL;
2437 }
2438
2439 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2440                                u8 key_type, u8 old_key_type)
2441 {
2442         /* Legacy key */
2443         if (key_type < 0x03)
2444                 return true;
2445
2446         /* Debug keys are insecure so don't store them persistently */
2447         if (key_type == HCI_LK_DEBUG_COMBINATION)
2448                 return false;
2449
2450         /* Changed combination key and there's no previous one */
2451         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2452                 return false;
2453
2454         /* Security mode 3 case */
2455         if (!conn)
2456                 return true;
2457
2458         /* BR/EDR key derived using SC from an LE link */
2459         if (conn->type == LE_LINK)
2460                 return true;
2461
2462         /* Neither local nor remote side set no-bonding as a requirement */
2463         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2464                 return true;
2465
2466         /* Local side had dedicated bonding as requirement */
2467         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2468                 return true;
2469
2470         /* Remote side had dedicated bonding as requirement */
2471         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2472                 return true;
2473
2474         /* If none of the above criteria match, then don't store the key
2475          * persistently */
2476         return false;
2477 }
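
/* Editor's note, worked examples of the rules above: any legacy key
 * (type < 0x03, e.g. a combination key from PIN pairing) is always kept,
 * a debug combination key never is, and for SSP keys the decision follows
 * the authentication requirements, e.g. dedicated or general bonding on
 * both sides (auth_type and remote_auth > 0x01) keeps the key, while
 * no-bonding on both sides does not.
 */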
2478
2479 static u8 ltk_role(u8 type)
2480 {
2481         if (type == SMP_LTK)
2482                 return HCI_ROLE_MASTER;
2483
2484         return HCI_ROLE_SLAVE;
2485 }
2486
2487 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2488                              u8 addr_type, u8 role)
2489 {
2490         struct smp_ltk *k;
2491
2492         rcu_read_lock();
2493         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2494                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2495                         continue;
2496
2497                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2498                         rcu_read_unlock();
2499
2500                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2501                                                k->val)) {
2502                                 bt_dev_warn_ratelimited(hdev,
2503                                                         "LTK blocked for %pMR",
2504                                                         &k->bdaddr);
2505                                 return NULL;
2506                         }
2507
2508                         return k;
2509                 }
2510         }
2511         rcu_read_unlock();
2512
2513         return NULL;
2514 }
2515
2516 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2517 {
2518         struct smp_irk *irk_to_return = NULL;
2519         struct smp_irk *irk;
2520
2521         rcu_read_lock();
2522         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2523                 if (!bacmp(&irk->rpa, rpa)) {
2524                         irk_to_return = irk;
2525                         goto done;
2526                 }
2527         }
2528
2529         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2530                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2531                         bacpy(&irk->rpa, rpa);
2532                         irk_to_return = irk;
2533                         goto done;
2534                 }
2535         }
2536
2537 done:
2538         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2539                                                 irk_to_return->val)) {
2540                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2541                                         &irk_to_return->bdaddr);
2542                 irk_to_return = NULL;
2543         }
2544
2545         rcu_read_unlock();
2546
2547         return irk_to_return;
2548 }
2549
2550 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2551                                      u8 addr_type)
2552 {
2553         struct smp_irk *irk_to_return = NULL;
2554         struct smp_irk *irk;
2555
2556         /* Identity Address must be public or static random */
2557         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2558                 return NULL;
2559
2560         rcu_read_lock();
2561         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2562                 if (addr_type == irk->addr_type &&
2563                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2564                         irk_to_return = irk;
2565                         goto done;
2566                 }
2567         }
2568
2569 done:
2570
2571         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2572                                                 irk_to_return->val)) {
2573                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2574                                         &irk_to_return->bdaddr);
2575                 irk_to_return = NULL;
2576         }
2577
2578         rcu_read_unlock();
2579
2580         return irk_to_return;
2581 }
2582
2583 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2584                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2585                                   u8 pin_len, bool *persistent)
2586 {
2587         struct link_key *key, *old_key;
2588         u8 old_key_type;
2589
2590         old_key = hci_find_link_key(hdev, bdaddr);
2591         if (old_key) {
2592                 old_key_type = old_key->type;
2593                 key = old_key;
2594         } else {
2595                 old_key_type = conn ? conn->key_type : 0xff;
2596                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2597                 if (!key)
2598                         return NULL;
2599                 list_add_rcu(&key->list, &hdev->link_keys);
2600         }
2601
2602         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2603
2604         /* Some buggy controller combinations generate a changed
2605          * combination key for legacy pairing even when there's no
2606          * previous key */
2607         if (type == HCI_LK_CHANGED_COMBINATION &&
2608             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2609                 type = HCI_LK_COMBINATION;
2610                 if (conn)
2611                         conn->key_type = type;
2612         }
2613
2614         bacpy(&key->bdaddr, bdaddr);
2615         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2616         key->pin_len = pin_len;
2617
2618         if (type == HCI_LK_CHANGED_COMBINATION)
2619                 key->type = old_key_type;
2620         else
2621                 key->type = type;
2622
2623         if (persistent)
2624                 *persistent = hci_persistent_key(hdev, conn, type,
2625                                                  old_key_type);
2626
2627         return key;
2628 }
2629
2630 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2631                             u8 addr_type, u8 type, u8 authenticated,
2632                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2633 {
2634         struct smp_ltk *key, *old_key;
2635         u8 role = ltk_role(type);
2636
2637         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2638         if (old_key)
2639                 key = old_key;
2640         else {
2641                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2642                 if (!key)
2643                         return NULL;
2644                 list_add_rcu(&key->list, &hdev->long_term_keys);
2645         }
2646
2647         bacpy(&key->bdaddr, bdaddr);
2648         key->bdaddr_type = addr_type;
2649         memcpy(key->val, tk, sizeof(key->val));
2650         key->authenticated = authenticated;
2651         key->ediv = ediv;
2652         key->rand = rand;
2653         key->enc_size = enc_size;
2654         key->type = type;
2655
2656         return key;
2657 }
2658
2659 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2660                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2661 {
2662         struct smp_irk *irk;
2663
2664         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2665         if (!irk) {
2666                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2667                 if (!irk)
2668                         return NULL;
2669
2670                 bacpy(&irk->bdaddr, bdaddr);
2671                 irk->addr_type = addr_type;
2672
2673                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2674         }
2675
2676         memcpy(irk->val, val, 16);
2677         bacpy(&irk->rpa, rpa);
2678
2679         return irk;
2680 }
2681
2682 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2683 {
2684         struct link_key *key;
2685
2686         key = hci_find_link_key(hdev, bdaddr);
2687         if (!key)
2688                 return -ENOENT;
2689
2690         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2691
2692         list_del_rcu(&key->list);
2693         kfree_rcu(key, rcu);
2694
2695         return 0;
2696 }
2697
2698 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2699 {
2700         struct smp_ltk *k, *tmp;
2701         int removed = 0;
2702
2703         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2704                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2705                         continue;
2706
2707                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2708
2709                 list_del_rcu(&k->list);
2710                 kfree_rcu(k, rcu);
2711                 removed++;
2712         }
2713
2714         return removed ? 0 : -ENOENT;
2715 }
2716
2717 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2718 {
2719         struct smp_irk *k, *tmp;
2720
2721         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2722                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2723                         continue;
2724
2725                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2726
2727                 list_del_rcu(&k->list);
2728                 kfree_rcu(k, rcu);
2729         }
2730 }
2731
2732 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2733 {
2734         struct smp_ltk *k;
2735         struct smp_irk *irk;
2736         u8 addr_type;
2737
2738         if (type == BDADDR_BREDR) {
2739                 if (hci_find_link_key(hdev, bdaddr))
2740                         return true;
2741                 return false;
2742         }
2743
2744         /* Convert to HCI addr type which struct smp_ltk uses */
2745         if (type == BDADDR_LE_PUBLIC)
2746                 addr_type = ADDR_LE_DEV_PUBLIC;
2747         else
2748                 addr_type = ADDR_LE_DEV_RANDOM;
2749
2750         irk = hci_get_irk(hdev, bdaddr, addr_type);
2751         if (irk) {
2752                 bdaddr = &irk->bdaddr;
2753                 addr_type = irk->addr_type;
2754         }
2755
2756         rcu_read_lock();
2757         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2758                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2759                         rcu_read_unlock();
2760                         return true;
2761                 }
2762         }
2763         rcu_read_unlock();
2764
2765         return false;
2766 }
2767
2768 /* HCI command timer function */
2769 static void hci_cmd_timeout(struct work_struct *work)
2770 {
2771         struct hci_dev *hdev = container_of(work, struct hci_dev,
2772                                             cmd_timer.work);
2773
2774         if (hdev->sent_cmd) {
2775                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2776                 u16 opcode = __le16_to_cpu(sent->opcode);
2777
2778                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2779         } else {
2780                 bt_dev_err(hdev, "command tx timeout");
2781         }
2782
2783         if (hdev->cmd_timeout)
2784                 hdev->cmd_timeout(hdev);
2785
2786         atomic_set(&hdev->cmd_cnt, 1);
2787         queue_work(hdev->workqueue, &hdev->cmd_work);
2788 }
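
/* Editor's sketch: hdev->cmd_timeout is an optional vendor hook for
 * recovering a wedged controller. A driver with a reset line might
 * implement it roughly like this (foo_cmd_timeout, struct foo_data and
 * the GPIO are hypothetical):
 *
 *	static void foo_cmd_timeout(struct hci_dev *hdev)
 *	{
 *		struct foo_data *data = hci_get_drvdata(hdev);
 *
 *		bt_dev_err(hdev, "resetting controller");
 *		gpiod_set_value_cansleep(data->reset_gpio, 1);
 *		msleep(100);
 *		gpiod_set_value_cansleep(data->reset_gpio, 0);
 *	}
 */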
2789
2790 /* HCI ncmd timer function */
2791 static void hci_ncmd_timeout(struct work_struct *work)
2792 {
2793         struct hci_dev *hdev = container_of(work, struct hci_dev,
2794                                             ncmd_timer.work);
2795
2796         bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2797
2798         /* If the ncmd timer triggers during the HCI_INIT phase, no event
2799          * is injected, since the init procedure has its own timeout handling.
2800          */
2801         if (test_bit(HCI_INIT, &hdev->flags))
2802                 return;
2803
2804         /* This is an irrecoverable state, so inject a hardware error event */
2805         hci_reset_dev(hdev);
2806 }
2807
2808 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2809                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2810 {
2811         struct oob_data *data;
2812
2813         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2814                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2815                         continue;
2816                 if (data->bdaddr_type != bdaddr_type)
2817                         continue;
2818                 return data;
2819         }
2820
2821         return NULL;
2822 }
2823
2824 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2825                                u8 bdaddr_type)
2826 {
2827         struct oob_data *data;
2828
2829         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2830         if (!data)
2831                 return -ENOENT;
2832
2833         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2834
2835         list_del(&data->list);
2836         kfree(data);
2837
2838         return 0;
2839 }
2840
2841 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2842 {
2843         struct oob_data *data, *n;
2844
2845         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2846                 list_del(&data->list);
2847                 kfree(data);
2848         }
2849 }
2850
2851 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2852                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2853                             u8 *hash256, u8 *rand256)
2854 {
2855         struct oob_data *data;
2856
2857         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2858         if (!data) {
2859                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2860                 if (!data)
2861                         return -ENOMEM;
2862
2863                 bacpy(&data->bdaddr, bdaddr);
2864                 data->bdaddr_type = bdaddr_type;
2865                 list_add(&data->list, &hdev->remote_oob_data);
2866         }
2867
2868         if (hash192 && rand192) {
2869                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2870                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2871                 if (hash256 && rand256)
2872                         data->present = 0x03;
2873         } else {
2874                 memset(data->hash192, 0, sizeof(data->hash192));
2875                 memset(data->rand192, 0, sizeof(data->rand192));
2876                 if (hash256 && rand256)
2877                         data->present = 0x02;
2878                 else
2879                         data->present = 0x00;
2880         }
2881
2882         if (hash256 && rand256) {
2883                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2884                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2885         } else {
2886                 memset(data->hash256, 0, sizeof(data->hash256));
2887                 memset(data->rand256, 0, sizeof(data->rand256));
2888                 if (hash192 && rand192)
2889                         data->present = 0x01;
2890         }
2891
2892         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2893
2894         return 0;
2895 }
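
/* Editor's note: data->present above is a bitmask of which OOB values were
 * supplied: 0x01 = P-192 only (hash192/rand192), 0x02 = P-256 only
 * (hash256/rand256), 0x03 = both, 0x00 = neither.
 */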
2896
2897 /* This function requires the caller holds hdev->lock */
2898 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2899 {
2900         struct adv_info *adv_instance;
2901
2902         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2903                 if (adv_instance->instance == instance)
2904                         return adv_instance;
2905         }
2906
2907         return NULL;
2908 }
2909
2910 /* This function requires the caller holds hdev->lock */
2911 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2912 {
2913         struct adv_info *cur_instance;
2914
2915         cur_instance = hci_find_adv_instance(hdev, instance);
2916         if (!cur_instance)
2917                 return NULL;
2918
2919         if (cur_instance == list_last_entry(&hdev->adv_instances,
2920                                             struct adv_info, list))
2921                 return list_first_entry(&hdev->adv_instances,
2922                                                  struct adv_info, list);
2923         else
2924                 return list_next_entry(cur_instance, list);
2925 }
2926
2927 /* This function requires the caller holds hdev->lock */
2928 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2929 {
2930         struct adv_info *adv_instance;
2931
2932         adv_instance = hci_find_adv_instance(hdev, instance);
2933         if (!adv_instance)
2934                 return -ENOENT;
2935
2936         BT_DBG("%s removing instance %d", hdev->name, instance);
2937
2938         if (hdev->cur_adv_instance == instance) {
2939                 if (hdev->adv_instance_timeout) {
2940                         cancel_delayed_work(&hdev->adv_instance_expire);
2941                         hdev->adv_instance_timeout = 0;
2942                 }
2943                 hdev->cur_adv_instance = 0x00;
2944         }
2945
2946         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2947
2948         list_del(&adv_instance->list);
2949         kfree(adv_instance);
2950
2951         hdev->adv_instance_cnt--;
2952
2953         return 0;
2954 }
2955
2956 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2957 {
2958         struct adv_info *adv_instance, *n;
2959
2960         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2961                 adv_instance->rpa_expired = rpa_expired;
2962 }
2963
2964 /* This function requires the caller holds hdev->lock */
2965 void hci_adv_instances_clear(struct hci_dev *hdev)
2966 {
2967         struct adv_info *adv_instance, *n;
2968
2969         if (hdev->adv_instance_timeout) {
2970                 cancel_delayed_work(&hdev->adv_instance_expire);
2971                 hdev->adv_instance_timeout = 0;
2972         }
2973
2974         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2975                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2976                 list_del(&adv_instance->list);
2977                 kfree(adv_instance);
2978         }
2979
2980         hdev->adv_instance_cnt = 0;
2981         hdev->cur_adv_instance = 0x00;
2982 }
2983
2984 static void adv_instance_rpa_expired(struct work_struct *work)
2985 {
2986         struct adv_info *adv_instance = container_of(work, struct adv_info,
2987                                                      rpa_expired_cb.work);
2988
2989         BT_DBG("");
2990
2991         adv_instance->rpa_expired = true;
2992 }
2993
2994 /* This function requires the caller holds hdev->lock */
2995 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2996                          u16 adv_data_len, u8 *adv_data,
2997                          u16 scan_rsp_len, u8 *scan_rsp_data,
2998                          u16 timeout, u16 duration, s8 tx_power,
2999                          u32 min_interval, u32 max_interval)
3000 {
3001         struct adv_info *adv_instance;
3002
3003         adv_instance = hci_find_adv_instance(hdev, instance);
3004         if (adv_instance) {
3005                 memset(adv_instance->adv_data, 0,
3006                        sizeof(adv_instance->adv_data));
3007                 memset(adv_instance->scan_rsp_data, 0,
3008                        sizeof(adv_instance->scan_rsp_data));
3009         } else {
3010                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
3011                     instance < 1 || instance > hdev->le_num_of_adv_sets)
3012                         return -EOVERFLOW;
3013
3014                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3015                 if (!adv_instance)
3016                         return -ENOMEM;
3017
3018                 adv_instance->pending = true;
3019                 adv_instance->instance = instance;
3020                 list_add(&adv_instance->list, &hdev->adv_instances);
3021                 hdev->adv_instance_cnt++;
3022         }
3023
3024         adv_instance->flags = flags;
3025         adv_instance->adv_data_len = adv_data_len;
3026         adv_instance->scan_rsp_len = scan_rsp_len;
3027         adv_instance->min_interval = min_interval;
3028         adv_instance->max_interval = max_interval;
3029         adv_instance->tx_power = tx_power;
3030
3031         if (adv_data_len)
3032                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3033
3034         if (scan_rsp_len)
3035                 memcpy(adv_instance->scan_rsp_data,
3036                        scan_rsp_data, scan_rsp_len);
3037
3038         adv_instance->timeout = timeout;
3039         adv_instance->remaining_time = timeout;
3040
3041         if (duration == 0)
3042                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3043         else
3044                 adv_instance->duration = duration;
3045
3046         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3047                           adv_instance_rpa_expired);
3048
3049         BT_DBG("%s for instance %d", hdev->name, instance);
3050
3051         return 0;
3052 }
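/* Usage sketch (hypothetical caller, not part of the original file): a
 * minimal connectable instance registered under hdev->lock. The instance
 * number, flags and AD payload below are made up for illustration; ad[]
 * carries a single Flags element (LE General Discoverable).
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };
 *	int err;
 *
 *	hci_dev_lock(hdev);
 *	err = hci_add_adv_instance(hdev, 0x01, MGMT_ADV_FLAG_CONNECTABLE,
 *				   sizeof(ad), ad, 0, NULL, 0, 0,
 *				   HCI_TX_POWER_INVALID,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval);
 *	hci_dev_unlock(hdev);
 */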
3053
3054 /* This function requires the caller holds hdev->lock */
3055 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3056                               u16 adv_data_len, u8 *adv_data,
3057                               u16 scan_rsp_len, u8 *scan_rsp_data)
3058 {
3059         struct adv_info *adv_instance;
3060
3061         adv_instance = hci_find_adv_instance(hdev, instance);
3062
3063         /* If the advertisement doesn't exist, we can't modify its data */
3064         if (!adv_instance)
3065                 return -ENOENT;
3066
3067         if (adv_data_len) {
3068                 memset(adv_instance->adv_data, 0,
3069                        sizeof(adv_instance->adv_data));
3070                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3071                 adv_instance->adv_data_len = adv_data_len;
3072         }
3073
3074         if (scan_rsp_len) {
3075                 memset(adv_instance->scan_rsp_data, 0,
3076                        sizeof(adv_instance->scan_rsp_data));
3077                 memcpy(adv_instance->scan_rsp_data,
3078                        scan_rsp_data, scan_rsp_len);
3079                 adv_instance->scan_rsp_len = scan_rsp_len;
3080         }
3081
3082         return 0;
3083 }
3084
3085 /* This function requires the caller holds hdev->lock */
3086 void hci_adv_monitors_clear(struct hci_dev *hdev)
3087 {
3088         struct adv_monitor *monitor;
3089         int handle;
3090
3091         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3092                 hci_free_adv_monitor(hdev, monitor);
3093
3094         idr_destroy(&hdev->adv_monitors_idr);
3095 }
3096
3097 /* Frees the monitor structure and does some bookkeeping.
3098  * This function requires the caller holds hdev->lock.
3099  */
3100 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3101 {
3102         struct adv_pattern *pattern;
3103         struct adv_pattern *tmp;
3104
3105         if (!monitor)
3106                 return;
3107
3108         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3109                 list_del(&pattern->list);
3110                 kfree(pattern);
3111         }
3112
3113         if (monitor->handle)
3114                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3115
3116         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3117                 hdev->adv_monitors_cnt--;
3118                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3119         }
3120
3121         kfree(monitor);
3122 }
3123
3124 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3125 {
3126         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3127 }
3128
3129 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3130 {
3131         return mgmt_remove_adv_monitor_complete(hdev, status);
3132 }
3133
3134 /* Assigns a handle to a monitor and, if offloading is supported and power is on,
3135  * also attempts to forward the request to the controller.
3136  * Returns true if request is forwarded (result is pending), false otherwise.
3137  * This function requires the caller holds hdev->lock.
3138  */
3139 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3140                          int *err)
3141 {
3142         int min, max, handle;
3143
3144         *err = 0;
3145
3146         if (!monitor) {
3147                 *err = -EINVAL;
3148                 return false;
3149         }
3150
3151         min = HCI_MIN_ADV_MONITOR_HANDLE;
3152         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3153         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3154                            GFP_KERNEL);
3155         if (handle < 0) {
3156                 *err = handle;
3157                 return false;
3158         }
3159
3160         monitor->handle = handle;
3161
3162         if (!hdev_is_powered(hdev))
3163                 return false;
3164
3165         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3166         case HCI_ADV_MONITOR_EXT_NONE:
3167                 hci_update_background_scan(hdev);
3168                 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
3169                 /* Message was not forwarded to controller - not an error */
3170                 return false;
3171         case HCI_ADV_MONITOR_EXT_MSFT:
3172                 *err = msft_add_monitor_pattern(hdev, monitor);
3173                 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
3174                            *err);
3175                 break;
3176         }
3177
3178         return (*err == 0);
3179 }
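/* Usage sketch (hypothetical caller holding hdev->lock): how the err and
 * pending results of hci_add_adv_monitor() are meant to be consumed.
 *
 *	int err;
 *	bool pending = hci_add_adv_monitor(hdev, monitor, &err);
 *
 *	if (err)
 *		return err;	(registration failed)
 *	if (!pending)
 *		return 0;	(applied synchronously, monitor is active)
 *	(otherwise wait for hci_add_adv_patterns_monitor_complete())
 */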
3180
3181 /* Attempts to tell the controller to remove the monitor and to free it.
3182  * If the controller somehow has no corresponding handle, remove it anyway.
3183  * Returns true if request is forwarded (result is pending), false otherwise.
3184  * This function requires the caller holds hdev->lock.
3185  */
3186 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3187                                    struct adv_monitor *monitor,
3188                                    u16 handle, int *err)
3189 {
3190         *err = 0;
3191
3192         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3193         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3194                 goto free_monitor;
3195         case HCI_ADV_MONITOR_EXT_MSFT:
3196                 *err = msft_remove_monitor(hdev, monitor, handle);
3197                 break;
3198         }
3199
3200         /* In case no matching handle is registered, just free the monitor */
3201         if (*err == -ENOENT)
3202                 goto free_monitor;
3203
3204         return (*err == 0);
3205
3206 free_monitor:
3207         if (*err == -ENOENT)
3208                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3209                             monitor->handle);
3210         hci_free_adv_monitor(hdev, monitor);
3211
3212         *err = 0;
3213         return false;
3214 }
3215
3216 /* Returns true if request is forwarded (result is pending), false otherwise.
3217  * This function requires the caller holds hdev->lock.
3218  */
3219 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3220 {
3221         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3222         bool pending;
3223
3224         if (!monitor) {
3225                 *err = -EINVAL;
3226                 return false;
3227         }
3228
3229         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3230         if (!*err && !pending)
3231                 hci_update_background_scan(hdev);
3232
3233         bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
3234                    hdev->name, handle, *err, pending ? "" : "not ");
3235
3236         return pending;
3237 }
3238
3239 /* Returns true if request is forwarded (result is pending), false otherwise.
3240  * This function requires the caller holds hdev->lock.
3241  */
3242 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3243 {
3244         struct adv_monitor *monitor;
3245         int idr_next_id = 0;
3246         bool pending = false;
3247         bool update = false;
3248
3249         *err = 0;
3250
3251         while (!*err && !pending) {
3252                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3253                 if (!monitor)
3254                         break;
3255
3256                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3257
3258                 if (!*err && !pending)
3259                         update = true;
3260         }
3261
3262         if (update)
3263                 hci_update_background_scan(hdev);
3264
3265         bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
3266                    hdev->name, *err, pending ? "" : "not ");
3267
3268         return pending;
3269 }
3270
3271 /* This function requires the caller holds hdev->lock */
3272 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3273 {
3274         return !idr_is_empty(&hdev->adv_monitors_idr);
3275 }
3276
3277 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3278 {
3279         if (msft_monitor_supported(hdev))
3280                 return HCI_ADV_MONITOR_EXT_MSFT;
3281
3282         return HCI_ADV_MONITOR_EXT_NONE;
3283 }
3284
3285 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3286                                          bdaddr_t *bdaddr, u8 type)
3287 {
3288         struct bdaddr_list *b;
3289
3290         list_for_each_entry(b, bdaddr_list, list) {
3291                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3292                         return b;
3293         }
3294
3295         return NULL;
3296 }
3297
3298 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3299                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3300                                 u8 type)
3301 {
3302         struct bdaddr_list_with_irk *b;
3303
3304         list_for_each_entry(b, bdaddr_list, list) {
3305                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3306                         return b;
3307         }
3308
3309         return NULL;
3310 }
3311
3312 struct bdaddr_list_with_flags *
3313 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3314                                   bdaddr_t *bdaddr, u8 type)
3315 {
3316         struct bdaddr_list_with_flags *b;
3317
3318         list_for_each_entry(b, bdaddr_list, list) {
3319                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3320                         return b;
3321         }
3322
3323         return NULL;
3324 }
3325
3326 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3327 {
3328         struct bdaddr_list *b, *n;
3329
3330         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3331                 list_del(&b->list);
3332                 kfree(b);
3333         }
3334 }
3335
3336 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3337 {
3338         struct bdaddr_list *entry;
3339
3340         if (!bacmp(bdaddr, BDADDR_ANY))
3341                 return -EBADF;
3342
3343         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3344                 return -EEXIST;
3345
3346         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3347         if (!entry)
3348                 return -ENOMEM;
3349
3350         bacpy(&entry->bdaddr, bdaddr);
3351         entry->bdaddr_type = type;
3352
3353         list_add(&entry->list, list);
3354
3355         return 0;
3356 }
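/* Usage sketch (hypothetical caller): BDADDR_ANY is rejected with -EBADF
 * and duplicates with -EEXIST, so an idempotent accept-list update can be
 * written as:
 *
 *	err = hci_bdaddr_list_add(&hdev->le_accept_list, &bdaddr,
 *				  ADDR_LE_DEV_PUBLIC);
 *	if (err == -EEXIST)
 *		err = 0;	(already present is fine for this caller)
 */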
3357
3358 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3359                                         u8 type, u8 *peer_irk, u8 *local_irk)
3360 {
3361         struct bdaddr_list_with_irk *entry;
3362
3363         if (!bacmp(bdaddr, BDADDR_ANY))
3364                 return -EBADF;
3365
3366         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3367                 return -EEXIST;
3368
3369         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3370         if (!entry)
3371                 return -ENOMEM;
3372
3373         bacpy(&entry->bdaddr, bdaddr);
3374         entry->bdaddr_type = type;
3375
3376         if (peer_irk)
3377                 memcpy(entry->peer_irk, peer_irk, 16);
3378
3379         if (local_irk)
3380                 memcpy(entry->local_irk, local_irk, 16);
3381
3382         list_add(&entry->list, list);
3383
3384         return 0;
3385 }
3386
3387 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3388                                    u8 type, u32 flags)
3389 {
3390         struct bdaddr_list_with_flags *entry;
3391
3392         if (!bacmp(bdaddr, BDADDR_ANY))
3393                 return -EBADF;
3394
3395         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3396                 return -EEXIST;
3397
3398         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3399         if (!entry)
3400                 return -ENOMEM;
3401
3402         bacpy(&entry->bdaddr, bdaddr);
3403         entry->bdaddr_type = type;
3404         entry->current_flags = flags;
3405
3406         list_add(&entry->list, list);
3407
3408         return 0;
3409 }
3410
3411 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3412 {
3413         struct bdaddr_list *entry;
3414
3415         if (!bacmp(bdaddr, BDADDR_ANY)) {
3416                 hci_bdaddr_list_clear(list);
3417                 return 0;
3418         }
3419
3420         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3421         if (!entry)
3422                 return -ENOENT;
3423
3424         list_del(&entry->list);
3425         kfree(entry);
3426
3427         return 0;
3428 }
3429
3430 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3431                                                         u8 type)
3432 {
3433         struct bdaddr_list_with_irk *entry;
3434
3435         if (!bacmp(bdaddr, BDADDR_ANY)) {
3436                 hci_bdaddr_list_clear(list);
3437                 return 0;
3438         }
3439
3440         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3441         if (!entry)
3442                 return -ENOENT;
3443
3444         list_del(&entry->list);
3445         kfree(entry);
3446
3447         return 0;
3448 }
3449
3450 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3451                                    u8 type)
3452 {
3453         struct bdaddr_list_with_flags *entry;
3454
3455         if (!bacmp(bdaddr, BDADDR_ANY)) {
3456                 hci_bdaddr_list_clear(list);
3457                 return 0;
3458         }
3459
3460         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3461         if (!entry)
3462                 return -ENOENT;
3463
3464         list_del(&entry->list);
3465         kfree(entry);
3466
3467         return 0;
3468 }
3469
3470 /* This function requires the caller holds hdev->lock */
3471 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3472                                                bdaddr_t *addr, u8 addr_type)
3473 {
3474         struct hci_conn_params *params;
3475
3476         list_for_each_entry(params, &hdev->le_conn_params, list) {
3477                 if (bacmp(&params->addr, addr) == 0 &&
3478                     params->addr_type == addr_type) {
3479                         return params;
3480                 }
3481         }
3482
3483         return NULL;
3484 }
3485
3486 /* This function requires the caller holds hdev->lock */
3487 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3488                                                   bdaddr_t *addr, u8 addr_type)
3489 {
3490         struct hci_conn_params *param;
3491
3492         switch (addr_type) {
3493         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3494                 addr_type = ADDR_LE_DEV_PUBLIC;
3495                 break;
3496         case ADDR_LE_DEV_RANDOM_RESOLVED:
3497                 addr_type = ADDR_LE_DEV_RANDOM;
3498                 break;
3499         }
3500
3501         list_for_each_entry(param, list, action) {
3502                 if (bacmp(&param->addr, addr) == 0 &&
3503                     param->addr_type == addr_type)
3504                         return param;
3505         }
3506
3507         return NULL;
3508 }
3509
3510 /* This function requires the caller holds hdev->lock */
3511 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3512                                             bdaddr_t *addr, u8 addr_type)
3513 {
3514         struct hci_conn_params *params;
3515
3516         params = hci_conn_params_lookup(hdev, addr, addr_type);
3517         if (params)
3518                 return params;
3519
3520         params = kzalloc(sizeof(*params), GFP_KERNEL);
3521         if (!params) {
3522                 bt_dev_err(hdev, "out of memory");
3523                 return NULL;
3524         }
3525
3526         bacpy(&params->addr, addr);
3527         params->addr_type = addr_type;
3528
3529         list_add(&params->list, &hdev->le_conn_params);
3530         INIT_LIST_HEAD(&params->action);
3531
3532         params->conn_min_interval = hdev->le_conn_min_interval;
3533         params->conn_max_interval = hdev->le_conn_max_interval;
3534         params->conn_latency = hdev->le_conn_latency;
3535         params->supervision_timeout = hdev->le_supv_timeout;
3536         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3537
3538         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3539
3540         return params;
3541 }
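/* Usage sketch (hypothetical caller holding hdev->lock): since
 * hci_conn_params_add() is lookup-or-create, enabling auto-connect for a
 * peer reduces to:
 *
 *	struct hci_conn_params *params;
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */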
3542
3543 static void hci_conn_params_free(struct hci_conn_params *params)
3544 {
3545         if (params->conn) {
3546                 hci_conn_drop(params->conn);
3547                 hci_conn_put(params->conn);
3548         }
3549
3550         list_del(&params->action);
3551         list_del(&params->list);
3552         kfree(params);
3553 }
3554
3555 /* This function requires the caller holds hdev->lock */
3556 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3557 {
3558         struct hci_conn_params *params;
3559
3560         params = hci_conn_params_lookup(hdev, addr, addr_type);
3561         if (!params)
3562                 return;
3563
3564         hci_conn_params_free(params);
3565
3566         hci_update_background_scan(hdev);
3567
3568         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3569 }
3570
3571 /* This function requires the caller holds hdev->lock */
3572 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3573 {
3574         struct hci_conn_params *params, *tmp;
3575
3576         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3577                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3578                         continue;
3579
3580                 /* If trying to establish a one-time connection to a disabled
3581                  * device, leave the params but mark them as explicit connect only.
3582                  */
3583                 if (params->explicit_connect) {
3584                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3585                         continue;
3586                 }
3587
3588                 list_del(&params->list);
3589                 kfree(params);
3590         }
3591
3592         BT_DBG("All LE disabled connection parameters were removed");
3593 }
3594
3595 /* This function requires the caller holds hdev->lock */
3596 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3597 {
3598         struct hci_conn_params *params, *tmp;
3599
3600         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3601                 hci_conn_params_free(params);
3602
3603         BT_DBG("All LE connection parameters were removed");
3604 }
3605
3606 /* Copy the Identity Address of the controller.
3607  *
3608  * If the controller has a public BD_ADDR, then by default use that one.
3609  * If this is a LE only controller without a public address, default to
3610  * the static random address.
3611  *
3612  * For debugging purposes it is possible to force controllers with a
3613  * public address to use the static random address instead.
3614  *
3615  * In case BR/EDR has been disabled on a dual-mode controller and
3616  * userspace has configured a static address, then that address
3617  * becomes the identity address instead of the public BR/EDR address.
3618  */
3619 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3620                                u8 *bdaddr_type)
3621 {
3622         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3623             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3624             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3625              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3626                 bacpy(bdaddr, &hdev->static_addr);
3627                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3628         } else {
3629                 bacpy(bdaddr, &hdev->bdaddr);
3630                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3631         }
3632 }
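/* Usage sketch (hypothetical caller): consumers do not choose between the
 * public and static address themselves; they take both outputs verbatim,
 * e.g. when filling an own-address field:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *	(id_addr_type is ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM)
 */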
3633
3634 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3635 {
3636         int i;
3637
3638         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3639                 clear_bit(i, hdev->suspend_tasks);
3640
3641         wake_up(&hdev->suspend_wait_q);
3642 }
3643
3644 static int hci_suspend_wait_event(struct hci_dev *hdev)
3645 {
3646 #define WAKE_COND                                                              \
3647         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3648          __SUSPEND_NUM_TASKS)
3649
3650         int i;
3651         int ret = wait_event_timeout(hdev->suspend_wait_q,
3652                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3653
3654         if (ret == 0) {
3655                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3656                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3657                         if (test_bit(i, hdev->suspend_tasks))
3658                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3659                         clear_bit(i, hdev->suspend_tasks);
3660                 }
3661
3662                 ret = -ETIMEDOUT;
3663         } else {
3664                 ret = 0;
3665         }
3666
3667         return ret;
3668 }
3669
3670 static void hci_prepare_suspend(struct work_struct *work)
3671 {
3672         struct hci_dev *hdev =
3673                 container_of(work, struct hci_dev, suspend_prepare);
3674
3675         hci_dev_lock(hdev);
3676         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3677         hci_dev_unlock(hdev);
3678 }
3679
3680 static int hci_change_suspend_state(struct hci_dev *hdev,
3681                                     enum suspended_state next)
3682 {
3683         hdev->suspend_state_next = next;
3684         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3685         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3686         return hci_suspend_wait_event(hdev);
3687 }
3688
3689 static void hci_clear_wake_reason(struct hci_dev *hdev)
3690 {
3691         hci_dev_lock(hdev);
3692
3693         hdev->wake_reason = 0;
3694         bacpy(&hdev->wake_addr, BDADDR_ANY);
3695         hdev->wake_addr_type = 0;
3696
3697         hci_dev_unlock(hdev);
3698 }
3699
3700 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3701                                 void *data)
3702 {
3703         struct hci_dev *hdev =
3704                 container_of(nb, struct hci_dev, suspend_notifier);
3705         int ret = 0;
3706         u8 state = BT_RUNNING;
3707
3708         /* If powering down, wait for completion. */
3709         if (mgmt_powering_down(hdev)) {
3710                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3711                 ret = hci_suspend_wait_event(hdev);
3712                 if (ret)
3713                         goto done;
3714         }
3715
3716         /* Suspend notifier should only act on events when powered. */
3717         if (!hdev_is_powered(hdev) ||
3718             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3719                 goto done;
3720
3721         if (action == PM_SUSPEND_PREPARE) {
3722                 /* Suspend consists of two actions:
3723                  *  - First, disconnect everything and make the controller not
3724                  *    connectable (disabling scanning)
3725                  *  - Second, program event filter/accept list and enable scan
3726                  */
3727                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3728                 if (!ret)
3729                         state = BT_SUSPEND_DISCONNECT;
3730
3731                 /* Only configure accept list if disconnect succeeded and wake
3732                  * isn't being prevented.
3733                  */
3734                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3735                         ret = hci_change_suspend_state(hdev,
3736                                                 BT_SUSPEND_CONFIGURE_WAKE);
3737                         if (!ret)
3738                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3739                 }
3740
3741                 hci_clear_wake_reason(hdev);
3742                 mgmt_suspending(hdev, state);
3743
3744         } else if (action == PM_POST_SUSPEND) {
3745                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3746
3747                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3748                               hdev->wake_addr_type);
3749         }
3750
3751 done:
3752         /* We always allow suspend even if suspend preparation failed, and
3753          * attempt to recover in resume.
3754          */
3755         if (ret)
3756                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3757                            action, ret);
3758
3759         return NOTIFY_DONE;
3760 }
3761
3762 /* Alloc HCI device */
3763 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
3764 {
3765         struct hci_dev *hdev;
3766         unsigned int alloc_size;
3767
3768         alloc_size = sizeof(*hdev);
3769         if (sizeof_priv) {
3770                 /* FIXME: may need ALIGN()-style alignment? */
3771                 alloc_size += sizeof_priv;
3772         }
3773
3774         hdev = kzalloc(alloc_size, GFP_KERNEL);
3775         if (!hdev)
3776                 return NULL;
3777
3778         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3779         hdev->esco_type = (ESCO_HV1);
3780         hdev->link_mode = (HCI_LM_ACCEPT);
3781         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3782         hdev->io_capability = 0x03;     /* No Input No Output */
3783         hdev->manufacturer = 0xffff;    /* Default to internal use */
3784         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3785         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3786         hdev->adv_instance_cnt = 0;
3787         hdev->cur_adv_instance = 0x00;
3788         hdev->adv_instance_timeout = 0;
3789
3790         hdev->advmon_allowlist_duration = 300;
3791         hdev->advmon_no_filter_duration = 500;
3792         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
3793
3794         hdev->sniff_max_interval = 800;
3795         hdev->sniff_min_interval = 80;
3796
3797         hdev->le_adv_channel_map = 0x07;
3798         hdev->le_adv_min_interval = 0x0800;
3799         hdev->le_adv_max_interval = 0x0800;
3800         hdev->le_scan_interval = 0x0060;
3801         hdev->le_scan_window = 0x0030;
3802         hdev->le_scan_int_suspend = 0x0400;
3803         hdev->le_scan_window_suspend = 0x0012;
3804         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3805         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3806         hdev->le_scan_int_adv_monitor = 0x0060;
3807         hdev->le_scan_window_adv_monitor = 0x0030;
3808         hdev->le_scan_int_connect = 0x0060;
3809         hdev->le_scan_window_connect = 0x0060;
3810         hdev->le_conn_min_interval = 0x0018;
3811         hdev->le_conn_max_interval = 0x0028;
3812         hdev->le_conn_latency = 0x0000;
3813         hdev->le_supv_timeout = 0x002a;
3814         hdev->le_def_tx_len = 0x001b;
3815         hdev->le_def_tx_time = 0x0148;
3816         hdev->le_max_tx_len = 0x001b;
3817         hdev->le_max_tx_time = 0x0148;
3818         hdev->le_max_rx_len = 0x001b;
3819         hdev->le_max_rx_time = 0x0148;
3820         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3821         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3822         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3823         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3824         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3825         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3826         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3827         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3828         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3829
3830         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3831         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3832         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3833         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3834         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3835         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3836
3837         /* default 1.28 sec page scan */
3838         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3839         hdev->def_page_scan_int = 0x0800;
3840         hdev->def_page_scan_window = 0x0012;
3841
3842         mutex_init(&hdev->lock);
3843         mutex_init(&hdev->req_lock);
3844
3845         INIT_LIST_HEAD(&hdev->mgmt_pending);
3846         INIT_LIST_HEAD(&hdev->reject_list);
3847         INIT_LIST_HEAD(&hdev->accept_list);
3848         INIT_LIST_HEAD(&hdev->uuids);
3849         INIT_LIST_HEAD(&hdev->link_keys);
3850         INIT_LIST_HEAD(&hdev->long_term_keys);
3851         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3852         INIT_LIST_HEAD(&hdev->remote_oob_data);
3853         INIT_LIST_HEAD(&hdev->le_accept_list);
3854         INIT_LIST_HEAD(&hdev->le_resolv_list);
3855         INIT_LIST_HEAD(&hdev->le_conn_params);
3856         INIT_LIST_HEAD(&hdev->pend_le_conns);
3857         INIT_LIST_HEAD(&hdev->pend_le_reports);
3858         INIT_LIST_HEAD(&hdev->conn_hash.list);
3859         INIT_LIST_HEAD(&hdev->adv_instances);
3860         INIT_LIST_HEAD(&hdev->blocked_keys);
3861
3862         INIT_WORK(&hdev->rx_work, hci_rx_work);
3863         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3864         INIT_WORK(&hdev->tx_work, hci_tx_work);
3865         INIT_WORK(&hdev->power_on, hci_power_on);
3866         INIT_WORK(&hdev->error_reset, hci_error_reset);
3867         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3868
3869         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3870
3871         skb_queue_head_init(&hdev->rx_q);
3872         skb_queue_head_init(&hdev->cmd_q);
3873         skb_queue_head_init(&hdev->raw_q);
3874
3875         init_waitqueue_head(&hdev->req_wait_q);
3876         init_waitqueue_head(&hdev->suspend_wait_q);
3877
3878         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3879         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3880
3881         hci_request_setup(hdev);
3882
3883         hci_init_sysfs(hdev);
3884         discovery_init(hdev);
3885
3886         return hdev;
3887 }
3888 EXPORT_SYMBOL(hci_alloc_dev_priv);
3889
3890 /* Free HCI device */
3891 void hci_free_dev(struct hci_dev *hdev)
3892 {
3893         /* Will be freed via the device release callback */
3894         put_device(&hdev->dev);
3895 }
3896 EXPORT_SYMBOL(hci_free_dev);
3897
3898 /* Register HCI device */
3899 int hci_register_dev(struct hci_dev *hdev)
3900 {
3901         int id, error;
3902
3903         if (!hdev->open || !hdev->close || !hdev->send)
3904                 return -EINVAL;
3905
3906         /* Do not allow HCI_AMP devices to register at index 0,
3907          * so the index can be used as the AMP controller ID.
3908          */
3909         switch (hdev->dev_type) {
3910         case HCI_PRIMARY:
3911                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3912                 break;
3913         case HCI_AMP:
3914                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3915                 break;
3916         default:
3917                 return -EINVAL;
3918         }
3919
3920         if (id < 0)
3921                 return id;
3922
3923         error = dev_set_name(&hdev->dev, "hci%u", id);
3924         if (error)
3925                 return error;
3926
3927         hdev->name = dev_name(&hdev->dev);
3928         hdev->id = id;
3929
3930         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3931
3932         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3933         if (!hdev->workqueue) {
3934                 error = -ENOMEM;
3935                 goto err;
3936         }
3937
3938         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3939                                                       hdev->name);
3940         if (!hdev->req_workqueue) {
3941                 destroy_workqueue(hdev->workqueue);
3942                 error = -ENOMEM;
3943                 goto err;
3944         }
3945
3946         if (!IS_ERR_OR_NULL(bt_debugfs))
3947                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3948
3949         error = device_add(&hdev->dev);
3950         if (error < 0)
3951                 goto err_wqueue;
3952
3953         hci_leds_init(hdev);
3954
3955         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3956                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3957                                     hdev);
3958         if (hdev->rfkill) {
3959                 if (rfkill_register(hdev->rfkill) < 0) {
3960                         rfkill_destroy(hdev->rfkill);
3961                         hdev->rfkill = NULL;
3962                 }
3963         }
3964
3965         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3966                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3967
3968         hci_dev_set_flag(hdev, HCI_SETUP);
3969         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3970
3971         if (hdev->dev_type == HCI_PRIMARY) {
3972                 /* Assume BR/EDR support until proven otherwise (such as
3973                  * through reading supported features during init).
3974                  */
3975                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3976         }
3977
3978         write_lock(&hci_dev_list_lock);
3979         list_add(&hdev->list, &hci_dev_list);
3980         write_unlock(&hci_dev_list_lock);
3981
3982         /* Devices that are marked for raw-only usage are unconfigured
3983          * and should not be included in normal operation.
3984          */
3985         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3986                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3987
3988         hci_sock_dev_event(hdev, HCI_DEV_REG);
3989         hci_dev_hold(hdev);
3990
3991         if (!hdev->suspend_notifier.notifier_call &&
3992             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3993                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3994                 error = register_pm_notifier(&hdev->suspend_notifier);
3995                 if (error)
3996                         goto err_wqueue;
3997         }
3998
3999         queue_work(hdev->req_workqueue, &hdev->power_on);
4000
4001         idr_init(&hdev->adv_monitors_idr);
4002
4003         return id;
4004
4005 err_wqueue:
4006         debugfs_remove_recursive(hdev->debugfs);
4007         destroy_workqueue(hdev->workqueue);
4008         destroy_workqueue(hdev->req_workqueue);
4009 err:
4010         ida_simple_remove(&hci_index_ida, hdev->id);
4011
4012         return error;
4013 }
4014 EXPORT_SYMBOL(hci_register_dev);
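/* Driver-side sketch (hypothetical, loosely modelled on existing HCI
 * transport drivers): registration fails without open/close/send, so a
 * minimal probe pairs hci_alloc_dev() with those callbacks and unwinds
 * with hci_free_dev() on failure. The example_* callbacks are made up.
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = example_open;
 *	hdev->close = example_close;
 *	hdev->send  = example_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */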
4015
4016 /* Unregister HCI device */
4017 void hci_unregister_dev(struct hci_dev *hdev)
4018 {
4019         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4020
4021         hci_dev_set_flag(hdev, HCI_UNREGISTER);
4022
4023         write_lock(&hci_dev_list_lock);
4024         list_del(&hdev->list);
4025         write_unlock(&hci_dev_list_lock);
4026
4027         cancel_work_sync(&hdev->power_on);
4028
4029         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4030                 hci_suspend_clear_tasks(hdev);
4031                 unregister_pm_notifier(&hdev->suspend_notifier);
4032                 cancel_work_sync(&hdev->suspend_prepare);
4033         }
4034
4035         hci_dev_do_close(hdev);
4036
4037         if (!test_bit(HCI_INIT, &hdev->flags) &&
4038             !hci_dev_test_flag(hdev, HCI_SETUP) &&
4039             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4040                 hci_dev_lock(hdev);
4041                 mgmt_index_removed(hdev);
4042                 hci_dev_unlock(hdev);
4043         }
4044
4045         /* mgmt_index_removed should take care of emptying the
4046          * pending list */
4047         BUG_ON(!list_empty(&hdev->mgmt_pending));
4048
4049         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4050
4051         if (hdev->rfkill) {
4052                 rfkill_unregister(hdev->rfkill);
4053                 rfkill_destroy(hdev->rfkill);
4054         }
4055
4056         device_del(&hdev->dev);
4057         /* Actual cleanup is deferred until hci_release_dev(). */
4058         hci_dev_put(hdev);
4059 }
4060 EXPORT_SYMBOL(hci_unregister_dev);
4061
4062 /* Release HCI device */
4063 void hci_release_dev(struct hci_dev *hdev)
4064 {
4065         debugfs_remove_recursive(hdev->debugfs);
4066         kfree_const(hdev->hw_info);
4067         kfree_const(hdev->fw_info);
4068
4069         destroy_workqueue(hdev->workqueue);
4070         destroy_workqueue(hdev->req_workqueue);
4071
4072         hci_dev_lock(hdev);
4073         hci_bdaddr_list_clear(&hdev->reject_list);
4074         hci_bdaddr_list_clear(&hdev->accept_list);
4075         hci_uuids_clear(hdev);
4076         hci_link_keys_clear(hdev);
4077         hci_smp_ltks_clear(hdev);
4078         hci_smp_irks_clear(hdev);
4079         hci_remote_oob_data_clear(hdev);
4080         hci_adv_instances_clear(hdev);
4081         hci_adv_monitors_clear(hdev);
4082         hci_bdaddr_list_clear(&hdev->le_accept_list);
4083         hci_bdaddr_list_clear(&hdev->le_resolv_list);
4084         hci_conn_params_clear_all(hdev);
4085         hci_discovery_filter_clear(hdev);
4086         hci_blocked_keys_clear(hdev);
4087         hci_dev_unlock(hdev);
4088
4089         ida_simple_remove(&hci_index_ida, hdev->id);
4090         kfree_skb(hdev->sent_cmd);
4091         kfree(hdev);
4092 }
4093 EXPORT_SYMBOL(hci_release_dev);
4094
4095 /* Suspend HCI device */
4096 int hci_suspend_dev(struct hci_dev *hdev)
4097 {
4098         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4099         return 0;
4100 }
4101 EXPORT_SYMBOL(hci_suspend_dev);
4102
4103 /* Resume HCI device */
4104 int hci_resume_dev(struct hci_dev *hdev)
4105 {
4106         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4107         return 0;
4108 }
4109 EXPORT_SYMBOL(hci_resume_dev);
4110
4111 /* Reset HCI device */
4112 int hci_reset_dev(struct hci_dev *hdev)
4113 {
4114         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4115         struct sk_buff *skb;
4116
4117         skb = bt_skb_alloc(3, GFP_ATOMIC);
4118         if (!skb)
4119                 return -ENOMEM;
4120
4121         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4122         skb_put_data(skb, hw_err, 3);
4123
4124         bt_dev_err(hdev, "Injecting HCI hardware error event");
4125
4126         /* Send Hardware Error to upper stack */
4127         return hci_recv_frame(hdev, skb);
4128 }
4129 EXPORT_SYMBOL(hci_reset_dev);
4130
4131 /* Receive frame from HCI drivers */
4132 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4133 {
4134         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4135                       && !test_bit(HCI_INIT, &hdev->flags))) {
4136                 kfree_skb(skb);
4137                 return -ENXIO;
4138         }
4139
4140         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4141             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4142             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4143             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4144                 kfree_skb(skb);
4145                 return -EINVAL;
4146         }
4147
4148         /* Incoming skb */
4149         bt_cb(skb)->incoming = 1;
4150
4151         /* Time stamp */
4152         __net_timestamp(skb);
4153
4154         skb_queue_tail(&hdev->rx_q, skb);
4155         queue_work(hdev->workqueue, &hdev->rx_work);
4156
4157         return 0;
4158 }
4159 EXPORT_SYMBOL(hci_recv_frame);
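/* Driver RX sketch (hypothetical): a transport driver hands a completed
 * packet up with the packet type already set. hci_recv_frame() consumes
 * the skb on every path, so the caller must not kfree_skb() on error.
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, data, len);
 *	return hci_recv_frame(hdev, skb);
 */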
4160
4161 /* Receive diagnostic message from HCI drivers */
4162 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4163 {
4164         /* Mark as diagnostic packet */
4165         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4166
4167         /* Time stamp */
4168         __net_timestamp(skb);
4169
4170         skb_queue_tail(&hdev->rx_q, skb);
4171         queue_work(hdev->workqueue, &hdev->rx_work);
4172
4173         return 0;
4174 }
4175 EXPORT_SYMBOL(hci_recv_diag);
4176
4177 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4178 {
4179         va_list vargs;
4180
4181         va_start(vargs, fmt);
4182         kfree_const(hdev->hw_info);
4183         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4184         va_end(vargs);
4185 }
4186 EXPORT_SYMBOL(hci_set_hw_info);
4187
4188 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4189 {
4190         va_list vargs;
4191
4192         va_start(vargs, fmt);
4193         kfree_const(hdev->fw_info);
4194         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4195         va_end(vargs);
4196 }
4197 EXPORT_SYMBOL(hci_set_fw_info);
4198
4199 /* ---- Interface to upper protocols ---- */
4200
4201 int hci_register_cb(struct hci_cb *cb)
4202 {
4203         BT_DBG("%p name %s", cb, cb->name);
4204
4205         mutex_lock(&hci_cb_list_lock);
4206         list_add_tail(&cb->list, &hci_cb_list);
4207         mutex_unlock(&hci_cb_list_lock);
4208
4209         return 0;
4210 }
4211 EXPORT_SYMBOL(hci_register_cb);
4212
4213 int hci_unregister_cb(struct hci_cb *cb)
4214 {
4215         BT_DBG("%p name %s", cb, cb->name);
4216
4217         mutex_lock(&hci_cb_list_lock);
4218         list_del(&cb->list);
4219         mutex_unlock(&hci_cb_list_lock);
4220
4221         return 0;
4222 }
4223 EXPORT_SYMBOL(hci_unregister_cb);
4224
4225 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4226 {
4227         int err;
4228
4229         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4230                skb->len);
4231
4232         /* Time stamp */
4233         __net_timestamp(skb);
4234
4235         /* Send copy to monitor */
4236         hci_send_to_monitor(hdev, skb);
4237
4238         if (atomic_read(&hdev->promisc)) {
4239                 /* Send copy to the sockets */
4240                 hci_send_to_sock(hdev, skb);
4241         }
4242
4243         /* Get rid of skb owner, prior to sending to the driver. */
4244         skb_orphan(skb);
4245
4246         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4247                 kfree_skb(skb);
4248                 return;
4249         }
4250
4251         err = hdev->send(hdev, skb);
4252         if (err < 0) {
4253                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4254                 kfree_skb(skb);
4255         }
4256 }
4257
4258 /* Send HCI command */
4259 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4260                  const void *param)
4261 {
4262         struct sk_buff *skb;
4263
4264         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4265
4266         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4267         if (!skb) {
4268                 bt_dev_err(hdev, "no memory for command");
4269                 return -ENOMEM;
4270         }
4271
4272         /* Stand-alone HCI commands must be flagged as
4273          * single-command requests.
4274          */
4275         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4276
4277         skb_queue_tail(&hdev->cmd_q, skb);
4278         queue_work(hdev->workqueue, &hdev->cmd_work);
4279
4280         return 0;
4281 }
4282
4283 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4284                    const void *param)
4285 {
4286         struct sk_buff *skb;
4287
4288         if (hci_opcode_ogf(opcode) != 0x3f) {
4289                 /* A controller receiving a command shall respond with either
4290                  * a Command Status Event or a Command Complete Event.
4291                  * Therefore, all standard HCI commands must be sent via the
4292                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4293                  * Some vendors do not comply with this rule for vendor-specific
4294                  * commands and do not return any event. We want to support
4295                  * unresponded commands for such cases only.
4296                  */
4297                 bt_dev_err(hdev, "unresponded command not supported");
4298                 return -EINVAL;
4299         }
4300
4301         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4302         if (!skb) {
4303                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4304                            opcode);
4305                 return -ENOMEM;
4306         }
4307
4308         hci_send_frame(hdev, skb);
4309
4310         return 0;
4311 }
4312 EXPORT_SYMBOL(__hci_cmd_send);
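/* Usage sketch (hypothetical vendor setup code): __hci_cmd_send() is the
 * fire-and-forget path for OGF 0x3f only, e.g. for a vendor command known
 * to generate no event at all. The opcode and parameter are made up.
 *
 *	u8 param[] = { 0x01 };
 *
 *	err = __hci_cmd_send(hdev, 0xfc0f, sizeof(param), param);
 */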
4313
4314 /* Get data from the previously sent command */
4315 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4316 {
4317         struct hci_command_hdr *hdr;
4318
4319         if (!hdev->sent_cmd)
4320                 return NULL;
4321
4322         hdr = (void *) hdev->sent_cmd->data;
4323
4324         if (hdr->opcode != cpu_to_le16(opcode))
4325                 return NULL;
4326
4327         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4328
4329         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4330 }
4331
4332 /* Send HCI command and wait for command complete event */
4333 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4334                              const void *param, u32 timeout)
4335 {
4336         struct sk_buff *skb;
4337
4338         if (!test_bit(HCI_UP, &hdev->flags))
4339                 return ERR_PTR(-ENETDOWN);
4340
4341         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4342
4343         hci_req_sync_lock(hdev);
4344         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4345         hci_req_sync_unlock(hdev);
4346
4347         return skb;
4348 }
4349 EXPORT_SYMBOL(hci_cmd_sync);
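/* Usage sketch (hypothetical caller): on success hci_cmd_sync() returns
 * the Command Complete parameters as an skb that the caller must free.
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	(... parse skb->data ...)
 *	kfree_skb(skb);
 */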
4350
4351 /* Send ACL data */
4352 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4353 {
4354         struct hci_acl_hdr *hdr;
4355         int len = skb->len;
4356
4357         skb_push(skb, HCI_ACL_HDR_SIZE);
4358         skb_reset_transport_header(skb);
4359         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4360         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4361         hdr->dlen   = cpu_to_le16(len);
4362 }
4363
4364 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4365                           struct sk_buff *skb, __u16 flags)
4366 {
4367         struct hci_conn *conn = chan->conn;
4368         struct hci_dev *hdev = conn->hdev;
4369         struct sk_buff *list;
4370
4371         skb->len = skb_headlen(skb);
4372         skb->data_len = 0;
4373
4374         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4375
4376         switch (hdev->dev_type) {
4377         case HCI_PRIMARY:
4378                 hci_add_acl_hdr(skb, conn->handle, flags);
4379                 break;
4380         case HCI_AMP:
4381                 hci_add_acl_hdr(skb, chan->handle, flags);
4382                 break;
4383         default:
4384                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4385                 return;
4386         }
4387
4388         list = skb_shinfo(skb)->frag_list;
4389         if (!list) {
4390                 /* Non fragmented */
4391                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4392
4393                 skb_queue_tail(queue, skb);
4394         } else {
4395                 /* Fragmented */
4396                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4397
4398                 skb_shinfo(skb)->frag_list = NULL;
4399
4400                 /* Queue all fragments atomically. We need to use spin_lock_bh
4401                  * here because with 6LoWPAN links this function is called
4402                  * from softirq context, and using a normal spin lock could
4403                  * cause deadlocks.
4404                  */
4405                 spin_lock_bh(&queue->lock);
4406
4407                 __skb_queue_tail(queue, skb);
4408
4409                 flags &= ~ACL_START;
4410                 flags |= ACL_CONT;
4411                 do {
4412                         skb = list; list = list->next;
4413
4414                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4415                         hci_add_acl_hdr(skb, conn->handle, flags);
4416
4417                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4418
4419                         __skb_queue_tail(queue, skb);
4420                 } while (list);
4421
4422                 spin_unlock_bh(&queue->lock);
4423         }
4424 }
4425
4426 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4427 {
4428         struct hci_dev *hdev = chan->conn->hdev;
4429
4430         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4431
4432         hci_queue_acl(chan, &chan->data_q, skb, flags);
4433
4434         queue_work(hdev->workqueue, &hdev->tx_work);
4435 }
4436
4437 /* Send SCO data */
4438 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4439 {
4440         struct hci_dev *hdev = conn->hdev;
4441         struct hci_sco_hdr hdr;
4442
4443         BT_DBG("%s len %d", hdev->name, skb->len);
4444
4445         hdr.handle = cpu_to_le16(conn->handle);
4446         hdr.dlen   = skb->len;
4447
4448         skb_push(skb, HCI_SCO_HDR_SIZE);
4449         skb_reset_transport_header(skb);
4450         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4451
4452         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4453
4454         skb_queue_tail(&conn->data_q, skb);
4455         queue_work(hdev->workqueue, &hdev->tx_work);
4456 }
4457
4458 /* ---- HCI TX task (outgoing data) ---- */
4459
4460 /* HCI Connection scheduler */
4461 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4462                                      int *quote)
4463 {
4464         struct hci_conn_hash *h = &hdev->conn_hash;
4465         struct hci_conn *conn = NULL, *c;
4466         unsigned int num = 0, min = ~0;
4467
4468         /* We don't have to lock device here. Connections are always
4469          * added and removed with TX task disabled. */
4470
4471         rcu_read_lock();
4472
4473         list_for_each_entry_rcu(c, &h->list, list) {
4474                 if (c->type != type || skb_queue_empty(&c->data_q))
4475                         continue;
4476
4477                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4478                         continue;
4479
4480                 num++;
4481
4482                 if (c->sent < min) {
4483                         min  = c->sent;
4484                         conn = c;
4485                 }
4486
4487                 if (hci_conn_num(hdev, type) == num)
4488                         break;
4489         }
4490
4491         rcu_read_unlock();
4492
4493         if (conn) {
4494                 int cnt, q;
4495
4496                 switch (conn->type) {
4497                 case ACL_LINK:
4498                         cnt = hdev->acl_cnt;
4499                         break;
4500                 case SCO_LINK:
4501                 case ESCO_LINK:
4502                         cnt = hdev->sco_cnt;
4503                         break;
4504                 case LE_LINK:
4505                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4506                         break;
4507                 default:
4508                         cnt = 0;
4509                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4510                 }
4511
4512                 q = cnt / num;
4513                 *quote = q ? q : 1;
4514         } else
4515                 *quote = 0;
4516
4517         BT_DBG("conn %p quote %d", conn, *quote);
4518         return conn;
4519 }
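/* Worked example (illustrative): with hdev->acl_cnt == 8 free ACL slots
 * and num == 3 eligible connections, the selected (least-sent) connection
 * is granted quote = 8 / 3 = 2 packets this round; a zero quotient is
 * rounded up so the winner can always send at least one packet.
 */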
4520
4521 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4522 {
4523         struct hci_conn_hash *h = &hdev->conn_hash;
4524         struct hci_conn *c;
4525
4526         bt_dev_err(hdev, "link tx timeout");
4527
4528         rcu_read_lock();
4529
4530         /* Kill stalled connections */
4531         list_for_each_entry_rcu(c, &h->list, list) {
4532                 if (c->type == type && c->sent) {
4533                         bt_dev_err(hdev, "killing stalled connection %pMR",
4534                                    &c->dst);
4535                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4536                 }
4537         }
4538
4539         rcu_read_unlock();
4540 }
4541
4542 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4543                                       int *quote)
4544 {
4545         struct hci_conn_hash *h = &hdev->conn_hash;
4546         struct hci_chan *chan = NULL;
4547         unsigned int num = 0, min = ~0, cur_prio = 0;
4548         struct hci_conn *conn;
4549         int cnt, q, conn_num = 0;
4550
4551         BT_DBG("%s", hdev->name);
4552
4553         rcu_read_lock();
4554
4555         list_for_each_entry_rcu(conn, &h->list, list) {
4556                 struct hci_chan *tmp;
4557
4558                 if (conn->type != type)
4559                         continue;
4560
4561                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4562                         continue;
4563
4564                 conn_num++;
4565
4566                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4567                         struct sk_buff *skb;
4568
4569                         if (skb_queue_empty(&tmp->data_q))
4570                                 continue;
4571
4572                         skb = skb_peek(&tmp->data_q);
4573                         if (skb->priority < cur_prio)
4574                                 continue;
4575
4576                         if (skb->priority > cur_prio) {
4577                                 num = 0;
4578                                 min = ~0;
4579                                 cur_prio = skb->priority;
4580                         }
4581
4582                         num++;
4583
4584                         if (conn->sent < min) {
4585                                 min  = conn->sent;
4586                                 chan = tmp;
4587                         }
4588                 }
4589
4590                 if (hci_conn_num(hdev, type) == conn_num)
4591                         break;
4592         }
4593
4594         rcu_read_unlock();
4595
4596         if (!chan)
4597                 return NULL;
4598
4599         switch (chan->conn->type) {
4600         case ACL_LINK:
4601                 cnt = hdev->acl_cnt;
4602                 break;
4603         case AMP_LINK:
4604                 cnt = hdev->block_cnt;
4605                 break;
4606         case SCO_LINK:
4607         case ESCO_LINK:
4608                 cnt = hdev->sco_cnt;
4609                 break;
4610         case LE_LINK:
4611                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4612                 break;
4613         default:
4614                 cnt = 0;
4615                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4616         }
4617
4618         q = cnt / num;
4619         *quote = q ? q : 1;
4620         BT_DBG("chan %p quote %d", chan, *quote);
4621         return chan;
4622 }
4623
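/* Priority aging: after a scheduling round, reset the per-round send
 * counters and promote the packet at the head of the data queue of any
 * channel that did not get to send, up to HCI_PRIO_MAX - 1, so that
 * low-priority traffic is not starved.
 */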
4624 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4625 {
4626         struct hci_conn_hash *h = &hdev->conn_hash;
4627         struct hci_conn *conn;
4628         int num = 0;
4629
4630         BT_DBG("%s", hdev->name);
4631
4632         rcu_read_lock();
4633
4634         list_for_each_entry_rcu(conn, &h->list, list) {
4635                 struct hci_chan *chan;
4636
4637                 if (conn->type != type)
4638                         continue;
4639
4640                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4641                         continue;
4642
4643                 num++;
4644
4645                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4646                         struct sk_buff *skb;
4647
4648                         if (chan->sent) {
4649                                 chan->sent = 0;
4650                                 continue;
4651                         }
4652
4653                         if (skb_queue_empty(&chan->data_q))
4654                                 continue;
4655
4656                         skb = skb_peek(&chan->data_q);
4657                         if (skb->priority >= HCI_PRIO_MAX - 1)
4658                                 continue;
4659
4660                         skb->priority = HCI_PRIO_MAX - 1;
4661
4662                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4663                                skb->priority);
4664                 }
4665
4666                 if (hci_conn_num(hdev, type) == num)
4667                         break;
4668         }
4669
        rcu_read_unlock();
}
4673
4674 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4675 {
4676         /* Calculate count of blocks used by this packet */
4677         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4678 }
4679
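/* Detect a link TX timeout: if the controller has no free buffers
 * (cnt == 0) and the last transmit happened more than HCI_ACL_TX_TIMEOUT
 * ago, kill the stalled connections of this link type. Skipped while the
 * controller is still unconfigured.
 */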
4680 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
4681 {
4682         unsigned long last_tx;
4683
4684         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4685                 return;
4686
4687         switch (type) {
4688         case LE_LINK:
4689                 last_tx = hdev->le_last_tx;
4690                 break;
4691         default:
4692                 last_tx = hdev->acl_last_tx;
4693                 break;
4694         }
4695
        /* The TX timeout must be longer than the maximum link
         * supervision timeout (40.9 seconds).
         */
4699         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
4700                 hci_link_tx_to(hdev, type);
4701 }
4702
4703 /* Schedule SCO */
4704 static void hci_sched_sco(struct hci_dev *hdev)
4705 {
4706         struct hci_conn *conn;
4707         struct sk_buff *skb;
4708         int quote;
4709
4710         BT_DBG("%s", hdev->name);
4711
4712         if (!hci_conn_num(hdev, SCO_LINK))
4713                 return;
4714
4715         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4716                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4717                         BT_DBG("skb %p len %d", skb, skb->len);
4718                         hci_send_frame(hdev, skb);
4719
4720                         conn->sent++;
4721                         if (conn->sent == ~0)
4722                                 conn->sent = 0;
4723                 }
4724         }
4725 }
4726
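/* Schedule eSCO */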
4727 static void hci_sched_esco(struct hci_dev *hdev)
4728 {
4729         struct hci_conn *conn;
4730         struct sk_buff *skb;
4731         int quote;
4732
4733         BT_DBG("%s", hdev->name);
4734
4735         if (!hci_conn_num(hdev, ESCO_LINK))
4736                 return;
4737
4738         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4739                                                      &quote))) {
4740                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4741                         BT_DBG("skb %p len %d", skb, skb->len);
4742                         hci_send_frame(hdev, skb);
4743
4744                         conn->sent++;
4745                         if (conn->sent == ~0)
4746                                 conn->sent = 0;
4747                 }
4748         }
4749 }
4750
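/* Schedule ACL data using packet-based flow control: every transmitted
 * frame consumes one entry from the controller's ACL buffer count
 * (acl_cnt), and pending SCO/eSCO traffic is interleaved after each
 * frame to keep the synchronous links serviced.
 */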
4751 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4752 {
4753         unsigned int cnt = hdev->acl_cnt;
4754         struct hci_chan *chan;
4755         struct sk_buff *skb;
4756         int quote;
4757
4758         __check_timeout(hdev, cnt, ACL_LINK);
4759
4760         while (hdev->acl_cnt &&
4761                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4762                 u32 priority = (skb_peek(&chan->data_q))->priority;
4763                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4764                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4765                                skb->len, skb->priority);
4766
4767                         /* Stop if priority has changed */
4768                         if (skb->priority < priority)
4769                                 break;
4770
4771                         skb = skb_dequeue(&chan->data_q);
4772
4773                         hci_conn_enter_active_mode(chan->conn,
4774                                                    bt_cb(skb)->force_active);
4775
4776                         hci_send_frame(hdev, skb);
4777                         hdev->acl_last_tx = jiffies;
4778
4779                         hdev->acl_cnt--;
4780                         chan->sent++;
4781                         chan->conn->sent++;
4782
4783                         /* Send pending SCO packets right away */
4784                         hci_sched_sco(hdev);
4785                         hci_sched_esco(hdev);
4786                 }
4787         }
4788
4789         if (cnt != hdev->acl_cnt)
4790                 hci_prio_recalculate(hdev, ACL_LINK);
4791 }
4792
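/* Schedule ACL data using block-based flow control (used by AMP
 * controllers): each frame consumes __get_blocks(hdev, skb) entries
 * from the shared block count (block_cnt) instead of a single buffer
 * slot.
 */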
4793 static void hci_sched_acl_blk(struct hci_dev *hdev)
4794 {
4795         unsigned int cnt = hdev->block_cnt;
4796         struct hci_chan *chan;
4797         struct sk_buff *skb;
4798         int quote;
4799         u8 type;
4800
4801         BT_DBG("%s", hdev->name);
4802
4803         if (hdev->dev_type == HCI_AMP)
4804                 type = AMP_LINK;
4805         else
4806                 type = ACL_LINK;
4807
4808         __check_timeout(hdev, cnt, type);
4809
4810         while (hdev->block_cnt > 0 &&
4811                (chan = hci_chan_sent(hdev, type, &quote))) {
4812                 u32 priority = (skb_peek(&chan->data_q))->priority;
4813                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4814                         int blocks;
4815
4816                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4817                                skb->len, skb->priority);
4818
4819                         /* Stop if priority has changed */
4820                         if (skb->priority < priority)
4821                                 break;
4822
4823                         skb = skb_dequeue(&chan->data_q);
4824
4825                         blocks = __get_blocks(hdev, skb);
4826                         if (blocks > hdev->block_cnt)
4827                                 return;
4828
4829                         hci_conn_enter_active_mode(chan->conn,
4830                                                    bt_cb(skb)->force_active);
4831
4832                         hci_send_frame(hdev, skb);
4833                         hdev->acl_last_tx = jiffies;
4834
4835                         hdev->block_cnt -= blocks;
4836                         quote -= blocks;
4837
4838                         chan->sent += blocks;
4839                         chan->conn->sent += blocks;
4840                 }
4841         }
4842
4843         if (cnt != hdev->block_cnt)
4844                 hci_prio_recalculate(hdev, type);
4845 }
4846
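/* Dispatch ACL scheduling according to the controller's flow control
 * mode (packet-based or block-based).
 */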
4847 static void hci_sched_acl(struct hci_dev *hdev)
4848 {
4849         BT_DBG("%s", hdev->name);
4850
        /* Nothing to do if a BR/EDR controller has no ACL links */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
                return;

        /* Nothing to do if an AMP controller has no AMP links */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;
4858
4859         switch (hdev->flow_ctl_mode) {
4860         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4861                 hci_sched_acl_pkt(hdev);
4862                 break;
4863
4864         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4865                 hci_sched_acl_blk(hdev);
4866                 break;
4867         }
4868 }
4869
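/* Schedule LE data; controllers with a dedicated LE buffer pool
 * (le_pkts) account against le_cnt, while the rest share the ACL
 * buffer count.
 */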
4870 static void hci_sched_le(struct hci_dev *hdev)
4871 {
4872         struct hci_chan *chan;
4873         struct sk_buff *skb;
4874         int quote, cnt, tmp;
4875
4876         BT_DBG("%s", hdev->name);
4877
4878         if (!hci_conn_num(hdev, LE_LINK))
4879                 return;
4880
4881         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4882
4883         __check_timeout(hdev, cnt, LE_LINK);
4884
4885         tmp = cnt;
4886         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4887                 u32 priority = (skb_peek(&chan->data_q))->priority;
4888                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4889                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4890                                skb->len, skb->priority);
4891
4892                         /* Stop if priority has changed */
4893                         if (skb->priority < priority)
4894                                 break;
4895
4896                         skb = skb_dequeue(&chan->data_q);
4897
4898                         hci_send_frame(hdev, skb);
4899                         hdev->le_last_tx = jiffies;
4900
4901                         cnt--;
4902                         chan->sent++;
4903                         chan->conn->sent++;
4904
4905                         /* Send pending SCO packets right away */
4906                         hci_sched_sco(hdev);
4907                         hci_sched_esco(hdev);
4908                 }
4909         }
4910
4911         if (hdev->le_pkts)
4912                 hdev->le_cnt = cnt;
4913         else
4914                 hdev->acl_cnt = cnt;
4915
4916         if (cnt != tmp)
4917                 hci_prio_recalculate(hdev, LE_LINK);
4918 }
4919
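/* TX work: run all per-type schedulers unless userspace holds the
 * device through HCI_USER_CHANNEL, then flush any queued raw packets
 * straight to the driver.
 */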
4920 static void hci_tx_work(struct work_struct *work)
4921 {
4922         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4923         struct sk_buff *skb;
4924
4925         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4926                hdev->sco_cnt, hdev->le_cnt);
4927
4928         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4929                 /* Schedule queues and send stuff to HCI driver */
4930                 hci_sched_sco(hdev);
4931                 hci_sched_esco(hdev);
4932                 hci_sched_acl(hdev);
4933                 hci_sched_le(hdev);
4934         }
4935
        /* Send any queued raw (unknown type) packets */
4937         while ((skb = skb_dequeue(&hdev->raw_q)))
4938                 hci_send_frame(hdev, skb);
4939 }
4940
4941 /* ----- HCI RX task (incoming data processing) ----- */
4942
4943 /* ACL data packet */
4944 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4945 {
4946         struct hci_acl_hdr *hdr = (void *) skb->data;
4947         struct hci_conn *conn;
4948         __u16 handle, flags;
4949
4950         skb_pull(skb, HCI_ACL_HDR_SIZE);
4951
4952         handle = __le16_to_cpu(hdr->handle);
4953         flags  = hci_flags(handle);
4954         handle = hci_handle(handle);
4955
4956         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4957                handle, flags);
4958
4959         hdev->stat.acl_rx++;
4960
4961         hci_dev_lock(hdev);
4962         conn = hci_conn_hash_lookup_handle(hdev, handle);
4963         hci_dev_unlock(hdev);
4964
        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        }

        bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
                   handle);
        kfree_skb(skb);
4977 }
4978
4979 /* SCO data packet */
4980 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4981 {
4982         struct hci_sco_hdr *hdr = (void *) skb->data;
4983         struct hci_conn *conn;
4984         __u16 handle, flags;
4985
4986         skb_pull(skb, HCI_SCO_HDR_SIZE);
4987
4988         handle = __le16_to_cpu(hdr->handle);
4989         flags  = hci_flags(handle);
4990         handle = hci_handle(handle);
4991
4992         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4993                handle, flags);
4994
4995         hdev->stat.sco_rx++;
4996
4997         hci_dev_lock(hdev);
4998         conn = hci_conn_hash_lookup_handle(hdev, handle);
4999         hci_dev_unlock(hdev);
5000
        if (conn) {
                /* Send to upper protocol */
                bt_cb(skb)->sco.pkt_status = flags & 0x03;
                sco_recv_scodata(conn, skb);
                return;
        }

        bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
                   handle);
        kfree_skb(skb);
5012 }
5013
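/* A request is complete once the command queue is empty or the skb at
 * its head is flagged as the start of a new request.
 */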
5014 static bool hci_req_is_complete(struct hci_dev *hdev)
5015 {
5016         struct sk_buff *skb;
5017
5018         skb = skb_peek(&hdev->cmd_q);
5019         if (!skb)
5020                 return true;
5021
5022         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
5023 }
5024
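/* Requeue a clone of the last sent command so it gets retransmitted.
 * HCI_Reset is deliberately never resent.
 */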
5025 static void hci_resend_last(struct hci_dev *hdev)
5026 {
5027         struct hci_command_hdr *sent;
5028         struct sk_buff *skb;
5029         u16 opcode;
5030
5031         if (!hdev->sent_cmd)
5032                 return;
5033
5034         sent = (void *) hdev->sent_cmd->data;
5035         opcode = __le16_to_cpu(sent->opcode);
5036         if (opcode == HCI_OP_RESET)
5037                 return;
5038
5039         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5040         if (!skb)
5041                 return;
5042
5043         skb_queue_head(&hdev->cmd_q, skb);
5044         queue_work(hdev->workqueue, &hdev->cmd_work);
5045 }
5046
5047 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
5048                           hci_req_complete_t *req_complete,
5049                           hci_req_complete_skb_t *req_complete_skb)
5050 {
5051         struct sk_buff *skb;
5052         unsigned long flags;
5053
5054         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5055
        /* If the completed command doesn't match the last one that was
         * sent, we need to handle it specially.
         */
5059         if (!hci_sent_cmd_data(hdev, opcode)) {
5060                 /* Some CSR based controllers generate a spontaneous
5061                  * reset complete event during init and any pending
5062                  * command will never be completed. In such a case we
5063                  * need to resend whatever was the last sent
5064                  * command.
5065                  */
5066                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5067                         hci_resend_last(hdev);
5068
5069                 return;
5070         }
5071
5072         /* If we reach this point this event matches the last command sent */
5073         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5074
        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
5078         if (!status && !hci_req_is_complete(hdev))
5079                 return;
5080
        /* If this was the last command in a request, the completion
         * callback is found in hdev->sent_cmd instead of the command
         * queue (hdev->cmd_q).
         */
5085         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5086                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5087                 return;
5088         }
5089
5090         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5091                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5092                 return;
5093         }
5094
5095         /* Remove all pending commands belonging to this request */
5096         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5097         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5098                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5099                         __skb_queue_head(&hdev->cmd_q, skb);
5100                         break;
5101                 }
5102
5103                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5104                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5105                 else
5106                         *req_complete = bt_cb(skb)->hci.req_complete;
5107                 dev_kfree_skb_irq(skb);
5108         }
5109         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5110 }
5111
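/* RX work: drain the receive queue, mirror each packet to the monitor
 * and (in promiscuous mode) to the raw sockets, then dispatch it to the
 * event, ACL or SCO handler according to its packet type.
 */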
5112 static void hci_rx_work(struct work_struct *work)
5113 {
5114         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5115         struct sk_buff *skb;
5116
5117         BT_DBG("%s", hdev->name);
5118
5119         while ((skb = skb_dequeue(&hdev->rx_q))) {
5120                 /* Send copy to monitor */
5121                 hci_send_to_monitor(hdev, skb);
5122
5123                 if (atomic_read(&hdev->promisc)) {
5124                         /* Send copy to the sockets */
5125                         hci_send_to_sock(hdev, skb);
5126                 }
5127
                /* If the device has been opened in HCI_USER_CHANNEL,
                 * userspace has exclusive access to the device.
                 * While the device is in HCI_INIT state, we still need
                 * to pass the packets to the driver so that it can
                 * complete its setup().
                 */
5134                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5135                     !test_bit(HCI_INIT, &hdev->flags)) {
5136                         kfree_skb(skb);
5137                         continue;
5138                 }
5139
5140                 if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
5142                         switch (hci_skb_pkt_type(skb)) {
5143                         case HCI_ACLDATA_PKT:
5144                         case HCI_SCODATA_PKT:
5145                         case HCI_ISODATA_PKT:
5146                                 kfree_skb(skb);
5147                                 continue;
5148                         }
5149                 }
5150
5151                 /* Process frame */
5152                 switch (hci_skb_pkt_type(skb)) {
5153                 case HCI_EVENT_PKT:
5154                         BT_DBG("%s Event packet", hdev->name);
5155                         hci_event_packet(hdev, skb);
5156                         break;
5157
5158                 case HCI_ACLDATA_PKT:
5159                         BT_DBG("%s ACL data packet", hdev->name);
5160                         hci_acldata_packet(hdev, skb);
5161                         break;
5162
5163                 case HCI_SCODATA_PKT:
5164                         BT_DBG("%s SCO data packet", hdev->name);
5165                         hci_scodata_packet(hdev, skb);
5166                         break;
5167
5168                 default:
5169                         kfree_skb(skb);
5170                         break;
5171                 }
5172         }
5173 }
5174
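/* Command work: while the controller advertises spare command credits
 * (cmd_cnt), send the next queued command, keep a clone in sent_cmd for
 * completion matching, and arm the command timeout unless a reset is in
 * flight.
 */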
5175 static void hci_cmd_work(struct work_struct *work)
5176 {
5177         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5178         struct sk_buff *skb;
5179
5180         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5181                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5182
5183         /* Send queued commands */
5184         if (atomic_read(&hdev->cmd_cnt)) {
5185                 skb = skb_dequeue(&hdev->cmd_q);
5186                 if (!skb)
5187                         return;
5188
5189                 kfree_skb(hdev->sent_cmd);
5190
5191                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5192                 if (hdev->sent_cmd) {
5193                         if (hci_req_status_pend(hdev))
5194                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5195                         atomic_dec(&hdev->cmd_cnt);
5196                         hci_send_frame(hdev, skb);
5197                         if (test_bit(HCI_RESET, &hdev->flags))
5198                                 cancel_delayed_work(&hdev->cmd_timer);
5199                         else
5200                                 schedule_delayed_work(&hdev->cmd_timer,
5201                                                       HCI_CMD_TIMEOUT);
5202                 } else {
5203                         skb_queue_head(&hdev->cmd_q, skb);
5204                         queue_work(hdev->workqueue, &hdev->cmd_work);
5205                 }
5206         }
5207 }