/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

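/* The "dut_mode" debugfs attribute exposes the HCI_DUT_MODE flag as a
 * boolean: reading returns 'Y' or 'N', and writing sends
 * HCI_OP_ENABLE_DUT_MODE (or HCI_OP_RESET to leave Device Under Test
 * mode) before toggling the flag.
 */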
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

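/* Writing "vendor_diag" toggles the HCI_VENDOR_DIAG flag through the
 * driver's set_diag() callback. When the setting is non-persistent and
 * the transport is down (or in user channel operation), only the flag
 * is updated and the callback runs later at power on.
 */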
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

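/* Create the debugfs entries that are valid in every controller state.
 * "vendor_diag" is only exposed when the driver provides a set_diag()
 * callback.
 */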
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

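/* Build event mask page 1 from the controller's LMP features and
 * supported commands, then queue HCI_OP_SET_EVENT_MASK.
 */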
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page
                 * should also be available. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

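/* Build event mask page 2. The command is only queued when at least
 * one bit differs from the all-zero default (see the Broadcom note
 * below).
 */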
static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

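/* Third init stage: stored link keys, link policy and page scan
 * parameters, plus the LE event mask, white list and data length
 * settings for LE capable controllers.
 */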
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If the controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;        /* LE PHY Update Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

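/* Fourth init stage: optional commands whose support is indicated in
 * the supported-commands bit mask (stored link key deletion, event
 * mask page 2, codecs, MWS, secure connections, default data length
 * and PHY preferences).
 */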
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                /* No transmitter PHY or receiver PHY preferences */
                cp.all_phys = 0x03;
                cp.tx_phys = 0;
                cp.rx_phys = 0;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

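/* Run the staged synchronous init sequence. All controllers go through
 * stages 1 and 2; only HCI_PRIMARY controllers continue with stages 3
 * and 4 and the extended debugfs setup.
 */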
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

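/* Minimal init for unconfigured controllers: run hci_init0_req to read
 * the local version (and the BD address when a set_bdaddr callback is
 * provided). Raw devices are skipped entirely.
 */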
static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

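/* Update the discovery state machine. The management interface is
 * notified on transitions to STOPPED and FINDING; entering STOPPED
 * also re-evaluates background scanning.
 */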
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

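/* Add or refresh an inquiry cache entry and return MGMT_DEV_FOUND_*
 * flags describing what userspace still needs to do (for example
 * confirm the remote name).
 */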
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

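/* HCIINQUIRY ioctl helper: optionally (re)runs an inquiry, then copies
 * the cached results back to user space.
 */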
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep. Therefore we allocate a
         * temporary buffer and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

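/* Power on a controller: run the transport open(), the setup/config
 * callbacks and the staged init, then either mark the device up or
 * roll everything back on failure.
 */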
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hci_dev_test_flag(hdev, HCI_MGMT) &&
                    hdev->dev_type == HCI_PRIMARY) {
                        ret = __hci_req_hci_power_on(hdev);
                        mgmt_power_on(hdev, ret);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);

                /* Since hci_rx_work() can wake up new cmd_work it should
                 * be flushed first to avoid an unexpected call of
                 * hci_cmd_work().
                 */
1472                 flush_work(&hdev->rx_work);
1473                 flush_work(&hdev->cmd_work);
1474
1475                 skb_queue_purge(&hdev->cmd_q);
1476                 skb_queue_purge(&hdev->rx_q);
1477
1478                 if (hdev->flush)
1479                         hdev->flush(hdev);
1480
1481                 if (hdev->sent_cmd) {
1482                         kfree_skb(hdev->sent_cmd);
1483                         hdev->sent_cmd = NULL;
1484                 }
1485
1486                 clear_bit(HCI_RUNNING, &hdev->flags);
1487                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1488
1489                 hdev->close(hdev);
1490                 hdev->flags &= BIT(HCI_RAW);
1491         }
1492
1493 done:
1494         hci_req_sync_unlock(hdev);
1495         return ret;
1496 }
1497
1498 /* ---- HCI ioctl helpers ---- */
1499
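/* Power on a controller by index. This is typically reached via the
 * HCIDEVUP ioctl on a raw HCI socket (CAP_NET_ADMIN required). A
 * minimal userspace sketch, illustrative only and with error handling
 * omitted:
 *
 *     int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *     ioctl(sk, HCIDEVUP, 0);    // hci0 -> hci_dev_open(0)
 */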
1500 int hci_dev_open(__u16 dev)
1501 {
1502         struct hci_dev *hdev;
1503         int err;
1504
1505         hdev = hci_dev_get(dev);
1506         if (!hdev)
1507                 return -ENODEV;
1508
1509         /* Devices that are marked as unconfigured can only be powered
1510          * up as user channel. Trying to bring them up as normal devices
1511          * will result in a failure. Only user channel operation is
1512          * possible.
1513          *
1514          * When this function is called for a user channel, the flag
1515          * HCI_USER_CHANNEL will be set first before attempting to
1516          * open the device.
1517          */
1518         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1519             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1520                 err = -EOPNOTSUPP;
1521                 goto done;
1522         }
1523
1524         /* We need to ensure that no other power on/off work is pending
1525          * before proceeding to call hci_dev_do_open. This is
1526          * particularly important if the setup procedure has not yet
1527          * completed.
1528          */
1529         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1530                 cancel_delayed_work(&hdev->power_off);
1531
1532         /* After this call it is guaranteed that the setup procedure
1533          * has finished. This means that error conditions such as RFKILL
1534          * or the lack of a valid public or static random address apply.
1535          */
1536         flush_workqueue(hdev->req_workqueue);
1537
1538         /* For controllers not using the management interface and that
1539          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1540          * so that pairing works for them. Once the management interface
1541          * is in use this bit will be cleared again and userspace has
1542          * to explicitly enable it.
1543          */
1544         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1545             !hci_dev_test_flag(hdev, HCI_MGMT))
1546                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1547
1548         err = hci_dev_do_open(hdev);
1549
1550 done:
1551         hci_dev_put(hdev);
1552         return err;
1553 }
1554
1555 /* This function requires the caller holds hdev->lock */
1556 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1557 {
1558         struct hci_conn_params *p;
1559
1560         list_for_each_entry(p, &hdev->le_conn_params, list) {
1561                 if (p->conn) {
1562                         hci_conn_drop(p->conn);
1563                         hci_conn_put(p->conn);
1564                         p->conn = NULL;
1565                 }
1566                 list_del_init(&p->action);
1567         }
1568
1569         BT_DBG("All LE pending actions cleared");
1570 }
1571
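/* Common power-down path, shared by hci_dev_close(), rfkill blocking,
 * hci_power_off() and hci_error_reset(). It takes hci_req_sync_lock()
 * itself, flushes all pending work and queues, and, when the
 * controller has HCI_QUIRK_RESET_ON_CLOSE set, sends a final HCI
 * Reset before invoking the driver's close callback.
 */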
1572 int hci_dev_do_close(struct hci_dev *hdev)
1573 {
1574         bool auto_off;
1575
1576         BT_DBG("%s %p", hdev->name, hdev);
1577
1586         cancel_delayed_work(&hdev->power_off);
1587
1588         hci_request_cancel_all(hdev);
1589         hci_req_sync_lock(hdev);
1590
1591         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1592             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1593             test_bit(HCI_UP, &hdev->flags)) {
1594                 /* Execute vendor specific shutdown routine */
1595                 if (hdev->shutdown)
1596                         hdev->shutdown(hdev);
1597         }
1598
1599         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1600                 cancel_delayed_work_sync(&hdev->cmd_timer);
1601                 hci_req_sync_unlock(hdev);
1602                 return 0;
1603         }
1604
1605         hci_leds_update_powered(hdev, false);
1606
1607         /* Flush RX and TX works */
1608         flush_work(&hdev->tx_work);
1609         flush_work(&hdev->rx_work);
1610
1611         if (hdev->discov_timeout > 0) {
1612                 hdev->discov_timeout = 0;
1613                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1614                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1615         }
1616
1617         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1618                 cancel_delayed_work(&hdev->service_cache);
1619
1620         if (hci_dev_test_flag(hdev, HCI_MGMT))
1621                 cancel_delayed_work_sync(&hdev->rpa_expired);
1622
1623         /* Avoid potential lockdep warnings from the *_flush() calls by
1624          * ensuring the workqueue is empty up front.
1625          */
1626         drain_workqueue(hdev->workqueue);
1627
1628         hci_dev_lock(hdev);
1629
1630         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1631
1632         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1633
1634         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1635             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1636             hci_dev_test_flag(hdev, HCI_MGMT))
1637                 __mgmt_power_off(hdev);
1638
1639         hci_inquiry_cache_flush(hdev);
1640         hci_pend_le_actions_clear(hdev);
1641         hci_conn_hash_flush(hdev);
1642         hci_dev_unlock(hdev);
1643
1644         smp_unregister(hdev);
1645
1646         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1647
1648         if (hdev->flush)
1649                 hdev->flush(hdev);
1650
1651         /* Reset device */
1652         skb_queue_purge(&hdev->cmd_q);
1653         atomic_set(&hdev->cmd_cnt, 1);
1654         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1655             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1656                 set_bit(HCI_INIT, &hdev->flags);
1657                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1658                 clear_bit(HCI_INIT, &hdev->flags);
1659         }
1660
1661         /* Flush cmd work */
1662         flush_work(&hdev->cmd_work);
1663
1664         /* Drop queues */
1665         skb_queue_purge(&hdev->rx_q);
1666         skb_queue_purge(&hdev->cmd_q);
1667         skb_queue_purge(&hdev->raw_q);
1668
1669         /* Drop last sent command */
1670         if (hdev->sent_cmd) {
1671                 cancel_delayed_work_sync(&hdev->cmd_timer);
1672                 kfree_skb(hdev->sent_cmd);
1673                 hdev->sent_cmd = NULL;
1674         }
1675
1676         clear_bit(HCI_RUNNING, &hdev->flags);
1677         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1678
1679         /* After this point our queues are empty
1680          * and no tasks are scheduled. */
1681         hdev->close(hdev);
1682
1683         /* Clear flags */
1684         hdev->flags &= BIT(HCI_RAW);
1685         hci_dev_clear_volatile_flags(hdev);
1686
1687         /* Controller radio is available but is currently powered down */
1688         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1689
1690         memset(hdev->eir, 0, sizeof(hdev->eir));
1691         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1692         bacpy(&hdev->random_addr, BDADDR_ANY);
1693
1694         hci_req_sync_unlock(hdev);
1695
1696         hci_dev_put(hdev);
1697         return 0;
1698 }
1699
1700 int hci_dev_close(__u16 dev)
1701 {
1702         struct hci_dev *hdev;
1703         int err;
1704
1705         hdev = hci_dev_get(dev);
1706         if (!hdev)
1707                 return -ENODEV;
1708
1709         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1710                 err = -EBUSY;
1711                 goto done;
1712         }
1713
1714         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1715                 cancel_delayed_work(&hdev->power_off);
1716
1717         err = hci_dev_do_close(hdev);
1718
1719 done:
1720         hci_dev_put(hdev);
1721         return err;
1722 }
1723
1724 static int hci_dev_do_reset(struct hci_dev *hdev)
1725 {
1726         int ret;
1727
1728         BT_DBG("%s %p", hdev->name, hdev);
1729
1730         hci_req_sync_lock(hdev);
1731
1732         /* Drop queues */
1733         skb_queue_purge(&hdev->rx_q);
1734         skb_queue_purge(&hdev->cmd_q);
1735
1736         /* Avoid potential lockdep warnings from the *_flush() calls by
1737          * ensuring the workqueue is empty up front.
1738          */
1739         drain_workqueue(hdev->workqueue);
1740
1741         hci_dev_lock(hdev);
1742         hci_inquiry_cache_flush(hdev);
1743         hci_conn_hash_flush(hdev);
1744         hci_dev_unlock(hdev);
1745
1746         if (hdev->flush)
1747                 hdev->flush(hdev);
1748
1749         atomic_set(&hdev->cmd_cnt, 1);
1750         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1751
1752         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1753
1754         hci_req_sync_unlock(hdev);
1755         return ret;
1756 }
1757
1758 int hci_dev_reset(__u16 dev)
1759 {
1760         struct hci_dev *hdev;
1761         int err;
1762
1763         hdev = hci_dev_get(dev);
1764         if (!hdev)
1765                 return -ENODEV;
1766
1767         if (!test_bit(HCI_UP, &hdev->flags)) {
1768                 err = -ENETDOWN;
1769                 goto done;
1770         }
1771
1772         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1773                 err = -EBUSY;
1774                 goto done;
1775         }
1776
1777         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1778                 err = -EOPNOTSUPP;
1779                 goto done;
1780         }
1781
1782         err = hci_dev_do_reset(hdev);
1783
1784 done:
1785         hci_dev_put(hdev);
1786         return err;
1787 }
1788
1789 int hci_dev_reset_stat(__u16 dev)
1790 {
1791         struct hci_dev *hdev;
1792         int ret = 0;
1793
1794         hdev = hci_dev_get(dev);
1795         if (!hdev)
1796                 return -ENODEV;
1797
1798         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1799                 ret = -EBUSY;
1800                 goto done;
1801         }
1802
1803         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1804                 ret = -EOPNOTSUPP;
1805                 goto done;
1806         }
1807
1808         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1809
1810 done:
1811         hci_dev_put(hdev);
1812         return ret;
1813 }
1814
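/* Map a Write Scan Enable parameter onto the mgmt connectable and
 * discoverable flags. Per the core specification, bit 0
 * (SCAN_INQUIRY) enables inquiry scan and bit 1 (SCAN_PAGE) enables
 * page scan, so e.g. scan = 0x03 means connectable and discoverable.
 */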
1815 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1816 {
1817         bool conn_changed, discov_changed;
1818
1819         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1820
1821         if ((scan & SCAN_PAGE))
1822                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1823                                                           HCI_CONNECTABLE);
1824         else
1825                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1826                                                            HCI_CONNECTABLE);
1827
1828         if ((scan & SCAN_INQUIRY)) {
1829                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1830                                                             HCI_DISCOVERABLE);
1831         } else {
1832                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1833                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1834                                                              HCI_DISCOVERABLE);
1835         }
1836
1837         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1838                 return;
1839
1840         if (conn_changed || discov_changed) {
1841                 /* In case this was disabled through mgmt */
1842                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1843
1844                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1845                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1846
1847                 mgmt_new_settings(hdev);
1848         }
1849 }
1850
1851 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1852 {
1853         struct hci_dev *hdev;
1854         struct hci_dev_req dr;
1855         int err = 0;
1856
1857         if (copy_from_user(&dr, arg, sizeof(dr)))
1858                 return -EFAULT;
1859
1860         hdev = hci_dev_get(dr.dev_id);
1861         if (!hdev)
1862                 return -ENODEV;
1863
1864         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1865                 err = -EBUSY;
1866                 goto done;
1867         }
1868
1869         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1870                 err = -EOPNOTSUPP;
1871                 goto done;
1872         }
1873
1874         if (hdev->dev_type != HCI_PRIMARY) {
1875                 err = -EOPNOTSUPP;
1876                 goto done;
1877         }
1878
1879         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1880                 err = -EOPNOTSUPP;
1881                 goto done;
1882         }
1883
1884         switch (cmd) {
1885         case HCISETAUTH:
1886                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1887                                    HCI_INIT_TIMEOUT, NULL);
1888                 break;
1889
1890         case HCISETENCRYPT:
1891                 if (!lmp_encrypt_capable(hdev)) {
1892                         err = -EOPNOTSUPP;
1893                         break;
1894                 }
1895
1896                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1897                         /* Auth must be enabled first */
1898                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1899                                            HCI_INIT_TIMEOUT, NULL);
1900                         if (err)
1901                                 break;
1902                 }
1903
1904                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1905                                    HCI_INIT_TIMEOUT, NULL);
1906                 break;
1907
1908         case HCISETSCAN:
1909                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1910                                    HCI_INIT_TIMEOUT, NULL);
1911
1912                 /* Ensure that the connectable and discoverable states
1913                  * get correctly modified as this was a non-mgmt change.
1914                  */
1915                 if (!err)
1916                         hci_update_scan_state(hdev, dr.dev_opt);
1917                 break;
1918
1919         case HCISETLINKPOL:
1920                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1921                                    HCI_INIT_TIMEOUT, NULL);
1922                 break;
1923
1924         case HCISETLINKMODE:
1925                 hdev->link_mode = ((__u16) dr.dev_opt) &
1926                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1927                 break;
1928
1929         case HCISETPTYPE:
1930                 hdev->pkt_type = (__u16) dr.dev_opt;
1931                 break;
1932
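        /* For the two MTU ioctls below, dev_opt carries two packed
         * __u16 values: the kernel reads the packet count from the
         * first __u16 and the MTU from the second. Note that this
         * interpretation depends on host endianness; on little-endian
         * hosts it corresponds to dev_opt = (mtu << 16) | pkts (an
         * observation about this code, not a documented ABI).
         */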
1933         case HCISETACLMTU:
1934                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1935                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1936                 break;
1937
1938         case HCISETSCOMTU:
1939                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1940                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1941                 break;
1942
1943         default:
1944                 err = -EINVAL;
1945                 break;
1946         }
1947
1948 done:
1949         hci_dev_put(hdev);
1950         return err;
1951 }
1952
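/* HCIGETDEVLIST helper. Userspace passes a struct hci_dev_list_req
 * with dev_num set to the capacity of its dev_req[] array; the kernel
 * fills in up to that many (dev_id, flags) pairs and rewrites dev_num
 * to the number of entries actually returned.
 */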
1953 int hci_get_dev_list(void __user *arg)
1954 {
1955         struct hci_dev *hdev;
1956         struct hci_dev_list_req *dl;
1957         struct hci_dev_req *dr;
1958         int n = 0, size, err;
1959         __u16 dev_num;
1960
1961         if (get_user(dev_num, (__u16 __user *) arg))
1962                 return -EFAULT;
1963
1964         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1965                 return -EINVAL;
1966
1967         size = sizeof(*dl) + dev_num * sizeof(*dr);
1968
1969         dl = kzalloc(size, GFP_KERNEL);
1970         if (!dl)
1971                 return -ENOMEM;
1972
1973         dr = dl->dev_req;
1974
1975         read_lock(&hci_dev_list_lock);
1976         list_for_each_entry(hdev, &hci_dev_list, list) {
1977                 unsigned long flags = hdev->flags;
1978
1979                 /* When auto-off is configured, the transport is
1980                  * running, but the device should still be reported
1981                  * as down.
1982                  */
1983                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1984                         flags &= ~BIT(HCI_UP);
1985
1986                 (dr + n)->dev_id  = hdev->id;
1987                 (dr + n)->dev_opt = flags;
1988
1989                 if (++n >= dev_num)
1990                         break;
1991         }
1992         read_unlock(&hci_dev_list_lock);
1993
1994         dl->dev_num = n;
1995         size = sizeof(*dl) + n * sizeof(*dr);
1996
1997         err = copy_to_user(arg, dl, size);
1998         kfree(dl);
1999
2000         return err ? -EFAULT : 0;
2001 }
2002
2003 int hci_get_dev_info(void __user *arg)
2004 {
2005         struct hci_dev *hdev;
2006         struct hci_dev_info di;
2007         unsigned long flags;
2008         int err = 0;
2009
2010         if (copy_from_user(&di, arg, sizeof(di)))
2011                 return -EFAULT;
2012
2013         hdev = hci_dev_get(di.dev_id);
2014         if (!hdev)
2015                 return -ENODEV;
2016
2017         /* When auto-off is configured, the transport is
2018          * running, but the device should still be reported
2019          * as down.
2020          */
2021         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2022                 flags = hdev->flags & ~BIT(HCI_UP);
2023         else
2024                 flags = hdev->flags;
2025
2026         strcpy(di.name, hdev->name);
2027         di.bdaddr   = hdev->bdaddr;
2028         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2029         di.flags    = flags;
2030         di.pkt_type = hdev->pkt_type;
2031         if (lmp_bredr_capable(hdev)) {
2032                 di.acl_mtu  = hdev->acl_mtu;
2033                 di.acl_pkts = hdev->acl_pkts;
2034                 di.sco_mtu  = hdev->sco_mtu;
2035                 di.sco_pkts = hdev->sco_pkts;
2036         } else {
2037                 di.acl_mtu  = hdev->le_mtu;
2038                 di.acl_pkts = hdev->le_pkts;
2039                 di.sco_mtu  = 0;
2040                 di.sco_pkts = 0;
2041         }
2042         di.link_policy = hdev->link_policy;
2043         di.link_mode   = hdev->link_mode;
2044
2045         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2046         memcpy(&di.features, &hdev->features, sizeof(di.features));
2047
2048         if (copy_to_user(arg, &di, sizeof(di)))
2049                 err = -EFAULT;
2050
2051         hci_dev_put(hdev);
2052
2053         return err;
2054 }
2055
2056 /* ---- Interface to HCI drivers ---- */
2057
2058 static int hci_rfkill_set_block(void *data, bool blocked)
2059 {
2060         struct hci_dev *hdev = data;
2061
2062         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2063
2064         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2065                 return -EBUSY;
2066
2067         if (blocked) {
2068                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2069                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2070                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2071                         hci_dev_do_close(hdev);
2072         } else {
2073                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2074         }
2075
2076         return 0;
2077 }
2078
2079 static const struct rfkill_ops hci_rfkill_ops = {
2080         .set_block = hci_rfkill_set_block,
2081 };
2082
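/* Deferred power-on handler, run from hdev->req_workqueue. It is
 * queued by hci_register_dev() below and, through the management
 * interface, when userspace sets the Powered property.
 */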
2083 static void hci_power_on(struct work_struct *work)
2084 {
2085         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2086         int err;
2087
2088         BT_DBG("%s", hdev->name);
2089
2090         if (test_bit(HCI_UP, &hdev->flags) &&
2091             hci_dev_test_flag(hdev, HCI_MGMT) &&
2092             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2093                 cancel_delayed_work(&hdev->power_off);
2094                 hci_req_sync_lock(hdev);
2095                 err = __hci_req_hci_power_on(hdev);
2096                 hci_req_sync_unlock(hdev);
2097                 mgmt_power_on(hdev, err);
2098                 return;
2099         }
2100
2101         err = hci_dev_do_open(hdev);
2102         if (err < 0) {
2103                 hci_dev_lock(hdev);
2104                 mgmt_set_powered_failed(hdev, err);
2105                 hci_dev_unlock(hdev);
2106                 return;
2107         }
2108
2109         /* During the HCI setup phase, a few error conditions are
2110          * ignored and they need to be checked now. If they are still
2111          * valid, it is important to turn the device back off.
2112          */
2113         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2114             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2115             (hdev->dev_type == HCI_PRIMARY &&
2116              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2117              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2118                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2119                 hci_dev_do_close(hdev);
2120         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2121                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2122                                    HCI_AUTO_OFF_TIMEOUT);
2123         }
2124
2125         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2126                 /* For unconfigured devices, set the HCI_RAW flag
2127                  * so that userspace can easily identify them.
2128                  */
2129                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2130                         set_bit(HCI_RAW, &hdev->flags);
2131
2132                 /* For fully configured devices, this will send
2133                  * the Index Added event. For unconfigured devices,
2134                  * it will send an Unconfigured Index Added event.
2135                  *
2136                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2137                  * and no event will be sent.
2138                  */
2139                 mgmt_index_added(hdev);
2140         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2141                 /* Once the controller is configured, it is
2142                  * important to clear the HCI_RAW flag.
2143                  */
2144                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2145                         clear_bit(HCI_RAW, &hdev->flags);
2146
2147                 /* Powering on the controller with HCI_CONFIG set only
2148                  * happens with the transition from unconfigured to
2149                  * configured. This will send the Index Added event.
2150                  */
2151                 mgmt_index_added(hdev);
2152         }
2153 }
2154
2155 static void hci_power_off(struct work_struct *work)
2156 {
2157         struct hci_dev *hdev = container_of(work, struct hci_dev,
2158                                             power_off.work);
2159
2160         BT_DBG("%s", hdev->name);
2161
2162         hci_dev_do_close(hdev);
2163 }
2164
2165 static void hci_error_reset(struct work_struct *work)
2166 {
2167         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2168
2169         BT_DBG("%s", hdev->name);
2170
2171         if (hdev->hw_error)
2172                 hdev->hw_error(hdev, hdev->hw_error_code);
2173         else
2174                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2175                        hdev->hw_error_code);
2176
2177         if (hci_dev_do_close(hdev))
2178                 return;
2179
2180         hci_dev_do_open(hdev);
2181 }
2182
2183 void hci_uuids_clear(struct hci_dev *hdev)
2184 {
2185         struct bt_uuid *uuid, *tmp;
2186
2187         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2188                 list_del(&uuid->list);
2189                 kfree(uuid);
2190         }
2191 }
2192
2193 void hci_link_keys_clear(struct hci_dev *hdev)
2194 {
2195         struct link_key *key;
2196
2197         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2198                 list_del_rcu(&key->list);
2199                 kfree_rcu(key, rcu);
2200         }
2201 }
2202
2203 void hci_smp_ltks_clear(struct hci_dev *hdev)
2204 {
2205         struct smp_ltk *k;
2206
2207         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2208                 list_del_rcu(&k->list);
2209                 kfree_rcu(k, rcu);
2210         }
2211 }
2212
2213 void hci_smp_irks_clear(struct hci_dev *hdev)
2214 {
2215         struct smp_irk *k;
2216
2217         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2218                 list_del_rcu(&k->list);
2219                 kfree_rcu(k, rcu);
2220         }
2221 }
2222
2223 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2224 {
2225         struct link_key *k;
2226
2227         rcu_read_lock();
2228         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2229                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2230                         rcu_read_unlock();
2231                         return k;
2232                 }
2233         }
2234         rcu_read_unlock();
2235
2236         return NULL;
2237 }
2238
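/* The auth_type/remote_auth values compared below follow the
 * authentication requirements encoding from the core specification:
 * 0x00/0x01 no bonding, 0x02/0x03 dedicated bonding, 0x04/0x05
 * general bonding, where the odd values additionally require MITM
 * protection.
 */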
2239 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2240                                u8 key_type, u8 old_key_type)
2241 {
2242         /* Legacy key */
2243         if (key_type < 0x03)
2244                 return true;
2245
2246         /* Debug keys are insecure so don't store them persistently */
2247         if (key_type == HCI_LK_DEBUG_COMBINATION)
2248                 return false;
2249
2250         /* Changed combination key and there's no previous one */
2251         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2252                 return false;
2253
2254         /* Security mode 3 case */
2255         if (!conn)
2256                 return true;
2257
2258         /* BR/EDR key derived using SC from an LE link */
2259         if (conn->type == LE_LINK)
2260                 return true;
2261
2262         /* Neither the local nor the remote side requested no-bonding */
2263         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2264                 return true;
2265
2266         /* Local side had dedicated bonding as requirement */
2267         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2268                 return true;
2269
2270         /* Remote side had dedicated bonding as requirement */
2271         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2272                 return true;
2273
2274         /* If none of the above criteria match, then don't store the key
2275          * persistently */
2276         return false;
2277 }
2278
2279 static u8 ltk_role(u8 type)
2280 {
2281         if (type == SMP_LTK)
2282                 return HCI_ROLE_MASTER;
2283
2284         return HCI_ROLE_SLAVE;
2285 }
2286
2287 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2288                              u8 addr_type, u8 role)
2289 {
2290         struct smp_ltk *k;
2291
2292         rcu_read_lock();
2293         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2294                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2295                         continue;
2296
2297                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2298                         rcu_read_unlock();
2299                         return k;
2300                 }
2301         }
2302         rcu_read_unlock();
2303
2304         return NULL;
2305 }
2306
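/* Resolve a Resolvable Private Address to its IRK. The first pass is
 * a cheap lookup against the last RPA seen for each key; only if that
 * fails is smp_irk_matches() used to run the address resolution
 * function with every stored IRK, caching the RPA on a hit.
 */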
2307 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2308 {
2309         struct smp_irk *irk;
2310
2311         rcu_read_lock();
2312         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2313                 if (!bacmp(&irk->rpa, rpa)) {
2314                         rcu_read_unlock();
2315                         return irk;
2316                 }
2317         }
2318
2319         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2320                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2321                         bacpy(&irk->rpa, rpa);
2322                         rcu_read_unlock();
2323                         return irk;
2324                 }
2325         }
2326         rcu_read_unlock();
2327
2328         return NULL;
2329 }
2330
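/* bdaddr_t stores the address little-endian, so b[5] is the most
 * significant byte. A static random address must have its two top
 * bits set to 0b11, hence the (b[5] & 0xc0) != 0xc0 rejection below.
 */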
2331 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2332                                      u8 addr_type)
2333 {
2334         struct smp_irk *irk;
2335
2336         /* Identity Address must be public or static random */
2337         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2338                 return NULL;
2339
2340         rcu_read_lock();
2341         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2342                 if (addr_type == irk->addr_type &&
2343                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2344                         rcu_read_unlock();
2345                         return irk;
2346                 }
2347         }
2348         rcu_read_unlock();
2349
2350         return NULL;
2351 }
2352
2353 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2354                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2355                                   u8 pin_len, bool *persistent)
2356 {
2357         struct link_key *key, *old_key;
2358         u8 old_key_type;
2359
2360         old_key = hci_find_link_key(hdev, bdaddr);
2361         if (old_key) {
2362                 old_key_type = old_key->type;
2363                 key = old_key;
2364         } else {
2365                 old_key_type = conn ? conn->key_type : 0xff;
2366                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2367                 if (!key)
2368                         return NULL;
2369                 list_add_rcu(&key->list, &hdev->link_keys);
2370         }
2371
2372         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2373
2374         /* Some buggy controller combinations generate a changed
2375          * combination key for legacy pairing even when there's no
2376          * previous key */
2377         if (type == HCI_LK_CHANGED_COMBINATION &&
2378             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2379                 type = HCI_LK_COMBINATION;
2380                 if (conn)
2381                         conn->key_type = type;
2382         }
2383
2384         bacpy(&key->bdaddr, bdaddr);
2385         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2386         key->pin_len = pin_len;
2387
2388         if (type == HCI_LK_CHANGED_COMBINATION)
2389                 key->type = old_key_type;
2390         else
2391                 key->type = type;
2392
2393         if (persistent)
2394                 *persistent = hci_persistent_key(hdev, conn, type,
2395                                                  old_key_type);
2396
2397         return key;
2398 }
2399
2400 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401                             u8 addr_type, u8 type, u8 authenticated,
2402                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2403 {
2404         struct smp_ltk *key, *old_key;
2405         u8 role = ltk_role(type);
2406
2407         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2408         if (old_key)
2409                 key = old_key;
2410         else {
2411                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2412                 if (!key)
2413                         return NULL;
2414                 list_add_rcu(&key->list, &hdev->long_term_keys);
2415         }
2416
2417         bacpy(&key->bdaddr, bdaddr);
2418         key->bdaddr_type = addr_type;
2419         memcpy(key->val, tk, sizeof(key->val));
2420         key->authenticated = authenticated;
2421         key->ediv = ediv;
2422         key->rand = rand;
2423         key->enc_size = enc_size;
2424         key->type = type;
2425
2426         return key;
2427 }
2428
2429 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2430                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2431 {
2432         struct smp_irk *irk;
2433
2434         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2435         if (!irk) {
2436                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2437                 if (!irk)
2438                         return NULL;
2439
2440                 bacpy(&irk->bdaddr, bdaddr);
2441                 irk->addr_type = addr_type;
2442
2443                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2444         }
2445
2446         memcpy(irk->val, val, 16);
2447         bacpy(&irk->rpa, rpa);
2448
2449         return irk;
2450 }
2451
2452 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2453 {
2454         struct link_key *key;
2455
2456         key = hci_find_link_key(hdev, bdaddr);
2457         if (!key)
2458                 return -ENOENT;
2459
2460         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2461
2462         list_del_rcu(&key->list);
2463         kfree_rcu(key, rcu);
2464
2465         return 0;
2466 }
2467
2468 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2469 {
2470         struct smp_ltk *k;
2471         int removed = 0;
2472
2473         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2474                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2475                         continue;
2476
2477                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2478
2479                 list_del_rcu(&k->list);
2480                 kfree_rcu(k, rcu);
2481                 removed++;
2482         }
2483
2484         return removed ? 0 : -ENOENT;
2485 }
2486
2487 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2488 {
2489         struct smp_irk *k;
2490
2491         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2492                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2493                         continue;
2494
2495                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2496
2497                 list_del_rcu(&k->list);
2498                 kfree_rcu(k, rcu);
2499         }
2500 }
2501
2502 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2503 {
2504         struct smp_ltk *k;
2505         struct smp_irk *irk;
2506         u8 addr_type;
2507
2508         if (type == BDADDR_BREDR) {
2509                 if (hci_find_link_key(hdev, bdaddr))
2510                         return true;
2511                 return false;
2512         }
2513
2514         /* Convert to HCI addr type which struct smp_ltk uses */
2515         if (type == BDADDR_LE_PUBLIC)
2516                 addr_type = ADDR_LE_DEV_PUBLIC;
2517         else
2518                 addr_type = ADDR_LE_DEV_RANDOM;
2519
2520         irk = hci_get_irk(hdev, bdaddr, addr_type);
2521         if (irk) {
2522                 bdaddr = &irk->bdaddr;
2523                 addr_type = irk->addr_type;
2524         }
2525
2526         rcu_read_lock();
2527         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2528                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2529                         rcu_read_unlock();
2530                         return true;
2531                 }
2532         }
2533         rcu_read_unlock();
2534
2535         return false;
2536 }
2537
2538 /* HCI command timer function */
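/* If a command never receives a completion event, give up on it:
 * restore the single outstanding-command credit (cmd_cnt) and kick
 * cmd_work so the next queued command can be sent.
 */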
2539 static void hci_cmd_timeout(struct work_struct *work)
2540 {
2541         struct hci_dev *hdev = container_of(work, struct hci_dev,
2542                                             cmd_timer.work);
2543
2544         if (hdev->sent_cmd) {
2545                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2546                 u16 opcode = __le16_to_cpu(sent->opcode);
2547
2548                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2549         } else {
2550                 BT_ERR("%s command tx timeout", hdev->name);
2551         }
2552
2553         atomic_set(&hdev->cmd_cnt, 1);
2554         queue_work(hdev->workqueue, &hdev->cmd_work);
2555 }
2556
2557 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2558                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2559 {
2560         struct oob_data *data;
2561
2562         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2563                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2564                         continue;
2565                 if (data->bdaddr_type != bdaddr_type)
2566                         continue;
2567                 return data;
2568         }
2569
2570         return NULL;
2571 }
2572
2573 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2574                                u8 bdaddr_type)
2575 {
2576         struct oob_data *data;
2577
2578         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2579         if (!data)
2580                 return -ENOENT;
2581
2582         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2583
2584         list_del(&data->list);
2585         kfree(data);
2586
2587         return 0;
2588 }
2589
2590 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2591 {
2592         struct oob_data *data, *n;
2593
2594         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2595                 list_del(&data->list);
2596                 kfree(data);
2597         }
2598 }
2599
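/* Store remote OOB pairing data. data->present records which blobs
 * are valid: 0x00 none, 0x01 P-192 only, 0x02 P-256 only and 0x03
 * both, as consumed by the pairing code.
 */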
2600 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2601                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2602                             u8 *hash256, u8 *rand256)
2603 {
2604         struct oob_data *data;
2605
2606         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2607         if (!data) {
2608                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2609                 if (!data)
2610                         return -ENOMEM;
2611
2612                 bacpy(&data->bdaddr, bdaddr);
2613                 data->bdaddr_type = bdaddr_type;
2614                 list_add(&data->list, &hdev->remote_oob_data);
2615         }
2616
2617         if (hash192 && rand192) {
2618                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2619                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2620                 if (hash256 && rand256)
2621                         data->present = 0x03;
2622         } else {
2623                 memset(data->hash192, 0, sizeof(data->hash192));
2624                 memset(data->rand192, 0, sizeof(data->rand192));
2625                 if (hash256 && rand256)
2626                         data->present = 0x02;
2627                 else
2628                         data->present = 0x00;
2629         }
2630
2631         if (hash256 && rand256) {
2632                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2633                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2634         } else {
2635                 memset(data->hash256, 0, sizeof(data->hash256));
2636                 memset(data->rand256, 0, sizeof(data->rand256));
2637                 if (hash192 && rand192)
2638                         data->present = 0x01;
2639         }
2640
2641         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2642
2643         return 0;
2644 }
2645
2646 /* This function requires the caller holds hdev->lock */
2647 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2648 {
2649         struct adv_info *adv_instance;
2650
2651         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2652                 if (adv_instance->instance == instance)
2653                         return adv_instance;
2654         }
2655
2656         return NULL;
2657 }
2658
2659 /* This function requires the caller holds hdev->lock */
2660 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2661 {
2662         struct adv_info *cur_instance;
2663
2664         cur_instance = hci_find_adv_instance(hdev, instance);
2665         if (!cur_instance)
2666                 return NULL;
2667
2668         if (cur_instance == list_last_entry(&hdev->adv_instances,
2669                                             struct adv_info, list))
2670                 return list_first_entry(&hdev->adv_instances,
2671                                                  struct adv_info, list);
2672         else
2673                 return list_next_entry(cur_instance, list);
2674 }
2675
2676 /* This function requires the caller holds hdev->lock */
2677 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2678 {
2679         struct adv_info *adv_instance;
2680
2681         adv_instance = hci_find_adv_instance(hdev, instance);
2682         if (!adv_instance)
2683                 return -ENOENT;
2684
2685         BT_DBG("%s removing instance %d", hdev->name, instance);
2686
2687         if (hdev->cur_adv_instance == instance) {
2688                 if (hdev->adv_instance_timeout) {
2689                         cancel_delayed_work(&hdev->adv_instance_expire);
2690                         hdev->adv_instance_timeout = 0;
2691                 }
2692                 hdev->cur_adv_instance = 0x00;
2693         }
2694
2695         list_del(&adv_instance->list);
2696         kfree(adv_instance);
2697
2698         hdev->adv_instance_cnt--;
2699
2700         return 0;
2701 }
2702
2703 /* This function requires the caller holds hdev->lock */
2704 void hci_adv_instances_clear(struct hci_dev *hdev)
2705 {
2706         struct adv_info *adv_instance, *n;
2707
2708         if (hdev->adv_instance_timeout) {
2709                 cancel_delayed_work(&hdev->adv_instance_expire);
2710                 hdev->adv_instance_timeout = 0;
2711         }
2712
2713         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2714                 list_del(&adv_instance->list);
2715                 kfree(adv_instance);
2716         }
2717
2718         hdev->adv_instance_cnt = 0;
2719         hdev->cur_adv_instance = 0x00;
2720 }
2721
2722 /* This function requires the caller holds hdev->lock */
2723 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2724                          u16 adv_data_len, u8 *adv_data,
2725                          u16 scan_rsp_len, u8 *scan_rsp_data,
2726                          u16 timeout, u16 duration)
2727 {
2728         struct adv_info *adv_instance;
2729
2730         adv_instance = hci_find_adv_instance(hdev, instance);
2731         if (adv_instance) {
2732                 memset(adv_instance->adv_data, 0,
2733                        sizeof(adv_instance->adv_data));
2734                 memset(adv_instance->scan_rsp_data, 0,
2735                        sizeof(adv_instance->scan_rsp_data));
2736         } else {
2737                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2738                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2739                         return -EOVERFLOW;
2740
2741                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2742                 if (!adv_instance)
2743                         return -ENOMEM;
2744
2745                 adv_instance->pending = true;
2746                 adv_instance->instance = instance;
2747                 list_add(&adv_instance->list, &hdev->adv_instances);
2748                 hdev->adv_instance_cnt++;
2749         }
2750
2751         adv_instance->flags = flags;
2752         adv_instance->adv_data_len = adv_data_len;
2753         adv_instance->scan_rsp_len = scan_rsp_len;
2754
2755         if (adv_data_len)
2756                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2757
2758         if (scan_rsp_len)
2759                 memcpy(adv_instance->scan_rsp_data,
2760                        scan_rsp_data, scan_rsp_len);
2761
2762         adv_instance->timeout = timeout;
2763         adv_instance->remaining_time = timeout;
2764
2765         if (duration == 0)
2766                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2767         else
2768                 adv_instance->duration = duration;
2769
2770         BT_DBG("%s for instance %d", hdev->name, instance);
2771
2772         return 0;
2773 }
2774
2775 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2776                                          bdaddr_t *bdaddr, u8 type)
2777 {
2778         struct bdaddr_list *b;
2779
2780         list_for_each_entry(b, bdaddr_list, list) {
2781                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2782                         return b;
2783         }
2784
2785         return NULL;
2786 }
2787
2788 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2789 {
2790         struct bdaddr_list *b, *n;
2791
2792         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2793                 list_del(&b->list);
2794                 kfree(b);
2795         }
2796 }
2797
2798 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2799 {
2800         struct bdaddr_list *entry;
2801
2802         if (!bacmp(bdaddr, BDADDR_ANY))
2803                 return -EBADF;
2804
2805         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2806                 return -EEXIST;
2807
2808         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2809         if (!entry)
2810                 return -ENOMEM;
2811
2812         bacpy(&entry->bdaddr, bdaddr);
2813         entry->bdaddr_type = type;
2814
2815         list_add(&entry->list, list);
2816
2817         return 0;
2818 }
2819
2820 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2821 {
2822         struct bdaddr_list *entry;
2823
2824         if (!bacmp(bdaddr, BDADDR_ANY)) {
2825                 hci_bdaddr_list_clear(list);
2826                 return 0;
2827         }
2828
2829         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2830         if (!entry)
2831                 return -ENOENT;
2832
2833         list_del(&entry->list);
2834         kfree(entry);
2835
2836         return 0;
2837 }
2838
2839 /* This function requires the caller holds hdev->lock */
2840 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2841                                                bdaddr_t *addr, u8 addr_type)
2842 {
2843         struct hci_conn_params *params;
2844
2845         list_for_each_entry(params, &hdev->le_conn_params, list) {
2846                 if (bacmp(&params->addr, addr) == 0 &&
2847                     params->addr_type == addr_type) {
2848                         return params;
2849                 }
2850         }
2851
2852         return NULL;
2853 }
2854
2855 /* This function requires the caller holds hdev->lock */
2856 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2857                                                   bdaddr_t *addr, u8 addr_type)
2858 {
2859         struct hci_conn_params *param;
2860
2861         list_for_each_entry(param, list, action) {
2862                 if (bacmp(&param->addr, addr) == 0 &&
2863                     param->addr_type == addr_type)
2864                         return param;
2865         }
2866
2867         return NULL;
2868 }
2869
2870 /* This function requires the caller holds hdev->lock */
2871 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2872                                             bdaddr_t *addr, u8 addr_type)
2873 {
2874         struct hci_conn_params *params;
2875
2876         params = hci_conn_params_lookup(hdev, addr, addr_type);
2877         if (params)
2878                 return params;
2879
2880         params = kzalloc(sizeof(*params), GFP_KERNEL);
2881         if (!params) {
2882                 BT_ERR("Out of memory");
2883                 return NULL;
2884         }
2885
2886         bacpy(&params->addr, addr);
2887         params->addr_type = addr_type;
2888
2889         list_add(&params->list, &hdev->le_conn_params);
2890         INIT_LIST_HEAD(&params->action);
2891
2892         params->conn_min_interval = hdev->le_conn_min_interval;
2893         params->conn_max_interval = hdev->le_conn_max_interval;
2894         params->conn_latency = hdev->le_conn_latency;
2895         params->supervision_timeout = hdev->le_supv_timeout;
2896         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2897
2898         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2899
2900         return params;
2901 }
2902
2903 static void hci_conn_params_free(struct hci_conn_params *params)
2904 {
2905         if (params->conn) {
2906                 hci_conn_drop(params->conn);
2907                 hci_conn_put(params->conn);
2908         }
2909
2910         list_del(&params->action);
2911         list_del(&params->list);
2912         kfree(params);
2913 }
2914
2915 /* This function requires the caller holds hdev->lock */
2916 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2917 {
2918         struct hci_conn_params *params;
2919
2920         params = hci_conn_params_lookup(hdev, addr, addr_type);
2921         if (!params)
2922                 return;
2923
2924         hci_conn_params_free(params);
2925
2926         hci_update_background_scan(hdev);
2927
2928         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2929 }
2930
2931 /* This function requires the caller holds hdev->lock */
2932 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2933 {
2934         struct hci_conn_params *params, *tmp;
2935
2936         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2937                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2938                         continue;
2939
2940                 /* If trying to establish a one-time connection to a
2941                  * disabled device, leave the params but mark them as just once.
2942                  */
2943                 if (params->explicit_connect) {
2944                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2945                         continue;
2946                 }
2947
2948                 list_del(&params->list);
2949                 kfree(params);
2950         }
2951
2952         BT_DBG("All LE disabled connection parameters were removed");
2953 }
2954
2955 /* This function requires the caller holds hdev->lock */
2956 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2957 {
2958         struct hci_conn_params *params, *tmp;
2959
2960         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2961                 hci_conn_params_free(params);
2962
2963         BT_DBG("All LE connection parameters were removed");
2964 }
2965
2966 /* Copy the Identity Address of the controller.
2967  *
2968  * If the controller has a public BD_ADDR, then by default use that one.
2969  * If this is an LE-only controller without a public address, default to
2970  * the static random address.
2971  *
2972  * For debugging purposes it is possible to force controllers with a
2973  * public address to use the static random address instead.
2974  *
2975  * In case BR/EDR has been disabled on a dual-mode controller and
2976  * userspace has configured a static address, then that address
2977  * becomes the identity address instead of the public BR/EDR address.
2978  */
2979 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2980                                u8 *bdaddr_type)
2981 {
2982         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2983             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2984             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2985              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2986                 bacpy(bdaddr, &hdev->static_addr);
2987                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2988         } else {
2989                 bacpy(bdaddr, &hdev->bdaddr);
2990                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2991         }
2992 }
2993
2994 /* Alloc HCI device */
2995 struct hci_dev *hci_alloc_dev(void)
2996 {
2997         struct hci_dev *hdev;
2998
2999         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3000         if (!hdev)
3001                 return NULL;
3002
3003         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3004         hdev->esco_type = (ESCO_HV1);
3005         hdev->link_mode = (HCI_LM_ACCEPT);
3006         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3007         hdev->io_capability = 0x03;     /* No Input No Output */
3008         hdev->manufacturer = 0xffff;    /* Default to internal use */
3009         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3010         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3011         hdev->adv_instance_cnt = 0;
3012         hdev->cur_adv_instance = 0x00;
3013         hdev->adv_instance_timeout = 0;
3014
3015         hdev->sniff_max_interval = 800;
3016         hdev->sniff_min_interval = 80;
3017
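        /* The LE defaults below are in controller units: advertising
         * and scan parameters in 0.625 ms slots (0x0800 = 1.28 s,
         * 0x0060 = 60 ms, 0x0030 = 30 ms), connection intervals in
         * 1.25 ms (0x0018 = 30 ms, 0x0028 = 50 ms), supervision
         * timeout in 10 ms (0x002a = 420 ms), data lengths in bytes
         * (0x001b = 27) and data times in microseconds (0x0148 = 328).
         */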
3018         hdev->le_adv_channel_map = 0x07;
3019         hdev->le_adv_min_interval = 0x0800;
3020         hdev->le_adv_max_interval = 0x0800;
3021         hdev->le_scan_interval = 0x0060;
3022         hdev->le_scan_window = 0x0030;
3023         hdev->le_conn_min_interval = 0x0018;
3024         hdev->le_conn_max_interval = 0x0028;
3025         hdev->le_conn_latency = 0x0000;
3026         hdev->le_supv_timeout = 0x002a;
3027         hdev->le_def_tx_len = 0x001b;
3028         hdev->le_def_tx_time = 0x0148;
3029         hdev->le_max_tx_len = 0x001b;
3030         hdev->le_max_tx_time = 0x0148;
3031         hdev->le_max_rx_len = 0x001b;
3032         hdev->le_max_rx_time = 0x0148;
3033
3034         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3035         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3036         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3037         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3038
3039         mutex_init(&hdev->lock);
3040         mutex_init(&hdev->req_lock);
3041
3042         INIT_LIST_HEAD(&hdev->mgmt_pending);
3043         INIT_LIST_HEAD(&hdev->blacklist);
3044         INIT_LIST_HEAD(&hdev->whitelist);
3045         INIT_LIST_HEAD(&hdev->uuids);
3046         INIT_LIST_HEAD(&hdev->link_keys);
3047         INIT_LIST_HEAD(&hdev->long_term_keys);
3048         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3049         INIT_LIST_HEAD(&hdev->remote_oob_data);
3050         INIT_LIST_HEAD(&hdev->le_white_list);
3051         INIT_LIST_HEAD(&hdev->le_conn_params);
3052         INIT_LIST_HEAD(&hdev->pend_le_conns);
3053         INIT_LIST_HEAD(&hdev->pend_le_reports);
3054         INIT_LIST_HEAD(&hdev->conn_hash.list);
3055         INIT_LIST_HEAD(&hdev->adv_instances);
3056
3057         INIT_WORK(&hdev->rx_work, hci_rx_work);
3058         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3059         INIT_WORK(&hdev->tx_work, hci_tx_work);
3060         INIT_WORK(&hdev->power_on, hci_power_on);
3061         INIT_WORK(&hdev->error_reset, hci_error_reset);
3062
3063         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3064
3065         skb_queue_head_init(&hdev->rx_q);
3066         skb_queue_head_init(&hdev->cmd_q);
3067         skb_queue_head_init(&hdev->raw_q);
3068
3069         init_waitqueue_head(&hdev->req_wait_q);
3070
3071         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3072
3073         hci_request_setup(hdev);
3074
3075         hci_init_sysfs(hdev);
3076         discovery_init(hdev);
3077
3078         return hdev;
3079 }
3080 EXPORT_SYMBOL(hci_alloc_dev);
3081
3082 /* Free HCI device */
3083 void hci_free_dev(struct hci_dev *hdev)
3084 {
3085         /* Will be freed via the device release callback */
3086         put_device(&hdev->dev);
3087 }
3088 EXPORT_SYMBOL(hci_free_dev);
3089
3090 /* Register HCI device */
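/* A minimal driver-side sketch of the expected call sequence,
 * illustrative only (my_open/my_close/my_send are hypothetical driver
 * callbacks and error handling is omitted):
 *
 *     hdev = hci_alloc_dev();
 *     hdev->bus   = HCI_USB;
 *     hdev->open  = my_open;
 *     hdev->close = my_close;
 *     hdev->send  = my_send;
 *     hci_set_drvdata(hdev, priv);
 *     id = hci_register_dev(hdev);
 */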
3091 int hci_register_dev(struct hci_dev *hdev)
3092 {
3093         int id, error;
3094
3095         if (!hdev->open || !hdev->close || !hdev->send)
3096                 return -EINVAL;
3097
3098         /* Do not allow HCI_AMP devices to register at index 0,
3099          * so the index can be used as the AMP controller ID.
3100          */
3101         switch (hdev->dev_type) {
3102         case HCI_PRIMARY:
3103                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3104                 break;
3105         case HCI_AMP:
3106                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3107                 break;
3108         default:
3109                 return -EINVAL;
3110         }
3111
3112         if (id < 0)
3113                 return id;
3114
3115         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3116         hdev->id = id;
3117
3118         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3119
3120         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3121         if (!hdev->workqueue) {
3122                 error = -ENOMEM;
3123                 goto err;
3124         }
3125
3126         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3127                                                       hdev->name);
3128         if (!hdev->req_workqueue) {
3129                 destroy_workqueue(hdev->workqueue);
3130                 error = -ENOMEM;
3131                 goto err;
3132         }
3133
3134         if (!IS_ERR_OR_NULL(bt_debugfs))
3135                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3136
3137         dev_set_name(&hdev->dev, "%s", hdev->name);
3138
3139         error = device_add(&hdev->dev);
3140         if (error < 0)
3141                 goto err_wqueue;
3142
3143         hci_leds_init(hdev);
3144
3145         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3146                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3147                                     hdev);
3148         if (hdev->rfkill) {
3149                 if (rfkill_register(hdev->rfkill) < 0) {
3150                         rfkill_destroy(hdev->rfkill);
3151                         hdev->rfkill = NULL;
3152                 }
3153         }
3154
3155         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3156                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3157
3158         hci_dev_set_flag(hdev, HCI_SETUP);
3159         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3160
3161         if (hdev->dev_type == HCI_PRIMARY) {
3162                 /* Assume BR/EDR support until proven otherwise (such as
3163                  * through reading supported features during init).
3164                  */
3165                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3166         }
3167
3168         write_lock(&hci_dev_list_lock);
3169         list_add(&hdev->list, &hci_dev_list);
3170         write_unlock(&hci_dev_list_lock);
3171
3172         /* Devices that are marked for raw-only usage are unconfigured
3173          * and should not be included in normal operation.
3174          */
3175         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3176                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3177
3178         hci_sock_dev_event(hdev, HCI_DEV_REG);
3179         hci_dev_hold(hdev);
3180
3181         queue_work(hdev->req_workqueue, &hdev->power_on);
3182
3183         return id;
3184
3185 err_wqueue:
3186         destroy_workqueue(hdev->workqueue);
3187         destroy_workqueue(hdev->req_workqueue);
3188 err:
3189         ida_simple_remove(&hci_index_ida, hdev->id);
3190
3191         return error;
3192 }
3193 EXPORT_SYMBOL(hci_register_dev);
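
/* Usage sketch (illustrative only, not part of this file): a transport
 * driver typically pairs the alloc/register calls above like this; the
 * my_open/my_close/my_send callbacks are hypothetical names.
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */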
3194
3195 /* Unregister HCI device */
3196 void hci_unregister_dev(struct hci_dev *hdev)
3197 {
3198         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3199
3200         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3201
3202         write_lock(&hci_dev_list_lock);
3203         list_del(&hdev->list);
3204         write_unlock(&hci_dev_list_lock);
3205
3206         cancel_work_sync(&hdev->power_on);
3207
3208         hci_dev_do_close(hdev);
3209
3210         if (!test_bit(HCI_INIT, &hdev->flags) &&
3211             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3212             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3213                 hci_dev_lock(hdev);
3214                 mgmt_index_removed(hdev);
3215                 hci_dev_unlock(hdev);
3216         }
3217
3218         /* mgmt_index_removed should take care of emptying the
3219          * pending list */
3220         BUG_ON(!list_empty(&hdev->mgmt_pending));
3221
3222         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3223
3224         if (hdev->rfkill) {
3225                 rfkill_unregister(hdev->rfkill);
3226                 rfkill_destroy(hdev->rfkill);
3227         }
3228
3229         device_del(&hdev->dev);
3230         /* Actual cleanup is deferred until hci_cleanup_dev(). */
3231         hci_dev_put(hdev);
3232 }
3233 EXPORT_SYMBOL(hci_unregister_dev);
3234
3235 /* Cleanup HCI device */
3236 void hci_cleanup_dev(struct hci_dev *hdev)
3237 {
3238         debugfs_remove_recursive(hdev->debugfs);
3239         kfree_const(hdev->hw_info);
3240         kfree_const(hdev->fw_info);
3241
3242         destroy_workqueue(hdev->workqueue);
3243         destroy_workqueue(hdev->req_workqueue);
3244
3245         hci_dev_lock(hdev);
3246         hci_bdaddr_list_clear(&hdev->blacklist);
3247         hci_bdaddr_list_clear(&hdev->whitelist);
3248         hci_uuids_clear(hdev);
3249         hci_link_keys_clear(hdev);
3250         hci_smp_ltks_clear(hdev);
3251         hci_smp_irks_clear(hdev);
3252         hci_remote_oob_data_clear(hdev);
3253         hci_adv_instances_clear(hdev);
3254         hci_bdaddr_list_clear(&hdev->le_white_list);
3255         hci_conn_params_clear_all(hdev);
3256         hci_discovery_filter_clear(hdev);
3257         hci_dev_unlock(hdev);
3258
3259         ida_simple_remove(&hci_index_ida, hdev->id);
3260 }
3261
3262 /* Suspend HCI device */
3263 int hci_suspend_dev(struct hci_dev *hdev)
3264 {
3265         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3266         return 0;
3267 }
3268 EXPORT_SYMBOL(hci_suspend_dev);
3269
3270 /* Resume HCI device */
3271 int hci_resume_dev(struct hci_dev *hdev)
3272 {
3273         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3274         return 0;
3275 }
3276 EXPORT_SYMBOL(hci_resume_dev);
3277
3278 /* Reset HCI device */
3279 int hci_reset_dev(struct hci_dev *hdev)
3280 {
3281         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3282         struct sk_buff *skb;
3283
3284         skb = bt_skb_alloc(3, GFP_ATOMIC);
3285         if (!skb)
3286                 return -ENOMEM;
3287
3288         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3289         skb_put_data(skb, hw_err, 3);
3290
3291         /* Send Hardware Error to upper stack */
3292         return hci_recv_frame(hdev, skb);
3293 }
3294 EXPORT_SYMBOL(hci_reset_dev);
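
/* The three bytes injected above form a complete HCI Hardware Error
 * event: event code HCI_EV_HARDWARE_ERROR (0x10), parameter length
 * 0x01 and hardware code 0x00. Feeding it through hci_recv_frame()
 * makes the stack react as if the controller had reported the error
 * itself, triggering the error_reset work set up in hci_alloc_dev().
 */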
3295
3296 /* Receive frame from HCI drivers */
3297 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3298 {
3299         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3300                       !test_bit(HCI_INIT, &hdev->flags))) {
3301                 kfree_skb(skb);
3302                 return -ENXIO;
3303         }
3304
3305         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3306             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3307             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3308                 kfree_skb(skb);
3309                 return -EINVAL;
3310         }
3311
3312         /* Incoming skb */
3313         bt_cb(skb)->incoming = 1;
3314
3315         /* Time stamp */
3316         __net_timestamp(skb);
3317
3318         skb_queue_tail(&hdev->rx_q, skb);
3319         queue_work(hdev->workqueue, &hdev->rx_work);
3320
3321         return 0;
3322 }
3323 EXPORT_SYMBOL(hci_recv_frame);
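
/* Driver-side sketch (illustrative; "data" and "count" are hypothetical
 * names): an RX completion handler hands a reassembled event packet to
 * the core roughly as below. Note that hci_recv_frame() always consumes
 * the skb, even on error.
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, data, count);
 *	return hci_recv_frame(hdev, skb);
 */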
3324
3325 /* Receive diagnostic message from HCI drivers */
3326 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3327 {
3328         /* Mark as diagnostic packet */
3329         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3330
3331         /* Time stamp */
3332         __net_timestamp(skb);
3333
3334         skb_queue_tail(&hdev->rx_q, skb);
3335         queue_work(hdev->workqueue, &hdev->rx_work);
3336
3337         return 0;
3338 }
3339 EXPORT_SYMBOL(hci_recv_diag);
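
/* Unlike hci_recv_frame() above, no HCI_UP/HCI_INIT or packet type
 * check is performed here: diagnostic packets are accepted whenever
 * the driver produces them, and hci_rx_work() only forwards them to
 * the monitor (and any promiscuous sockets) before dropping them.
 */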
3340
3341 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3342 {
3343         va_list vargs;
3344
3345         va_start(vargs, fmt);
3346         kfree_const(hdev->hw_info);
3347         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3348         va_end(vargs);
3349 }
3350 EXPORT_SYMBOL(hci_set_hw_info);
3351
3352 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3353 {
3354         va_list vargs;
3355
3356         va_start(vargs, fmt);
3357         kfree_const(hdev->fw_info);
3358         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3359         va_end(vargs);
3360 }
3361 EXPORT_SYMBOL(hci_set_fw_info);
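
/* Usage sketch (illustrative; "rev" and "build" are hypothetical):
 * drivers record hardware and firmware revisions during setup(), e.g.
 *
 *	hci_set_hw_info(hdev, "rev %u", rev);
 *	hci_set_fw_info(hdev, "build %u", build);
 *
 * The strings are exposed through debugfs and released with
 * kfree_const() in hci_cleanup_dev() above.
 */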
3362
3363 /* ---- Interface to upper protocols ---- */
3364
3365 int hci_register_cb(struct hci_cb *cb)
3366 {
3367         BT_DBG("%p name %s", cb, cb->name);
3368
3369         mutex_lock(&hci_cb_list_lock);
3370         list_add_tail(&cb->list, &hci_cb_list);
3371         mutex_unlock(&hci_cb_list_lock);
3372
3373         return 0;
3374 }
3375 EXPORT_SYMBOL(hci_register_cb);
3376
3377 int hci_unregister_cb(struct hci_cb *cb)
3378 {
3379         BT_DBG("%p name %s", cb, cb->name);
3380
3381         mutex_lock(&hci_cb_list_lock);
3382         list_del(&cb->list);
3383         mutex_unlock(&hci_cb_list_lock);
3384
3385         return 0;
3386 }
3387 EXPORT_SYMBOL(hci_unregister_cb);
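
/* Sketch of the upper-protocol side (illustrative; the my_* names are
 * hypothetical): L2CAP and SCO each register a struct hci_cb at module
 * init so the core can call back on connection events.
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */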
3388
3389 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3390 {
3391         int err;
3392
3393         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3394                skb->len);
3395
3396         /* Time stamp */
3397         __net_timestamp(skb);
3398
3399         /* Send copy to monitor */
3400         hci_send_to_monitor(hdev, skb);
3401
3402         if (atomic_read(&hdev->promisc)) {
3403                 /* Send copy to the sockets */
3404                 hci_send_to_sock(hdev, skb);
3405         }
3406
3407         /* Get rid of skb owner, prior to sending to the driver. */
3408         skb_orphan(skb);
3409
3410         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3411                 kfree_skb(skb);
3412                 return;
3413         }
3414
3415         err = hdev->send(hdev, skb);
3416         if (err < 0) {
3417                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3418                 kfree_skb(skb);
3419         }
3420 }
3421
3422 /* Send HCI command */
3423 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3424                  const void *param)
3425 {
3426         struct sk_buff *skb;
3427
3428         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3429
3430         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3431         if (!skb) {
3432                 BT_ERR("%s no memory for command", hdev->name);
3433                 return -ENOMEM;
3434         }
3435
3436         /* Stand-alone HCI commands must be flagged as
3437          * single-command requests.
3438          */
3439         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3440
3441         skb_queue_tail(&hdev->cmd_q, skb);
3442         queue_work(hdev->workqueue, &hdev->cmd_work);
3443
3444         return 0;
3445 }
3446
3447 /* Get data from the previously sent command */
3448 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3449 {
3450         struct hci_command_hdr *hdr;
3451
3452         if (!hdev->sent_cmd)
3453                 return NULL;
3454
3455         hdr = (void *) hdev->sent_cmd->data;
3456
3457         if (hdr->opcode != cpu_to_le16(opcode))
3458                 return NULL;
3459
3460         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3461
3462         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3463 }
3464
3465 /* Send HCI command and wait for command complete event */
3466 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3467                              const void *param, u32 timeout)
3468 {
3469         struct sk_buff *skb;
3470
3471         if (!test_bit(HCI_UP, &hdev->flags))
3472                 return ERR_PTR(-ENETDOWN);
3473
3474         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3475
3476         hci_req_sync_lock(hdev);
3477         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3478         hci_req_sync_unlock(hdev);
3479
3480         return skb;
3481 }
3482 EXPORT_SYMBOL(hci_cmd_sync);
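
/* Caller sketch (illustrative): on success the returned skb carries
 * the Command Complete parameters and must be freed by the caller.
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */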
3483
3484 /* Send ACL data */
3485 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3486 {
3487         struct hci_acl_hdr *hdr;
3488         int len = skb->len;
3489
3490         skb_push(skb, HCI_ACL_HDR_SIZE);
3491         skb_reset_transport_header(skb);
3492         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3493         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3494         hdr->dlen   = cpu_to_le16(len);
3495 }
3496
3497 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3498                           struct sk_buff *skb, __u16 flags)
3499 {
3500         struct hci_conn *conn = chan->conn;
3501         struct hci_dev *hdev = conn->hdev;
3502         struct sk_buff *list;
3503
3504         skb->len = skb_headlen(skb);
3505         skb->data_len = 0;
3506
3507         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3508
3509         switch (hdev->dev_type) {
3510         case HCI_PRIMARY:
3511                 hci_add_acl_hdr(skb, conn->handle, flags);
3512                 break;
3513         case HCI_AMP:
3514                 hci_add_acl_hdr(skb, chan->handle, flags);
3515                 break;
3516         default:
3517                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3518                 return;
3519         }
3520
3521         list = skb_shinfo(skb)->frag_list;
3522         if (!list) {
3523                 /* Non-fragmented */
3524                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3525
3526                 skb_queue_tail(queue, skb);
3527         } else {
3528                 /* Fragmented */
3529                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3530
3531                 skb_shinfo(skb)->frag_list = NULL;
3532
3533                 /* Queue all fragments atomically. We need to use spin_lock_bh
3534                  * here because of 6LoWPAN links: there this function is
3535                  * called from softirq context, and taking a plain spin
3536                  * lock could deadlock.
3537                  */
3538                 spin_lock_bh(&queue->lock);
3539
3540                 __skb_queue_tail(queue, skb);
3541
3542                 flags &= ~ACL_START;
3543                 flags |= ACL_CONT;
3544                 do {
3545                         skb = list; list = list->next;
3546
3547                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3548                         hci_add_acl_hdr(skb, conn->handle, flags);
3549
3550                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3551
3552                         __skb_queue_tail(queue, skb);
3553                 } while (list);
3554
3555                 spin_unlock_bh(&queue->lock);
3556         }
3557 }
3558
3559 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3560 {
3561         struct hci_dev *hdev = chan->conn->hdev;
3562
3563         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3564
3565         hci_queue_acl(chan, &chan->data_q, skb, flags);
3566
3567         queue_work(hdev->workqueue, &hdev->tx_work);
3568 }
3569
3570 /* Send SCO data */
3571 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3572 {
3573         struct hci_dev *hdev = conn->hdev;
3574         struct hci_sco_hdr hdr;
3575
3576         BT_DBG("%s len %d", hdev->name, skb->len);
3577
3578         hdr.handle = cpu_to_le16(conn->handle);
3579         hdr.dlen   = skb->len;
3580
3581         skb_push(skb, HCI_SCO_HDR_SIZE);
3582         skb_reset_transport_header(skb);
3583         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3584
3585         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3586
3587         skb_queue_tail(&conn->data_q, skb);
3588         queue_work(hdev->workqueue, &hdev->tx_work);
3589 }
3590
3591 /* ---- HCI TX task (outgoing data) ---- */
3592
3593 /* HCI Connection scheduler */
3594 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3595                                      int *quote)
3596 {
3597         struct hci_conn_hash *h = &hdev->conn_hash;
3598         struct hci_conn *conn = NULL, *c;
3599         unsigned int num = 0, min = ~0;
3600
3601         /* We don't have to lock the device here. Connections are always
3602          * added and removed with the TX task disabled. */
3603
3604         rcu_read_lock();
3605
3606         list_for_each_entry_rcu(c, &h->list, list) {
3607                 if (c->type != type || skb_queue_empty(&c->data_q))
3608                         continue;
3609
3610                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3611                         continue;
3612
3613                 num++;
3614
3615                 if (c->sent < min) {
3616                         min  = c->sent;
3617                         conn = c;
3618                 }
3619
3620                 if (hci_conn_num(hdev, type) == num)
3621                         break;
3622         }
3623
3624         rcu_read_unlock();
3625
3626         if (conn) {
3627                 int cnt, q;
3628
3629                 switch (conn->type) {
3630                 case ACL_LINK:
3631                         cnt = hdev->acl_cnt;
3632                         break;
3633                 case SCO_LINK:
3634                 case ESCO_LINK:
3635                         cnt = hdev->sco_cnt;
3636                         break;
3637                 case LE_LINK:
3638                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3639                         break;
3640                 default:
3641                         cnt = 0;
3642                         BT_ERR("Unknown link type");
3643                 }
3644
3645                 q = cnt / num;
3646                 *quote = q ? q : 1;
3647         } else
3648                 *quote = 0;
3649
3650         BT_DBG("conn %p quote %d", conn, *quote);
3651         return conn;
3652 }
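
/* Worked example for the quote above: with cnt == 6 free SCO buffers
 * and num == 4 eligible connections, the least-sent connection gets a
 * quote of 6 / 4 = 1 packet this round; a single connection would get
 * all 6. The "q ? q : 1" fallback guarantees forward progress when
 * there are more connections than free buffers.
 */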
3653
3654 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3655 {
3656         struct hci_conn_hash *h = &hdev->conn_hash;
3657         struct hci_conn *c;
3658
3659         BT_ERR("%s link tx timeout", hdev->name);
3660
3661         rcu_read_lock();
3662
3663         /* Kill stalled connections */
3664         list_for_each_entry_rcu(c, &h->list, list) {
3665                 if (c->type == type && c->sent) {
3666                         BT_ERR("%s killing stalled connection %pMR",
3667                                hdev->name, &c->dst);
3668                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3669                 }
3670         }
3671
3672         rcu_read_unlock();
3673 }
3674
3675 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3676                                       int *quote)
3677 {
3678         struct hci_conn_hash *h = &hdev->conn_hash;
3679         struct hci_chan *chan = NULL;
3680         unsigned int num = 0, min = ~0, cur_prio = 0;
3681         struct hci_conn *conn;
3682         int cnt, q, conn_num = 0;
3683
3684         BT_DBG("%s", hdev->name);
3685
3686         rcu_read_lock();
3687
3688         list_for_each_entry_rcu(conn, &h->list, list) {
3689                 struct hci_chan *tmp;
3690
3691                 if (conn->type != type)
3692                         continue;
3693
3694                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3695                         continue;
3696
3697                 conn_num++;
3698
3699                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3700                         struct sk_buff *skb;
3701
3702                         if (skb_queue_empty(&tmp->data_q))
3703                                 continue;
3704
3705                         skb = skb_peek(&tmp->data_q);
3706                         if (skb->priority < cur_prio)
3707                                 continue;
3708
3709                         if (skb->priority > cur_prio) {
3710                                 num = 0;
3711                                 min = ~0;
3712                                 cur_prio = skb->priority;
3713                         }
3714
3715                         num++;
3716
3717                         if (conn->sent < min) {
3718                                 min  = conn->sent;
3719                                 chan = tmp;
3720                         }
3721                 }
3722
3723                 if (hci_conn_num(hdev, type) == conn_num)
3724                         break;
3725         }
3726
3727         rcu_read_unlock();
3728
3729         if (!chan)
3730                 return NULL;
3731
3732         switch (chan->conn->type) {
3733         case ACL_LINK:
3734                 cnt = hdev->acl_cnt;
3735                 break;
3736         case AMP_LINK:
3737                 cnt = hdev->block_cnt;
3738                 break;
3739         case SCO_LINK:
3740         case ESCO_LINK:
3741                 cnt = hdev->sco_cnt;
3742                 break;
3743         case LE_LINK:
3744                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3745                 break;
3746         default:
3747                 cnt = 0;
3748                 BT_ERR("Unknown link type");
3749         }
3750
3751         q = cnt / num;
3752         *quote = q ? q : 1;
3753         BT_DBG("chan %p quote %d", chan, *quote);
3754         return chan;
3755 }
3756
3757 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3758 {
3759         struct hci_conn_hash *h = &hdev->conn_hash;
3760         struct hci_conn *conn;
3761         int num = 0;
3762
3763         BT_DBG("%s", hdev->name);
3764
3765         rcu_read_lock();
3766
3767         list_for_each_entry_rcu(conn, &h->list, list) {
3768                 struct hci_chan *chan;
3769
3770                 if (conn->type != type)
3771                         continue;
3772
3773                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3774                         continue;
3775
3776                 num++;
3777
3778                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3779                         struct sk_buff *skb;
3780
3781                         if (chan->sent) {
3782                                 chan->sent = 0;
3783                                 continue;
3784                         }
3785
3786                         if (skb_queue_empty(&chan->data_q))
3787                                 continue;
3788
3789                         skb = skb_peek(&chan->data_q);
3790                         if (skb->priority >= HCI_PRIO_MAX - 1)
3791                                 continue;
3792
3793                         skb->priority = HCI_PRIO_MAX - 1;
3794
3795                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3796                                skb->priority);
3797                 }
3798
3799                 if (hci_conn_num(hdev, type) == num)
3800                         break;
3801         }
3802
3803         rcu_read_unlock();
3804
3805 }
3806
3807 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3808 {
3809         /* Calculate count of blocks used by this packet */
3810         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3811 }
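
/* Worked example (hypothetical numbers): with hdev->block_len == 256
 * and a queued ACL packet whose payload is 1024 bytes (skb->len ==
 * 1024 + HCI_ACL_HDR_SIZE), DIV_ROUND_UP(1024, 256) charges 4 data
 * blocks against hdev->block_cnt.
 */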
3812
3813 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3814 {
3815         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3816                 /* ACL tx timeout must be longer than maximum
3817                  * link supervision timeout (40.9 seconds) */
3818                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3819                                        HCI_ACL_TX_TIMEOUT))
3820                         hci_link_tx_to(hdev, ACL_LINK);
3821         }
3822 }
3823
3824 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3825 {
3826         unsigned int cnt = hdev->acl_cnt;
3827         struct hci_chan *chan;
3828         struct sk_buff *skb;
3829         int quote;
3830
3831         __check_timeout(hdev, cnt);
3832
3833         while (hdev->acl_cnt &&
3834                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3835                 u32 priority = (skb_peek(&chan->data_q))->priority;
3836                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3837                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3838                                skb->len, skb->priority);
3839
3840                         /* Stop if priority has changed */
3841                         if (skb->priority < priority)
3842                                 break;
3843
3844                         skb = skb_dequeue(&chan->data_q);
3845
3846                         hci_conn_enter_active_mode(chan->conn,
3847                                                    bt_cb(skb)->force_active);
3848
3849                         hci_send_frame(hdev, skb);
3850                         hdev->acl_last_tx = jiffies;
3851
3852                         hdev->acl_cnt--;
3853                         chan->sent++;
3854                         chan->conn->sent++;
3855                 }
3856         }
3857
3858         if (cnt != hdev->acl_cnt)
3859                 hci_prio_recalculate(hdev, ACL_LINK);
3860 }
3861
3862 static void hci_sched_acl_blk(struct hci_dev *hdev)
3863 {
3864         unsigned int cnt = hdev->block_cnt;
3865         struct hci_chan *chan;
3866         struct sk_buff *skb;
3867         int quote;
3868         u8 type;
3869
3870         __check_timeout(hdev, cnt);
3871
3872         BT_DBG("%s", hdev->name);
3873
3874         if (hdev->dev_type == HCI_AMP)
3875                 type = AMP_LINK;
3876         else
3877                 type = ACL_LINK;
3878
3879         while (hdev->block_cnt > 0 &&
3880                (chan = hci_chan_sent(hdev, type, &quote))) {
3881                 u32 priority = (skb_peek(&chan->data_q))->priority;
3882                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3883                         int blocks;
3884
3885                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3886                                skb->len, skb->priority);
3887
3888                         /* Stop if priority has changed */
3889                         if (skb->priority < priority)
3890                                 break;
3891
3892                         skb = skb_dequeue(&chan->data_q);
3893
3894                         blocks = __get_blocks(hdev, skb);
3895                         if (blocks > hdev->block_cnt)
3896                                 return;
3897
3898                         hci_conn_enter_active_mode(chan->conn,
3899                                                    bt_cb(skb)->force_active);
3900
3901                         hci_send_frame(hdev, skb);
3902                         hdev->acl_last_tx = jiffies;
3903
3904                         hdev->block_cnt -= blocks;
3905                         quote -= blocks;
3906
3907                         chan->sent += blocks;
3908                         chan->conn->sent += blocks;
3909                 }
3910         }
3911
3912         if (cnt != hdev->block_cnt)
3913                 hci_prio_recalculate(hdev, type);
3914 }
3915
3916 static void hci_sched_acl(struct hci_dev *hdev)
3917 {
3918         BT_DBG("%s", hdev->name);
3919
3920         /* No ACL link over BR/EDR controller */
3921         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3922                 return;
3923
3924         /* No AMP link over AMP controller */
3925         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3926                 return;
3927
3928         switch (hdev->flow_ctl_mode) {
3929         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3930                 hci_sched_acl_pkt(hdev);
3931                 break;
3932
3933         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3934                 hci_sched_acl_blk(hdev);
3935                 break;
3936         }
3937 }
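
/* Packet-based flow control (the default) budgets whole ACL packets
 * against hdev->acl_cnt, while block-based flow control, used with
 * AMP controllers, budgets data blocks against hdev->block_cnt as
 * computed by __get_blocks() above.
 */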
3938
3939 /* Schedule SCO */
3940 static void hci_sched_sco(struct hci_dev *hdev)
3941 {
3942         struct hci_conn *conn;
3943         struct sk_buff *skb;
3944         int quote;
3945
3946         BT_DBG("%s", hdev->name);
3947
3948         if (!hci_conn_num(hdev, SCO_LINK))
3949                 return;
3950
3951         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3952                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3953                         BT_DBG("skb %p len %d", skb, skb->len);
3954                         hci_send_frame(hdev, skb);
3955
3956                         conn->sent++;
3957                         if (conn->sent == ~0)
3958                                 conn->sent = 0;
3959                 }
3960         }
3961 }
3962
3963 static void hci_sched_esco(struct hci_dev *hdev)
3964 {
3965         struct hci_conn *conn;
3966         struct sk_buff *skb;
3967         int quote;
3968
3969         BT_DBG("%s", hdev->name);
3970
3971         if (!hci_conn_num(hdev, ESCO_LINK))
3972                 return;
3973
3974         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3975                                                      &quote))) {
3976                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3977                         BT_DBG("skb %p len %d", skb, skb->len);
3978                         hci_send_frame(hdev, skb);
3979
3980                         conn->sent++;
3981                         if (conn->sent == ~0)
3982                                 conn->sent = 0;
3983                 }
3984         }
3985 }
3986
3987 static void hci_sched_le(struct hci_dev *hdev)
3988 {
3989         struct hci_chan *chan;
3990         struct sk_buff *skb;
3991         int quote, cnt, tmp;
3992
3993         BT_DBG("%s", hdev->name);
3994
3995         if (!hci_conn_num(hdev, LE_LINK))
3996                 return;
3997
3998         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3999                 /* LE tx timeout must be longer than maximum
4000                  * link supervision timeout (40.9 seconds) */
4001                 if (!hdev->le_cnt && hdev->le_pkts &&
4002                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4003                         hci_link_tx_to(hdev, LE_LINK);
4004         }
4005
4006         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4007         tmp = cnt;
4008         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4009                 u32 priority = (skb_peek(&chan->data_q))->priority;
4010                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4011                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4012                                skb->len, skb->priority);
4013
4014                         /* Stop if priority has changed */
4015                         if (skb->priority < priority)
4016                                 break;
4017
4018                         skb = skb_dequeue(&chan->data_q);
4019
4020                         hci_send_frame(hdev, skb);
4021                         hdev->le_last_tx = jiffies;
4022
4023                         cnt--;
4024                         chan->sent++;
4025                         chan->conn->sent++;
4026                 }
4027         }
4028
4029         if (hdev->le_pkts)
4030                 hdev->le_cnt = cnt;
4031         else
4032                 hdev->acl_cnt = cnt;
4033
4034         if (cnt != tmp)
4035                 hci_prio_recalculate(hdev, LE_LINK);
4036 }
4037
4038 static void hci_tx_work(struct work_struct *work)
4039 {
4040         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4041         struct sk_buff *skb;
4042
4043         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4044                hdev->sco_cnt, hdev->le_cnt);
4045
4046         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4047                 /* Schedule queues and send pending data to the HCI driver */
4048                 hci_sched_acl(hdev);
4049                 hci_sched_sco(hdev);
4050                 hci_sched_esco(hdev);
4051                 hci_sched_le(hdev);
4052         }
4053
4054         /* Send next queued raw (unknown type) packet */
4055         while ((skb = skb_dequeue(&hdev->raw_q)))
4056                 hci_send_frame(hdev, skb);
4057 }
4058
4059 /* ----- HCI RX task (incoming data processing) ----- */
4060
4061 /* ACL data packet */
4062 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4063 {
4064         struct hci_acl_hdr *hdr = (void *) skb->data;
4065         struct hci_conn *conn;
4066         __u16 handle, flags;
4067
4068         skb_pull(skb, HCI_ACL_HDR_SIZE);
4069
4070         handle = __le16_to_cpu(hdr->handle);
4071         flags  = hci_flags(handle);
4072         handle = hci_handle(handle);
4073
4074         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4075                handle, flags);
4076
4077         hdev->stat.acl_rx++;
4078
4079         hci_dev_lock(hdev);
4080         conn = hci_conn_hash_lookup_handle(hdev, handle);
4081         hci_dev_unlock(hdev);
4082
4083         if (conn) {
4084                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4085
4086                 /* Send to upper protocol */
4087                 l2cap_recv_acldata(conn, skb, flags);
4088                 return;
4089         } else {
4090                 BT_ERR("%s ACL packet for unknown connection handle %d",
4091                        hdev->name, handle);
4092         }
4093
4094         kfree_skb(skb);
4095 }
4096
4097 /* SCO data packet */
4098 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4099 {
4100         struct hci_sco_hdr *hdr = (void *) skb->data;
4101         struct hci_conn *conn;
4102         __u16 handle;
4103
4104         skb_pull(skb, HCI_SCO_HDR_SIZE);
4105
4106         handle = __le16_to_cpu(hdr->handle);
4107
4108         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4109
4110         hdev->stat.sco_rx++;
4111
4112         hci_dev_lock(hdev);
4113         conn = hci_conn_hash_lookup_handle(hdev, handle);
4114         hci_dev_unlock(hdev);
4115
4116         if (conn) {
4117                 /* Send to upper protocol */
4118                 sco_recv_scodata(conn, skb);
4119                 return;
4120         } else {
4121                 BT_ERR("%s SCO packet for unknown connection handle %d",
4122                        hdev->name, handle);
4123         }
4124
4125         kfree_skb(skb);
4126 }
4127
4128 static bool hci_req_is_complete(struct hci_dev *hdev)
4129 {
4130         struct sk_buff *skb;
4131
4132         skb = skb_peek(&hdev->cmd_q);
4133         if (!skb)
4134                 return true;
4135
4136         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4137 }
4138
4139 static void hci_resend_last(struct hci_dev *hdev)
4140 {
4141         struct hci_command_hdr *sent;
4142         struct sk_buff *skb;
4143         u16 opcode;
4144
4145         if (!hdev->sent_cmd)
4146                 return;
4147
4148         sent = (void *) hdev->sent_cmd->data;
4149         opcode = __le16_to_cpu(sent->opcode);
4150         if (opcode == HCI_OP_RESET)
4151                 return;
4152
4153         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4154         if (!skb)
4155                 return;
4156
4157         skb_queue_head(&hdev->cmd_q, skb);
4158         queue_work(hdev->workqueue, &hdev->cmd_work);
4159 }
4160
4161 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4162                           hci_req_complete_t *req_complete,
4163                           hci_req_complete_skb_t *req_complete_skb)
4164 {
4165         struct sk_buff *skb;
4166         unsigned long flags;
4167
4168         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4169
4170         /* If the completed command doesn't match the last one that was
4171          * sent, we need to do special handling of it.
4172          */
4173         if (!hci_sent_cmd_data(hdev, opcode)) {
4174                 /* Some CSR based controllers generate a spontaneous
4175                  * reset complete event during init and any pending
4176                  * command will never be completed. In such a case we
4177                  * need to resend whatever was the last sent
4178                  * command.
4179                  */
4180                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4181                         hci_resend_last(hdev);
4182
4183                 return;
4184         }
4185
4186         /* If the command succeeded and there's still more commands in
4187          * this request the request is not yet complete.
4188          */
4189         if (!status && !hci_req_is_complete(hdev))
4190                 return;
4191
4192         /* If this was the last command in a request, the complete
4193          * callback would be found in hdev->sent_cmd instead of the
4194          * command queue (hdev->cmd_q).
4195          */
4196         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4197                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4198                 return;
4199         }
4200
4201         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4202                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4203                 return;
4204         }
4205
4206         /* Remove all pending commands belonging to this request */
4207         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4208         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4209                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4210                         __skb_queue_head(&hdev->cmd_q, skb);
4211                         break;
4212                 }
4213
4214                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4215                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4216                 else
4217                         *req_complete = bt_cb(skb)->hci.req_complete;
4218                 kfree_skb(skb);
4219         }
4220         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4221 }
4222
4223 static void hci_rx_work(struct work_struct *work)
4224 {
4225         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4226         struct sk_buff *skb;
4227
4228         BT_DBG("%s", hdev->name);
4229
4230         while ((skb = skb_dequeue(&hdev->rx_q))) {
4231                 /* Send copy to monitor */
4232                 hci_send_to_monitor(hdev, skb);
4233
4234                 if (atomic_read(&hdev->promisc)) {
4235                         /* Send copy to the sockets */
4236                         hci_send_to_sock(hdev, skb);
4237                 }
4238
4239                 /* If the device has been opened in HCI_USER_CHANNEL,
4240                  * userspace has exclusive access to the device.
4241                  * While the device is in HCI_INIT, we still need to
4242                  * process the data packets so the driver can
4243                  * complete its setup().
4244                  */
4245                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4246                     !test_bit(HCI_INIT, &hdev->flags)) {
4247                         kfree_skb(skb);
4248                         continue;
4249                 }
4250
4251                 if (test_bit(HCI_INIT, &hdev->flags)) {
4252                         /* Don't process data packets in these states. */
4253                         switch (hci_skb_pkt_type(skb)) {
4254                         case HCI_ACLDATA_PKT:
4255                         case HCI_SCODATA_PKT:
4256                                 kfree_skb(skb);
4257                                 continue;
4258                         }
4259                 }
4260
4261                 /* Process frame */
4262                 switch (hci_skb_pkt_type(skb)) {
4263                 case HCI_EVENT_PKT:
4264                         BT_DBG("%s Event packet", hdev->name);
4265                         hci_event_packet(hdev, skb);
4266                         break;
4267
4268                 case HCI_ACLDATA_PKT:
4269                         BT_DBG("%s ACL data packet", hdev->name);
4270                         hci_acldata_packet(hdev, skb);
4271                         break;
4272
4273                 case HCI_SCODATA_PKT:
4274                         BT_DBG("%s SCO data packet", hdev->name);
4275                         hci_scodata_packet(hdev, skb);
4276                         break;
4277
4278                 default:
4279                         kfree_skb(skb);
4280                         break;
4281                 }
4282         }
4283 }
4284
4285 static void hci_cmd_work(struct work_struct *work)
4286 {
4287         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4288         struct sk_buff *skb;
4289
4290         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4291                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4292
4293         /* Send queued commands */
4294         if (atomic_read(&hdev->cmd_cnt)) {
4295                 skb = skb_dequeue(&hdev->cmd_q);
4296                 if (!skb)
4297                         return;
4298
4299                 kfree_skb(hdev->sent_cmd);
4300
4301                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4302                 if (hdev->sent_cmd) {
4303                         atomic_dec(&hdev->cmd_cnt);
4304                         hci_send_frame(hdev, skb);
4305                         if (test_bit(HCI_RESET, &hdev->flags))
4306                                 cancel_delayed_work(&hdev->cmd_timer);
4307                         else
4308                                 schedule_delayed_work(&hdev->cmd_timer,
4309                                                       HCI_CMD_TIMEOUT);
4310                 } else {
4311                         skb_queue_head(&hdev->cmd_q, skb);
4312                         queue_work(hdev->workqueue, &hdev->cmd_work);
4313                 }
4314         }
4315 }