/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

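/* The "dut_mode" debugfs attribute reads back the HCI_DUT_MODE flag as
 * 'Y' or 'N'. Writing a boolean enables Device Under Test mode via the
 * HCI_OP_ENABLE_DUT_MODE command, or leaves it again by issuing an HCI
 * reset; either way the command runs synchronously under the request
 * lock while the device is up.
 */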
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

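/* The "vendor_diag" debugfs attribute mirrors the HCI_VENDOR_DIAG flag.
 * Writing a boolean toggles vendor diagnostic reporting through the
 * driver's set_diag callback; when the setting is not persistent and
 * the transport is down (or in user channel mode), only the flag is
 * updated and the hardware is programmed later at power on.
 */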
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

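/* Create the debugfs entries that are valid in any controller state.
 * The "vendor_diag" entry is only exposed when the driver actually
 * provides a set_diag callback.
 */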
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

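/* Stage one init helpers for the two controller types: queue the
 * commands that read the basic controller information (features,
 * version and address for BR/EDR; AMP info, data block size, flow
 * control mode and location data for AMP) and select the matching
 * flow control mode.
 */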
static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

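/* First stage of the synchronous init sequence: optionally reset the
 * controller (unless HCI_QUIRK_RESET_ON_CLOSE means it was already
 * reset on close) and then dispatch to the type specific init based on
 * whether this is a primary (BR/EDR/LE) or an AMP controller.
 */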
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

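/* Build the page 1 event mask from the controller's capabilities: start
 * from a BR/EDR or LE-only baseline and then enable each optional event
 * only when the corresponding LMP feature or supported command is
 * advertised, so the controller never signals events the host cannot
 * handle.
 */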
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

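/* Second init stage: run the BR/EDR and/or LE specific setup, read the
 * supported commands bitmask (unless quirked as broken), and program
 * SSP mode, EIR, inquiry mode and authentication based on the flags and
 * features discovered in stage one.
 */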
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

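/* Assemble the default link policy from the individually advertised LMP
 * features (role switch, hold, sniff and park) and write it to the
 * controller in a single command.
 */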
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

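/* Write the LE Host Supported setting on dual-mode controllers so that
 * the host's HCI_LE_ENABLED flag and the controller's host feature bits
 * stay in sync; the command is skipped when they already agree.
 */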
static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

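/* Third init stage, run for primary controllers only: program the page 1
 * event mask, stored link key handling, page scan parameters and the
 * default link policy, then build the LE event mask and read the white
 * list and data length state, gated on features and supported commands
 * in the same style as the earlier stages.
 */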
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;        /* LE PHY Update Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

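/* Fourth and final init stage: clean up stored link keys, program event
 * mask page 2, read the codec list and MWS transport configuration,
 * enable Secure Connections, and set the default LE data length and PHY
 * preferences when the controller supports them.
 */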
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support deleting stored
         * link keys, but they don't. The quirk lets a driver just
         * disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                /* No transmitter PHY or receiver PHY preferences */
                cp.all_phys = 0x03;
                cp.tx_phys = 0;
                cp.rx_phys = 0;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

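/* Run the staged init sequence synchronously. Stages one and two apply
 * to every controller; stages three and four, as well as the extended
 * debugfs entries, are reserved for primary controllers in the setup or
 * config phase.
 */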
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

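/* The following small request builders back the legacy HCI ioctls: each
 * one queues a single write command carrying the value passed in via
 * the opt argument (scan enable, authentication enable, encryption mode
 * or default link policy).
 */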
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

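/* Re-insert an entry into the resolve list, keeping the list ordered by
 * signal strength (smallest |RSSI| first) so that name resolution is
 * attempted for the strongest devices first. Entries whose resolution
 * is already pending keep their position ahead of the new entry.
 */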
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

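/* Add or refresh an inquiry cache entry for a discovered device and
 * return the MGMT_DEV_FOUND_* flags that describe it: legacy pairing is
 * flagged when SSP is absent, and name confirmation is requested when
 * the remote name is still unknown or the entry could not be allocated.
 */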
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

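/* Copy up to num cached inquiry results into buf as an array of struct
 * inquiry_info records and return how many were written. This walks the
 * cache without sleeping and is called under hdev->lock.
 */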
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

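/* Handler for the HCIINQUIRY ioctl: validate the device state, flush a
 * stale inquiry cache if needed, run the inquiry synchronously, wait
 * for the HCI_INQUIRY flag to clear, and copy the cached results back
 * to user space.
 */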
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

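/* Power on a controller under the request lock: open the transport, run
 * the driver setup and the staged init (or the unconfigured variant),
 * reprogram non-persistent diagnostic settings, and either mark the
 * device as up or tear everything back down on failure.
 */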
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hci_dev_test_flag(hdev, HCI_MGMT) &&
                    hdev->dev_type == HCI_PRIMARY) {
                        ret = __hci_req_hci_power_on(hdev);
                        mgmt_power_on(hdev, ret);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);

                /* Since hci_rx_work() can queue new cmd_work, it should
                 * be flushed first to avoid an unexpected call of
                 * hci_cmd_work().
                 */
                flush_work(&hdev->rx_work);
                flush_work(&hdev->cmd_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        cancel_delayed_work_sync(&hdev->cmd_timer);
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

1510         /* Devices that are marked as unconfigured can only be powered
1511          * up as user channel. Trying to bring them up as normal devices
1512          * will result in a failure. Only user channel operation is
1513          * possible.
1514          *
1515          * When this function is called for a user channel, the flag
1516          * HCI_USER_CHANNEL will be set first before attempting to
1517          * open the device.
1518          */
1519         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1520             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1521                 err = -EOPNOTSUPP;
1522                 goto done;
1523         }
1524
1525         /* We need to ensure that no other power on/off work is pending
1526          * before proceeding to call hci_dev_do_open. This is
1527          * particularly important if the setup procedure has not yet
1528          * completed.
1529          */
1530         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1531                 cancel_delayed_work(&hdev->power_off);
1532
1533         /* After this call it is guaranteed that the setup procedure
1534          * has finished. This means that error conditions like RFKILL
1535          * or no valid public or static random address apply.
1536          */
1537         flush_workqueue(hdev->req_workqueue);
1538
1539         /* For controllers not using the management interface and that
1540          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1541          * so that pairing works for them. Once the management interface
1542          * is in use this bit will be cleared again and userspace has
1543          * to explicitly enable it.
1544          */
1545         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1546             !hci_dev_test_flag(hdev, HCI_MGMT))
1547                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1548
1549         err = hci_dev_do_open(hdev);
1550
1551 done:
1552         hci_dev_put(hdev);
1553         return err;
1554 }
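
/* Illustrative sketch (editor's addition, not part of the original
 * file): the usual caller of hci_dev_open() is the HCIDEVUP ioctl on a
 * raw HCI socket, which passes the device index straight through:
 *
 *	err = hci_dev_open(0);		// assumes hci0 exists
 *	if (err)
 *		BT_ERR("Opening hci0 failed: %d", err);
 */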
1555
1556 /* This function requires the caller holds hdev->lock */
1557 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1558 {
1559         struct hci_conn_params *p;
1560
1561         list_for_each_entry(p, &hdev->le_conn_params, list) {
1562                 if (p->conn) {
1563                         hci_conn_drop(p->conn);
1564                         hci_conn_put(p->conn);
1565                         p->conn = NULL;
1566                 }
1567                 list_del_init(&p->action);
1568         }
1569
1570         BT_DBG("All LE pending actions cleared");
1571 }
1572
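/* Editor's note, summarising the teardown order implemented below:
 * cancel the delayed power-off and any pending requests, take the req
 * lock, run the vendor shutdown hook while the device is still up,
 * flush the TX/RX work, drain the workqueue before the *_flush()
 * helpers touch shared state, optionally issue HCI Reset for
 * HCI_QUIRK_RESET_ON_CLOSE controllers, purge every queue and only
 * then hand off to the driver's close() callback.
 */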
1573 int hci_dev_do_close(struct hci_dev *hdev)
1574 {
1575         bool auto_off;
1576
1577         BT_DBG("%s %p", hdev->name, hdev);
1578
1587         cancel_delayed_work(&hdev->power_off);
1588
1589         hci_request_cancel_all(hdev);
1590         hci_req_sync_lock(hdev);
1591
1592         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1593             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1594             test_bit(HCI_UP, &hdev->flags)) {
1595                 /* Execute vendor specific shutdown routine */
1596                 if (hdev->shutdown)
1597                         hdev->shutdown(hdev);
1598         }
1599
1600         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1601                 cancel_delayed_work_sync(&hdev->cmd_timer);
1602                 hci_req_sync_unlock(hdev);
1603                 return 0;
1604         }
1605
1606         hci_leds_update_powered(hdev, false);
1607
1608         /* Flush RX and TX works */
1609         flush_work(&hdev->tx_work);
1610         flush_work(&hdev->rx_work);
1611
1612         if (hdev->discov_timeout > 0) {
1613                 hdev->discov_timeout = 0;
1614                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1615                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1616         }
1617
1618         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1619                 cancel_delayed_work(&hdev->service_cache);
1620
1621         if (hci_dev_test_flag(hdev, HCI_MGMT))
1622                 cancel_delayed_work_sync(&hdev->rpa_expired);
1623
1624         /* Avoid potential lockdep warnings from the *_flush() calls by
1625          * ensuring the workqueue is empty up front.
1626          */
1627         drain_workqueue(hdev->workqueue);
1628
1629         hci_dev_lock(hdev);
1630
1631         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1632
1633         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1634
1635         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1636             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1637             hci_dev_test_flag(hdev, HCI_MGMT))
1638                 __mgmt_power_off(hdev);
1639
1640         hci_inquiry_cache_flush(hdev);
1641         hci_pend_le_actions_clear(hdev);
1642         hci_conn_hash_flush(hdev);
1643         hci_dev_unlock(hdev);
1644
1645         smp_unregister(hdev);
1646
1647         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1648
1649         if (hdev->flush)
1650                 hdev->flush(hdev);
1651
1652         /* Reset device */
1653         skb_queue_purge(&hdev->cmd_q);
1654         atomic_set(&hdev->cmd_cnt, 1);
1655         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1656             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1657                 set_bit(HCI_INIT, &hdev->flags);
1658                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1659                 clear_bit(HCI_INIT, &hdev->flags);
1660         }
1661
1662         /* flush cmd  work */
1663         flush_work(&hdev->cmd_work);
1664
1665         /* Drop queues */
1666         skb_queue_purge(&hdev->rx_q);
1667         skb_queue_purge(&hdev->cmd_q);
1668         skb_queue_purge(&hdev->raw_q);
1669
1670         /* Drop last sent command */
1671         if (hdev->sent_cmd) {
1672                 cancel_delayed_work_sync(&hdev->cmd_timer);
1673                 kfree_skb(hdev->sent_cmd);
1674                 hdev->sent_cmd = NULL;
1675         }
1676
1677         clear_bit(HCI_RUNNING, &hdev->flags);
1678         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1679
1680         /* After this point our queues are empty
1681          * and no tasks are scheduled. */
1682         hdev->close(hdev);
1683
1684         /* Clear flags */
1685         hdev->flags &= BIT(HCI_RAW);
1686         hci_dev_clear_volatile_flags(hdev);
1687
1688         /* Controller radio is available but is currently powered down */
1689         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1690
1691         memset(hdev->eir, 0, sizeof(hdev->eir));
1692         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1693         bacpy(&hdev->random_addr, BDADDR_ANY);
1694
1695         hci_req_sync_unlock(hdev);
1696
1697         hci_dev_put(hdev);
1698         return 0;
1699 }
1700
1701 int hci_dev_close(__u16 dev)
1702 {
1703         struct hci_dev *hdev;
1704         int err;
1705
1706         hdev = hci_dev_get(dev);
1707         if (!hdev)
1708                 return -ENODEV;
1709
1710         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1711                 err = -EBUSY;
1712                 goto done;
1713         }
1714
1715         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1716                 cancel_delayed_work(&hdev->power_off);
1717
1718         err = hci_dev_do_close(hdev);
1719
1720 done:
1721         hci_dev_put(hdev);
1722         return err;
1723 }
1724
1725 static int hci_dev_do_reset(struct hci_dev *hdev)
1726 {
1727         int ret;
1728
1729         BT_DBG("%s %p", hdev->name, hdev);
1730
1731         hci_req_sync_lock(hdev);
1732
1733         /* Drop queues */
1734         skb_queue_purge(&hdev->rx_q);
1735         skb_queue_purge(&hdev->cmd_q);
1736
1737         /* Avoid potential lockdep warnings from the *_flush() calls by
1738          * ensuring the workqueue is empty up front.
1739          */
1740         drain_workqueue(hdev->workqueue);
1741
1742         hci_dev_lock(hdev);
1743         hci_inquiry_cache_flush(hdev);
1744         hci_conn_hash_flush(hdev);
1745         hci_dev_unlock(hdev);
1746
1747         if (hdev->flush)
1748                 hdev->flush(hdev);
1749
1750         atomic_set(&hdev->cmd_cnt, 1);
1751         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1752
1753         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1754
1755         hci_req_sync_unlock(hdev);
1756         return ret;
1757 }
1758
1759 int hci_dev_reset(__u16 dev)
1760 {
1761         struct hci_dev *hdev;
1762         int err;
1763
1764         hdev = hci_dev_get(dev);
1765         if (!hdev)
1766                 return -ENODEV;
1767
1768         if (!test_bit(HCI_UP, &hdev->flags)) {
1769                 err = -ENETDOWN;
1770                 goto done;
1771         }
1772
1773         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1774                 err = -EBUSY;
1775                 goto done;
1776         }
1777
1778         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1779                 err = -EOPNOTSUPP;
1780                 goto done;
1781         }
1782
1783         err = hci_dev_do_reset(hdev);
1784
1785 done:
1786         hci_dev_put(hdev);
1787         return err;
1788 }
1789
1790 int hci_dev_reset_stat(__u16 dev)
1791 {
1792         struct hci_dev *hdev;
1793         int ret = 0;
1794
1795         hdev = hci_dev_get(dev);
1796         if (!hdev)
1797                 return -ENODEV;
1798
1799         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1800                 ret = -EBUSY;
1801                 goto done;
1802         }
1803
1804         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1805                 ret = -EOPNOTSUPP;
1806                 goto done;
1807         }
1808
1809         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1810
1811 done:
1812         hci_dev_put(hdev);
1813         return ret;
1814 }
1815
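/* Editor's note (values from the SCAN_* defines in
 * <net/bluetooth/hci.h>): the scan parameter is the Write Scan Enable
 * bitmask, which the flag updates below decode as
 *
 *	SCAN_DISABLED (0x00)		neither connectable nor discoverable
 *	SCAN_INQUIRY  (0x01)		discoverable
 *	SCAN_PAGE     (0x02)		connectable
 *	SCAN_PAGE | SCAN_INQUIRY	both
 */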
1816 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1817 {
1818         bool conn_changed, discov_changed;
1819
1820         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1821
1822         if ((scan & SCAN_PAGE))
1823                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1824                                                           HCI_CONNECTABLE);
1825         else
1826                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1827                                                            HCI_CONNECTABLE);
1828
1829         if ((scan & SCAN_INQUIRY)) {
1830                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1831                                                             HCI_DISCOVERABLE);
1832         } else {
1833                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1834                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1835                                                              HCI_DISCOVERABLE);
1836         }
1837
1838         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1839                 return;
1840
1841         if (conn_changed || discov_changed) {
1842                 /* In case this was disabled through mgmt */
1843                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1844
1845                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1846                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1847
1848                 mgmt_new_settings(hdev);
1849         }
1850 }
1851
1852 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1853 {
1854         struct hci_dev *hdev;
1855         struct hci_dev_req dr;
1856         int err = 0;
1857
1858         if (copy_from_user(&dr, arg, sizeof(dr)))
1859                 return -EFAULT;
1860
1861         hdev = hci_dev_get(dr.dev_id);
1862         if (!hdev)
1863                 return -ENODEV;
1864
1865         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1866                 err = -EBUSY;
1867                 goto done;
1868         }
1869
1870         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1871                 err = -EOPNOTSUPP;
1872                 goto done;
1873         }
1874
1875         if (hdev->dev_type != HCI_PRIMARY) {
1876                 err = -EOPNOTSUPP;
1877                 goto done;
1878         }
1879
1880         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1881                 err = -EOPNOTSUPP;
1882                 goto done;
1883         }
1884
1885         switch (cmd) {
1886         case HCISETAUTH:
1887                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1888                                    HCI_INIT_TIMEOUT, NULL);
1889                 break;
1890
1891         case HCISETENCRYPT:
1892                 if (!lmp_encrypt_capable(hdev)) {
1893                         err = -EOPNOTSUPP;
1894                         break;
1895                 }
1896
1897                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1898                         /* Auth must be enabled first */
1899                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1900                                            HCI_INIT_TIMEOUT, NULL);
1901                         if (err)
1902                                 break;
1903                 }
1904
1905                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1906                                    HCI_INIT_TIMEOUT, NULL);
1907                 break;
1908
1909         case HCISETSCAN:
1910                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1911                                    HCI_INIT_TIMEOUT, NULL);
1912
1913                 /* Ensure that the connectable and discoverable states
1914                  * get correctly modified as this was a non-mgmt change.
1915                  */
1916                 if (!err)
1917                         hci_update_scan_state(hdev, dr.dev_opt);
1918                 break;
1919
1920         case HCISETLINKPOL:
1921                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1922                                    HCI_INIT_TIMEOUT, NULL);
1923                 break;
1924
1925         case HCISETLINKMODE:
1926                 hdev->link_mode = ((__u16) dr.dev_opt) &
1927                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1928                 break;
1929
1930         case HCISETPTYPE:
1931                 hdev->pkt_type = (__u16) dr.dev_opt;
1932                 break;
1933
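        /* Editor's note: for the two MTU commands below, userspace
         * packs two 16-bit values into the single 32-bit dev_opt
         * field; the first half in memory order is the packet count
         * and the second half is the MTU, which is what the pointer
         * casts unpack.
         */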
1934         case HCISETACLMTU:
1935                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1936                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1937                 break;
1938
1939         case HCISETSCOMTU:
1940                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1941                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1942                 break;
1943
1944         default:
1945                 err = -EINVAL;
1946                 break;
1947         }
1948
1949 done:
1950         hci_dev_put(hdev);
1951         return err;
1952 }
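
/* Illustrative sketch (editor's addition): userspace issues these
 * commands with ioctl() on a raw HCI socket, which requires
 * CAP_NET_ADMIN, e.g. making hci0 both connectable and discoverable
 * via HCISETSCAN:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,			// assumes hci0
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (sk >= 0 && ioctl(sk, HCISETSCAN, &dr) < 0)
 *		perror("HCISETSCAN");
 */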
1953
1954 int hci_get_dev_list(void __user *arg)
1955 {
1956         struct hci_dev *hdev;
1957         struct hci_dev_list_req *dl;
1958         struct hci_dev_req *dr;
1959         int n = 0, size, err;
1960         __u16 dev_num;
1961
1962         if (get_user(dev_num, (__u16 __user *) arg))
1963                 return -EFAULT;
1964
1965         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1966                 return -EINVAL;
1967
1968         size = sizeof(*dl) + dev_num * sizeof(*dr);
1969
1970         dl = kzalloc(size, GFP_KERNEL);
1971         if (!dl)
1972                 return -ENOMEM;
1973
1974         dr = dl->dev_req;
1975
1976         read_lock(&hci_dev_list_lock);
1977         list_for_each_entry(hdev, &hci_dev_list, list) {
1978                 unsigned long flags = hdev->flags;
1979
1980                 /* When the auto-off is configured it means the transport
1981                  * is running, but in that case still indicate that the
1982                  * device is actually down.
1983                  */
1984                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1985                         flags &= ~BIT(HCI_UP);
1986
1987                 (dr + n)->dev_id  = hdev->id;
1988                 (dr + n)->dev_opt = flags;
1989
1990                 if (++n >= dev_num)
1991                         break;
1992         }
1993         read_unlock(&hci_dev_list_lock);
1994
1995         dl->dev_num = n;
1996         size = sizeof(*dl) + n * sizeof(*dr);
1997
1998         err = copy_to_user(arg, dl, size);
1999         kfree(dl);
2000
2001         return err ? -EFAULT : 0;
2002 }
2003
2004 int hci_get_dev_info(void __user *arg)
2005 {
2006         struct hci_dev *hdev;
2007         struct hci_dev_info di;
2008         unsigned long flags;
2009         int err = 0;
2010
2011         if (copy_from_user(&di, arg, sizeof(di)))
2012                 return -EFAULT;
2013
2014         hdev = hci_dev_get(di.dev_id);
2015         if (!hdev)
2016                 return -ENODEV;
2017
2018         /* When the auto-off is configured it means the transport
2019          * is running, but in that case still indicate that the
2020          * device is actually down.
2021          */
2022         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2023                 flags = hdev->flags & ~BIT(HCI_UP);
2024         else
2025                 flags = hdev->flags;
2026
2027         strcpy(di.name, hdev->name);
2028         di.bdaddr   = hdev->bdaddr;
2029         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2030         di.flags    = flags;
2031         di.pkt_type = hdev->pkt_type;
2032         if (lmp_bredr_capable(hdev)) {
2033                 di.acl_mtu  = hdev->acl_mtu;
2034                 di.acl_pkts = hdev->acl_pkts;
2035                 di.sco_mtu  = hdev->sco_mtu;
2036                 di.sco_pkts = hdev->sco_pkts;
2037         } else {
2038                 di.acl_mtu  = hdev->le_mtu;
2039                 di.acl_pkts = hdev->le_pkts;
2040                 di.sco_mtu  = 0;
2041                 di.sco_pkts = 0;
2042         }
2043         di.link_policy = hdev->link_policy;
2044         di.link_mode   = hdev->link_mode;
2045
2046         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2047         memcpy(&di.features, &hdev->features, sizeof(di.features));
2048
2049         if (copy_to_user(arg, &di, sizeof(di)))
2050                 err = -EFAULT;
2051
2052         hci_dev_put(hdev);
2053
2054         return err;
2055 }
2056
2057 /* ---- Interface to HCI drivers ---- */
2058
2059 static int hci_rfkill_set_block(void *data, bool blocked)
2060 {
2061         struct hci_dev *hdev = data;
2062
2063         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2064
2065         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2066                 return -EBUSY;
2067
2068         if (blocked) {
2069                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2070                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2071                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2072                         hci_dev_do_close(hdev);
2073         } else {
2074                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2075         }
2076
2077         return 0;
2078 }
2079
2080 static const struct rfkill_ops hci_rfkill_ops = {
2081         .set_block = hci_rfkill_set_block,
2082 };
2083
2084 static void hci_power_on(struct work_struct *work)
2085 {
2086         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2087         int err;
2088
2089         BT_DBG("%s", hdev->name);
2090
2091         if (test_bit(HCI_UP, &hdev->flags) &&
2092             hci_dev_test_flag(hdev, HCI_MGMT) &&
2093             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2094                 cancel_delayed_work(&hdev->power_off);
2095                 hci_req_sync_lock(hdev);
2096                 err = __hci_req_hci_power_on(hdev);
2097                 hci_req_sync_unlock(hdev);
2098                 mgmt_power_on(hdev, err);
2099                 return;
2100         }
2101
2102         err = hci_dev_do_open(hdev);
2103         if (err < 0) {
2104                 hci_dev_lock(hdev);
2105                 mgmt_set_powered_failed(hdev, err);
2106                 hci_dev_unlock(hdev);
2107                 return;
2108         }
2109
2110         /* During the HCI setup phase, a few error conditions are
2111          * ignored and they need to be checked now. If they are still
2112          * valid, it is important to turn the device back off.
2113          */
2114         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2115             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2116             (hdev->dev_type == HCI_PRIMARY &&
2117              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2118              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2119                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2120                 hci_dev_do_close(hdev);
2121         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2122                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2123                                    HCI_AUTO_OFF_TIMEOUT);
2124         }
2125
2126         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2127                 /* For unconfigured devices, set the HCI_RAW flag
2128                  * so that userspace can easily identify them.
2129                  */
2130                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2131                         set_bit(HCI_RAW, &hdev->flags);
2132
2133                 /* For fully configured devices, this will send
2134                  * the Index Added event. For unconfigured devices,
2135                  * it will send an Unconfigured Index Added event.
2136                  *
2137                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2138                  * and no event will be sent.
2139                  */
2140                 mgmt_index_added(hdev);
2141         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2142                 /* When the controller is now configured, then it
2143                  * is important to clear the HCI_RAW flag.
2144                  */
2145                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2146                         clear_bit(HCI_RAW, &hdev->flags);
2147
2148                 /* Powering on the controller with HCI_CONFIG set only
2149                  * happens with the transition from unconfigured to
2150                  * configured. This will send the Index Added event.
2151                  */
2152                 mgmt_index_added(hdev);
2153         }
2154 }
2155
2156 static void hci_power_off(struct work_struct *work)
2157 {
2158         struct hci_dev *hdev = container_of(work, struct hci_dev,
2159                                             power_off.work);
2160
2161         BT_DBG("%s", hdev->name);
2162
2163         hci_dev_do_close(hdev);
2164 }
2165
2166 static void hci_error_reset(struct work_struct *work)
2167 {
2168         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2169
2170         BT_DBG("%s", hdev->name);
2171
2172         if (hdev->hw_error)
2173                 hdev->hw_error(hdev, hdev->hw_error_code);
2174         else
2175                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2176                        hdev->hw_error_code);
2177
2178         if (hci_dev_do_close(hdev))
2179                 return;
2180
2181         hci_dev_do_open(hdev);
2182 }
2183
2184 void hci_uuids_clear(struct hci_dev *hdev)
2185 {
2186         struct bt_uuid *uuid, *tmp;
2187
2188         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2189                 list_del(&uuid->list);
2190                 kfree(uuid);
2191         }
2192 }
2193
2194 void hci_link_keys_clear(struct hci_dev *hdev)
2195 {
2196         struct link_key *key;
2197
2198         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2199                 list_del_rcu(&key->list);
2200                 kfree_rcu(key, rcu);
2201         }
2202 }
2203
2204 void hci_smp_ltks_clear(struct hci_dev *hdev)
2205 {
2206         struct smp_ltk *k;
2207
2208         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2209                 list_del_rcu(&k->list);
2210                 kfree_rcu(k, rcu);
2211         }
2212 }
2213
2214 void hci_smp_irks_clear(struct hci_dev *hdev)
2215 {
2216         struct smp_irk *k;
2217
2218         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2219                 list_del_rcu(&k->list);
2220                 kfree_rcu(k, rcu);
2221         }
2222 }
2223
2224 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2225 {
2226         struct link_key *k;
2227
2228         rcu_read_lock();
2229         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2230                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2231                         rcu_read_unlock();
2232                         return k;
2233                 }
2234         }
2235         rcu_read_unlock();
2236
2237         return NULL;
2238 }
2239
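/* Editor's note: hci_persistent_key() below reduces the pairing rules
 * to "may this link key survive across power cycles?".  The numeric
 * comparisons rely on the HCI_LK_* key types (0x00-0x08) and on the
 * authentication requirements from the IO capability exchange, where
 * 0x02/0x03 mean dedicated bonding and anything above 0x01 requests
 * some form of bonding.
 */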
2240 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2241                                u8 key_type, u8 old_key_type)
2242 {
2243         /* Legacy key */
2244         if (key_type < 0x03)
2245                 return true;
2246
2247         /* Debug keys are insecure so don't store them persistently */
2248         if (key_type == HCI_LK_DEBUG_COMBINATION)
2249                 return false;
2250
2251         /* Changed combination key and there's no previous one */
2252         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2253                 return false;
2254
2255         /* Security mode 3 case */
2256         if (!conn)
2257                 return true;
2258
2259         /* BR/EDR key derived using SC from an LE link */
2260         if (conn->type == LE_LINK)
2261                 return true;
2262
2263         /* Neither the local nor the remote side requested no-bonding */
2264         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2265                 return true;
2266
2267         /* Local side had dedicated bonding as requirement */
2268         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2269                 return true;
2270
2271         /* Remote side had dedicated bonding as requirement */
2272         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2273                 return true;
2274
2275         /* If none of the above criteria match, then don't store the key
2276          * persistently */
2277         return false;
2278 }
2279
2280 static u8 ltk_role(u8 type)
2281 {
2282         if (type == SMP_LTK)
2283                 return HCI_ROLE_MASTER;
2284
2285         return HCI_ROLE_SLAVE;
2286 }
2287
2288 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2289                              u8 addr_type, u8 role)
2290 {
2291         struct smp_ltk *k;
2292
2293         rcu_read_lock();
2294         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2295                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2296                         continue;
2297
2298                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2299                         rcu_read_unlock();
2300                         return k;
2301                 }
2302         }
2303         rcu_read_unlock();
2304
2305         return NULL;
2306 }
2307
2308 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2309 {
2310         struct smp_irk *irk;
2311
2312         rcu_read_lock();
2313         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2314                 if (!bacmp(&irk->rpa, rpa)) {
2315                         rcu_read_unlock();
2316                         return irk;
2317                 }
2318         }
2319
2320         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2321                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2322                         bacpy(&irk->rpa, rpa);
2323                         rcu_read_unlock();
2324                         return irk;
2325                 }
2326         }
2327         rcu_read_unlock();
2328
2329         return NULL;
2330 }
2331
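/* Editor's note: bdaddr_t is stored little-endian, so b[5] below is the
 * most significant address byte.  A static random address must have its
 * two top bits set, hence the (b[5] & 0xc0) == 0xc0 requirement for
 * ADDR_LE_DEV_RANDOM identity addresses.
 */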
2332 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2333                                      u8 addr_type)
2334 {
2335         struct smp_irk *irk;
2336
2337         /* Identity Address must be public or static random */
2338         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2339                 return NULL;
2340
2341         rcu_read_lock();
2342         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2343                 if (addr_type == irk->addr_type &&
2344                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2345                         rcu_read_unlock();
2346                         return irk;
2347                 }
2348         }
2349         rcu_read_unlock();
2350
2351         return NULL;
2352 }
2353
2354 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2355                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2356                                   u8 pin_len, bool *persistent)
2357 {
2358         struct link_key *key, *old_key;
2359         u8 old_key_type;
2360
2361         old_key = hci_find_link_key(hdev, bdaddr);
2362         if (old_key) {
2363                 old_key_type = old_key->type;
2364                 key = old_key;
2365         } else {
2366                 old_key_type = conn ? conn->key_type : 0xff;
2367                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2368                 if (!key)
2369                         return NULL;
2370                 list_add_rcu(&key->list, &hdev->link_keys);
2371         }
2372
2373         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2374
2375         /* Some buggy controller combinations generate a changed
2376          * combination key for legacy pairing even when there's no
2377          * previous key */
2378         if (type == HCI_LK_CHANGED_COMBINATION &&
2379             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2380                 type = HCI_LK_COMBINATION;
2381                 if (conn)
2382                         conn->key_type = type;
2383         }
2384
2385         bacpy(&key->bdaddr, bdaddr);
2386         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2387         key->pin_len = pin_len;
2388
2389         if (type == HCI_LK_CHANGED_COMBINATION)
2390                 key->type = old_key_type;
2391         else
2392                 key->type = type;
2393
2394         if (persistent)
2395                 *persistent = hci_persistent_key(hdev, conn, type,
2396                                                  old_key_type);
2397
2398         return key;
2399 }
2400
2401 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2402                             u8 addr_type, u8 type, u8 authenticated,
2403                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2404 {
2405         struct smp_ltk *key, *old_key;
2406         u8 role = ltk_role(type);
2407
2408         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2409         if (old_key)
2410                 key = old_key;
2411         else {
2412                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2413                 if (!key)
2414                         return NULL;
2415                 list_add_rcu(&key->list, &hdev->long_term_keys);
2416         }
2417
2418         bacpy(&key->bdaddr, bdaddr);
2419         key->bdaddr_type = addr_type;
2420         memcpy(key->val, tk, sizeof(key->val));
2421         key->authenticated = authenticated;
2422         key->ediv = ediv;
2423         key->rand = rand;
2424         key->enc_size = enc_size;
2425         key->type = type;
2426
2427         return key;
2428 }
2429
2430 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2431                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2432 {
2433         struct smp_irk *irk;
2434
2435         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2436         if (!irk) {
2437                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2438                 if (!irk)
2439                         return NULL;
2440
2441                 bacpy(&irk->bdaddr, bdaddr);
2442                 irk->addr_type = addr_type;
2443
2444                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2445         }
2446
2447         memcpy(irk->val, val, 16);
2448         bacpy(&irk->rpa, rpa);
2449
2450         return irk;
2451 }
2452
2453 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2454 {
2455         struct link_key *key;
2456
2457         key = hci_find_link_key(hdev, bdaddr);
2458         if (!key)
2459                 return -ENOENT;
2460
2461         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2462
2463         list_del_rcu(&key->list);
2464         kfree_rcu(key, rcu);
2465
2466         return 0;
2467 }
2468
2469 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2470 {
2471         struct smp_ltk *k;
2472         int removed = 0;
2473
2474         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2475                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2476                         continue;
2477
2478                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2479
2480                 list_del_rcu(&k->list);
2481                 kfree_rcu(k, rcu);
2482                 removed++;
2483         }
2484
2485         return removed ? 0 : -ENOENT;
2486 }
2487
2488 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2489 {
2490         struct smp_irk *k;
2491
2492         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2493                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2494                         continue;
2495
2496                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2497
2498                 list_del_rcu(&k->list);
2499                 kfree_rcu(k, rcu);
2500         }
2501 }
2502
2503 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2504 {
2505         struct smp_ltk *k;
2506         struct smp_irk *irk;
2507         u8 addr_type;
2508
2509         if (type == BDADDR_BREDR) {
2510                 if (hci_find_link_key(hdev, bdaddr))
2511                         return true;
2512                 return false;
2513         }
2514
2515         /* Convert to HCI addr type which struct smp_ltk uses */
2516         if (type == BDADDR_LE_PUBLIC)
2517                 addr_type = ADDR_LE_DEV_PUBLIC;
2518         else
2519                 addr_type = ADDR_LE_DEV_RANDOM;
2520
2521         irk = hci_get_irk(hdev, bdaddr, addr_type);
2522         if (irk) {
2523                 bdaddr = &irk->bdaddr;
2524                 addr_type = irk->addr_type;
2525         }
2526
2527         rcu_read_lock();
2528         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2529                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2530                         rcu_read_unlock();
2531                         return true;
2532                 }
2533         }
2534         rcu_read_unlock();
2535
2536         return false;
2537 }
2538
2539 /* HCI command timer function */
2540 static void hci_cmd_timeout(struct work_struct *work)
2541 {
2542         struct hci_dev *hdev = container_of(work, struct hci_dev,
2543                                             cmd_timer.work);
2544
2545         if (hdev->sent_cmd) {
2546                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2547                 u16 opcode = __le16_to_cpu(sent->opcode);
2548
2549                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2550         } else {
2551                 BT_ERR("%s command tx timeout", hdev->name);
2552         }
2553
2554         atomic_set(&hdev->cmd_cnt, 1);
2555         queue_work(hdev->workqueue, &hdev->cmd_work);
2556 }
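
/* Editor's note: resetting cmd_cnt to 1 above is what un-wedges the
 * command queue; hci_cmd_work() only dequeues while cmd_cnt is
 * non-zero, so after a controller stops responding this lets the next
 * queued command go out instead of stalling forever.
 */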
2557
2558 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2559                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2560 {
2561         struct oob_data *data;
2562
2563         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2564                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2565                         continue;
2566                 if (data->bdaddr_type != bdaddr_type)
2567                         continue;
2568                 return data;
2569         }
2570
2571         return NULL;
2572 }
2573
2574 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2575                                u8 bdaddr_type)
2576 {
2577         struct oob_data *data;
2578
2579         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2580         if (!data)
2581                 return -ENOENT;
2582
2583         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2584
2585         list_del(&data->list);
2586         kfree(data);
2587
2588         return 0;
2589 }
2590
2591 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2592 {
2593         struct oob_data *data, *n;
2594
2595         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2596                 list_del(&data->list);
2597                 kfree(data);
2598         }
2599 }
2600
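/* Editor's note: the data->present bookkeeping below is a two-bit mask
 * mirroring which OOB key sets were supplied:
 *
 *	0x00	no data present
 *	0x01	P-192 hash/randomizer only
 *	0x02	P-256 hash/randomizer only
 *	0x03	both P-192 and P-256 data
 */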
2601 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2602                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2603                             u8 *hash256, u8 *rand256)
2604 {
2605         struct oob_data *data;
2606
2607         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2608         if (!data) {
2609                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2610                 if (!data)
2611                         return -ENOMEM;
2612
2613                 bacpy(&data->bdaddr, bdaddr);
2614                 data->bdaddr_type = bdaddr_type;
2615                 list_add(&data->list, &hdev->remote_oob_data);
2616         }
2617
2618         if (hash192 && rand192) {
2619                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2620                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2621                 if (hash256 && rand256)
2622                         data->present = 0x03;
2623         } else {
2624                 memset(data->hash192, 0, sizeof(data->hash192));
2625                 memset(data->rand192, 0, sizeof(data->rand192));
2626                 if (hash256 && rand256)
2627                         data->present = 0x02;
2628                 else
2629                         data->present = 0x00;
2630         }
2631
2632         if (hash256 && rand256) {
2633                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2634                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2635         } else {
2636                 memset(data->hash256, 0, sizeof(data->hash256));
2637                 memset(data->rand256, 0, sizeof(data->rand256));
2638                 if (hash192 && rand192)
2639                         data->present = 0x01;
2640         }
2641
2642         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2643
2644         return 0;
2645 }
2646
2647 /* This function requires the caller holds hdev->lock */
2648 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2649 {
2650         struct adv_info *adv_instance;
2651
2652         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2653                 if (adv_instance->instance == instance)
2654                         return adv_instance;
2655         }
2656
2657         return NULL;
2658 }
2659
2660 /* This function requires the caller holds hdev->lock */
2661 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2662 {
2663         struct adv_info *cur_instance;
2664
2665         cur_instance = hci_find_adv_instance(hdev, instance);
2666         if (!cur_instance)
2667                 return NULL;
2668
2669         if (cur_instance == list_last_entry(&hdev->adv_instances,
2670                                             struct adv_info, list))
2671                 return list_first_entry(&hdev->adv_instances,
2672                                         struct adv_info, list);
2673         else
2674                 return list_next_entry(cur_instance, list);
2675 }
2676
2677 /* This function requires the caller holds hdev->lock */
2678 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2679 {
2680         struct adv_info *adv_instance;
2681
2682         adv_instance = hci_find_adv_instance(hdev, instance);
2683         if (!adv_instance)
2684                 return -ENOENT;
2685
2686         BT_DBG("%s removing instance %d", hdev->name, instance);
2687
2688         if (hdev->cur_adv_instance == instance) {
2689                 if (hdev->adv_instance_timeout) {
2690                         cancel_delayed_work(&hdev->adv_instance_expire);
2691                         hdev->adv_instance_timeout = 0;
2692                 }
2693                 hdev->cur_adv_instance = 0x00;
2694         }
2695
2696         list_del(&adv_instance->list);
2697         kfree(adv_instance);
2698
2699         hdev->adv_instance_cnt--;
2700
2701         return 0;
2702 }
2703
2704 /* This function requires the caller holds hdev->lock */
2705 void hci_adv_instances_clear(struct hci_dev *hdev)
2706 {
2707         struct adv_info *adv_instance, *n;
2708
2709         if (hdev->adv_instance_timeout) {
2710                 cancel_delayed_work(&hdev->adv_instance_expire);
2711                 hdev->adv_instance_timeout = 0;
2712         }
2713
2714         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2715                 list_del(&adv_instance->list);
2716                 kfree(adv_instance);
2717         }
2718
2719         hdev->adv_instance_cnt = 0;
2720         hdev->cur_adv_instance = 0x00;
2721 }
2722
2723 /* This function requires the caller holds hdev->lock */
2724 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2725                          u16 adv_data_len, u8 *adv_data,
2726                          u16 scan_rsp_len, u8 *scan_rsp_data,
2727                          u16 timeout, u16 duration)
2728 {
2729         struct adv_info *adv_instance;
2730
2731         adv_instance = hci_find_adv_instance(hdev, instance);
2732         if (adv_instance) {
2733                 memset(adv_instance->adv_data, 0,
2734                        sizeof(adv_instance->adv_data));
2735                 memset(adv_instance->scan_rsp_data, 0,
2736                        sizeof(adv_instance->scan_rsp_data));
2737         } else {
2738                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2739                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2740                         return -EOVERFLOW;
2741
2742                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2743                 if (!adv_instance)
2744                         return -ENOMEM;
2745
2746                 adv_instance->pending = true;
2747                 adv_instance->instance = instance;
2748                 list_add(&adv_instance->list, &hdev->adv_instances);
2749                 hdev->adv_instance_cnt++;
2750         }
2751
2752         adv_instance->flags = flags;
2753         adv_instance->adv_data_len = adv_data_len;
2754         adv_instance->scan_rsp_len = scan_rsp_len;
2755
2756         if (adv_data_len)
2757                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2758
2759         if (scan_rsp_len)
2760                 memcpy(adv_instance->scan_rsp_data,
2761                        scan_rsp_data, scan_rsp_len);
2762
2763         adv_instance->timeout = timeout;
2764         adv_instance->remaining_time = timeout;
2765
2766         if (duration == 0)
2767                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2768         else
2769                 adv_instance->duration = duration;
2770
2771         BT_DBG("%s for instance %d", hdev->name, instance);
2772
2773         return 0;
2774 }
2775
2776 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2777                                          bdaddr_t *bdaddr, u8 type)
2778 {
2779         struct bdaddr_list *b;
2780
2781         list_for_each_entry(b, bdaddr_list, list) {
2782                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2783                         return b;
2784         }
2785
2786         return NULL;
2787 }
2788
2789 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2790 {
2791         struct bdaddr_list *b, *n;
2792
2793         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2794                 list_del(&b->list);
2795                 kfree(b);
2796         }
2797 }
2798
2799 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2800 {
2801         struct bdaddr_list *entry;
2802
2803         if (!bacmp(bdaddr, BDADDR_ANY))
2804                 return -EBADF;
2805
2806         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2807                 return -EEXIST;
2808
2809         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2810         if (!entry)
2811                 return -ENOMEM;
2812
2813         bacpy(&entry->bdaddr, bdaddr);
2814         entry->bdaddr_type = type;
2815
2816         list_add(&entry->list, list);
2817
2818         return 0;
2819 }
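
/* Illustrative sketch (editor's addition): callers pass one of the hdev
 * address lists together with a BDADDR_* type, e.g. accepting incoming
 * connections from a known peer (address assumed available) through the
 * BR/EDR whitelist:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err && err != -EEXIST)
 *		return err;	// -EEXIST just means already listed
 */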
2820
2821 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2822 {
2823         struct bdaddr_list *entry;
2824
2825         if (!bacmp(bdaddr, BDADDR_ANY)) {
2826                 hci_bdaddr_list_clear(list);
2827                 return 0;
2828         }
2829
2830         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2831         if (!entry)
2832                 return -ENOENT;
2833
2834         list_del(&entry->list);
2835         kfree(entry);
2836
2837         return 0;
2838 }
2839
2840 /* This function requires the caller holds hdev->lock */
2841 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2842                                                bdaddr_t *addr, u8 addr_type)
2843 {
2844         struct hci_conn_params *params;
2845
2846         list_for_each_entry(params, &hdev->le_conn_params, list) {
2847                 if (bacmp(&params->addr, addr) == 0 &&
2848                     params->addr_type == addr_type) {
2849                         return params;
2850                 }
2851         }
2852
2853         return NULL;
2854 }
2855
2856 /* This function requires the caller holds hdev->lock */
2857 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2858                                                   bdaddr_t *addr, u8 addr_type)
2859 {
2860         struct hci_conn_params *param;
2861
2862         list_for_each_entry(param, list, action) {
2863                 if (bacmp(&param->addr, addr) == 0 &&
2864                     param->addr_type == addr_type)
2865                         return param;
2866         }
2867
2868         return NULL;
2869 }
2870
2871 /* This function requires the caller holds hdev->lock */
2872 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2873                                             bdaddr_t *addr, u8 addr_type)
2874 {
2875         struct hci_conn_params *params;
2876
2877         params = hci_conn_params_lookup(hdev, addr, addr_type);
2878         if (params)
2879                 return params;
2880
2881         params = kzalloc(sizeof(*params), GFP_KERNEL);
2882         if (!params) {
2883                 BT_ERR("Out of memory");
2884                 return NULL;
2885         }
2886
2887         bacpy(&params->addr, addr);
2888         params->addr_type = addr_type;
2889
2890         list_add(&params->list, &hdev->le_conn_params);
2891         INIT_LIST_HEAD(&params->action);
2892
2893         params->conn_min_interval = hdev->le_conn_min_interval;
2894         params->conn_max_interval = hdev->le_conn_max_interval;
2895         params->conn_latency = hdev->le_conn_latency;
2896         params->supervision_timeout = hdev->le_supv_timeout;
2897         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2898
2899         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2900
2901         return params;
2902 }
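
/* Editor's note: entries created above start as HCI_AUTO_CONN_DISABLED;
 * callers such as the mgmt Add Device path are expected to switch them
 * to another HCI_AUTO_CONN_* mode (for example HCI_AUTO_CONN_ALWAYS)
 * before the parameters influence passive scanning.
 */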
2903
2904 static void hci_conn_params_free(struct hci_conn_params *params)
2905 {
2906         if (params->conn) {
2907                 hci_conn_drop(params->conn);
2908                 hci_conn_put(params->conn);
2909         }
2910
2911         list_del(&params->action);
2912         list_del(&params->list);
2913         kfree(params);
2914 }
2915
2916 /* This function requires the caller holds hdev->lock */
2917 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2918 {
2919         struct hci_conn_params *params;
2920
2921         params = hci_conn_params_lookup(hdev, addr, addr_type);
2922         if (!params)
2923                 return;
2924
2925         hci_conn_params_free(params);
2926
2927         hci_update_background_scan(hdev);
2928
2929         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2930 }
2931
2932 /* This function requires the caller holds hdev->lock */
2933 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2934 {
2935         struct hci_conn_params *params, *tmp;
2936
2937         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2938                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2939                         continue;
2940
2941                 /* If trying to establish a one-time connection to a disabled
2942                  * device, leave the params, but mark them as just once.
2943                  */
2944                 if (params->explicit_connect) {
2945                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2946                         continue;
2947                 }
2948
2949                 list_del(&params->list);
2950                 kfree(params);
2951         }
2952
2953         BT_DBG("All LE disabled connection parameters were removed");
2954 }
2955
2956 /* This function requires the caller holds hdev->lock */
2957 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2958 {
2959         struct hci_conn_params *params, *tmp;
2960
2961         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2962                 hci_conn_params_free(params);
2963
2964         BT_DBG("All LE connection parameters were removed");
2965 }
2966
2967 /* Copy the Identity Address of the controller.
2968  *
2969  * If the controller has a public BD_ADDR, then by default use that one.
2970  * If this is an LE-only controller without a public address, default to
2971  * the static random address.
2972  *
2973  * For debugging purposes it is possible to force controllers with a
2974  * public address to use the static random address instead.
2975  *
2976  * In case BR/EDR has been disabled on a dual-mode controller and
2977  * userspace has configured a static address, then that address
2978  * becomes the identity address instead of the public BR/EDR address.
2979  */
2980 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2981                                u8 *bdaddr_type)
2982 {
2983         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2984             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2985             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2986              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2987                 bacpy(bdaddr, &hdev->static_addr);
2988                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2989         } else {
2990                 bacpy(bdaddr, &hdev->bdaddr);
2991                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2992         }
2993 }
2994
2995 /* Alloc HCI device */
2996 struct hci_dev *hci_alloc_dev(void)
2997 {
2998         struct hci_dev *hdev;
2999
3000         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3001         if (!hdev)
3002                 return NULL;
3003
3004         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3005         hdev->esco_type = (ESCO_HV1);
3006         hdev->link_mode = (HCI_LM_ACCEPT);
3007         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3008         hdev->io_capability = 0x03;     /* No Input No Output */
3009         hdev->manufacturer = 0xffff;    /* Default to internal use */
3010         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3011         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3012         hdev->adv_instance_cnt = 0;
3013         hdev->cur_adv_instance = 0x00;
3014         hdev->adv_instance_timeout = 0;
3015
3016         hdev->sniff_max_interval = 800;
3017         hdev->sniff_min_interval = 80;
3018
3019         hdev->le_adv_channel_map = 0x07;
3020         hdev->le_adv_min_interval = 0x0800;
3021         hdev->le_adv_max_interval = 0x0800;
3022         hdev->le_scan_interval = 0x0060;
3023         hdev->le_scan_window = 0x0030;
3024         hdev->le_conn_min_interval = 0x0018;
3025         hdev->le_conn_max_interval = 0x0028;
3026         hdev->le_conn_latency = 0x0000;
3027         hdev->le_supv_timeout = 0x002a;
3028         hdev->le_def_tx_len = 0x001b;
3029         hdev->le_def_tx_time = 0x0148;
3030         hdev->le_max_tx_len = 0x001b;
3031         hdev->le_max_tx_time = 0x0148;
3032         hdev->le_max_rx_len = 0x001b;
3033         hdev->le_max_rx_time = 0x0148;
3034
3035         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3036         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3037         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3038         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3039
3040         mutex_init(&hdev->lock);
3041         mutex_init(&hdev->req_lock);
3042
3043         INIT_LIST_HEAD(&hdev->mgmt_pending);
3044         INIT_LIST_HEAD(&hdev->blacklist);
3045         INIT_LIST_HEAD(&hdev->whitelist);
3046         INIT_LIST_HEAD(&hdev->uuids);
3047         INIT_LIST_HEAD(&hdev->link_keys);
3048         INIT_LIST_HEAD(&hdev->long_term_keys);
3049         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3050         INIT_LIST_HEAD(&hdev->remote_oob_data);
3051         INIT_LIST_HEAD(&hdev->le_white_list);
3052         INIT_LIST_HEAD(&hdev->le_conn_params);
3053         INIT_LIST_HEAD(&hdev->pend_le_conns);
3054         INIT_LIST_HEAD(&hdev->pend_le_reports);
3055         INIT_LIST_HEAD(&hdev->conn_hash.list);
3056         INIT_LIST_HEAD(&hdev->adv_instances);
3057
3058         INIT_WORK(&hdev->rx_work, hci_rx_work);
3059         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3060         INIT_WORK(&hdev->tx_work, hci_tx_work);
3061         INIT_WORK(&hdev->power_on, hci_power_on);
3062         INIT_WORK(&hdev->error_reset, hci_error_reset);
3063
3064         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3065
3066         skb_queue_head_init(&hdev->rx_q);
3067         skb_queue_head_init(&hdev->cmd_q);
3068         skb_queue_head_init(&hdev->raw_q);
3069
3070         init_waitqueue_head(&hdev->req_wait_q);
3071
3072         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3073
3074         hci_request_setup(hdev);
3075
3076         hci_init_sysfs(hdev);
3077         discovery_init(hdev);
3078
3079         return hdev;
3080 }
3081 EXPORT_SYMBOL(hci_alloc_dev);
3082
3083 /* Free HCI device */
3084 void hci_free_dev(struct hci_dev *hdev)
3085 {
3086         /* will free via device release */
3087         put_device(&hdev->dev);
3088 }
3089 EXPORT_SYMBOL(hci_free_dev);
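
/* Illustrative sketch (editor's addition, driver-side usage): a
 * transport driver typically pairs these exports as follows, with
 * my_open/my_close/my_send standing in for its own callbacks:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;		// or HCI_UART, HCI_SDIO, ...
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */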
3090
3091 /* Register HCI device */
3092 int hci_register_dev(struct hci_dev *hdev)
3093 {
3094         int id, error;
3095
3096         if (!hdev->open || !hdev->close || !hdev->send)
3097                 return -EINVAL;
3098
3099         /* Do not allow HCI_AMP devices to register at index 0,
3100          * so the index can be used as the AMP controller ID.
3101          */
3102         switch (hdev->dev_type) {
3103         case HCI_PRIMARY:
3104                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3105                 break;
3106         case HCI_AMP:
3107                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3108                 break;
3109         default:
3110                 return -EINVAL;
3111         }
3112
3113         if (id < 0)
3114                 return id;
3115
3116         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3117         hdev->id = id;
3118
3119         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3120
3121         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3122         if (!hdev->workqueue) {
3123                 error = -ENOMEM;
3124                 goto err;
3125         }
3126
3127         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3128                                                       hdev->name);
3129         if (!hdev->req_workqueue) {
3130                 destroy_workqueue(hdev->workqueue);
3131                 error = -ENOMEM;
3132                 goto err;
3133         }
3134
3135         if (!IS_ERR_OR_NULL(bt_debugfs))
3136                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3137
3138         dev_set_name(&hdev->dev, "%s", hdev->name);
3139
3140         error = device_add(&hdev->dev);
3141         if (error < 0)
3142                 goto err_wqueue;
3143
3144         hci_leds_init(hdev);
3145
3146         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3147                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3148                                     hdev);
3149         if (hdev->rfkill) {
3150                 if (rfkill_register(hdev->rfkill) < 0) {
3151                         rfkill_destroy(hdev->rfkill);
3152                         hdev->rfkill = NULL;
3153                 }
3154         }
3155
3156         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3157                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3158
3159         hci_dev_set_flag(hdev, HCI_SETUP);
3160         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3161
3162         if (hdev->dev_type == HCI_PRIMARY) {
3163                 /* Assume BR/EDR support until proven otherwise (such as
3164                  * through reading supported features during init).
3165                  */
3166                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3167         }
3168
3169         write_lock(&hci_dev_list_lock);
3170         list_add(&hdev->list, &hci_dev_list);
3171         write_unlock(&hci_dev_list_lock);
3172
3173         /* Devices that are marked for raw-only usage are unconfigured
3174          * and should not be included in normal operation.
3175          */
3176         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3177                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3178
3179         hci_sock_dev_event(hdev, HCI_DEV_REG);
3180         hci_dev_hold(hdev);
3181
3182         queue_work(hdev->req_workqueue, &hdev->power_on);
3183
3184         return id;
3185
3186 err_wqueue:
3187         debugfs_remove_recursive(hdev->debugfs);
3188         destroy_workqueue(hdev->workqueue);
3189         destroy_workqueue(hdev->req_workqueue);
3190 err:
3191         ida_simple_remove(&hci_index_ida, hdev->id);
3192
3193         return error;
3194 }
3195 EXPORT_SYMBOL(hci_register_dev);
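
/* Illustrative sketch (not part of this file): a transport driver
 * typically pairs hci_alloc_dev()/hci_register_dev() as below, where
 * my_open, my_close and my_send are hypothetical driver callbacks:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	int err;
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */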
3196
3197 /* Unregister HCI device */
3198 void hci_unregister_dev(struct hci_dev *hdev)
3199 {
3200         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3201
3202         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3203
3204         write_lock(&hci_dev_list_lock);
3205         list_del(&hdev->list);
3206         write_unlock(&hci_dev_list_lock);
3207
3208         cancel_work_sync(&hdev->power_on);
3209
3210         hci_dev_do_close(hdev);
3211
3212         if (!test_bit(HCI_INIT, &hdev->flags) &&
3213             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3214             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3215                 hci_dev_lock(hdev);
3216                 mgmt_index_removed(hdev);
3217                 hci_dev_unlock(hdev);
3218         }
3219
3220         /* mgmt_index_removed should take care of emptying the
3221          * pending list */
3222         BUG_ON(!list_empty(&hdev->mgmt_pending));
3223
3224         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3225
3226         if (hdev->rfkill) {
3227                 rfkill_unregister(hdev->rfkill);
3228                 rfkill_destroy(hdev->rfkill);
3229         }
3230
3231         device_del(&hdev->dev);
3232         /* Actual cleanup is deferred until hci_cleanup_dev(). */
3233         hci_dev_put(hdev);
3234 }
3235 EXPORT_SYMBOL(hci_unregister_dev);
3236
3237 /* Cleanup HCI device */
3238 void hci_cleanup_dev(struct hci_dev *hdev)
3239 {
3240         debugfs_remove_recursive(hdev->debugfs);
3241         kfree_const(hdev->hw_info);
3242         kfree_const(hdev->fw_info);
3243
3244         destroy_workqueue(hdev->workqueue);
3245         destroy_workqueue(hdev->req_workqueue);
3246
3247         hci_dev_lock(hdev);
3248         hci_bdaddr_list_clear(&hdev->blacklist);
3249         hci_bdaddr_list_clear(&hdev->whitelist);
3250         hci_uuids_clear(hdev);
3251         hci_link_keys_clear(hdev);
3252         hci_smp_ltks_clear(hdev);
3253         hci_smp_irks_clear(hdev);
3254         hci_remote_oob_data_clear(hdev);
3255         hci_adv_instances_clear(hdev);
3256         hci_bdaddr_list_clear(&hdev->le_white_list);
3257         hci_conn_params_clear_all(hdev);
3258         hci_discovery_filter_clear(hdev);
3259         hci_dev_unlock(hdev);
3260
3261         ida_simple_remove(&hci_index_ida, hdev->id);
3262 }
3263
3264 /* Suspend HCI device */
3265 int hci_suspend_dev(struct hci_dev *hdev)
3266 {
3267         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3268         return 0;
3269 }
3270 EXPORT_SYMBOL(hci_suspend_dev);
3271
3272 /* Resume HCI device */
3273 int hci_resume_dev(struct hci_dev *hdev)
3274 {
3275         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3276         return 0;
3277 }
3278 EXPORT_SYMBOL(hci_resume_dev);
3279
3280 /* Reset HCI device */
3281 int hci_reset_dev(struct hci_dev *hdev)
3282 {
3283         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3284         struct sk_buff *skb;
3285
3286         skb = bt_skb_alloc(3, GFP_ATOMIC);
3287         if (!skb)
3288                 return -ENOMEM;
3289
3290         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3291         skb_put_data(skb, hw_err, 3);
3292
3293         /* Send Hardware Error to upper stack */
3294         return hci_recv_frame(hdev, skb);
3295 }
3296 EXPORT_SYMBOL(hci_reset_dev);
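
/* The three bytes sent by hci_reset_dev() form a complete Hardware
 * Error event: the event code, a parameter length of one, and a
 * hardware code of 0x00. Illustrative use (hypothetical driver error
 * path): a driver that detects a wedged controller can inject this
 * synthetic event so the core runs its error-reset handling:
 *
 *	if (my_controller_is_wedged(data))
 *		hci_reset_dev(data->hdev);
 */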
3297
3298 /* Receive frame from HCI drivers */
3299 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3300 {
3301         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3302                       && !test_bit(HCI_INIT, &hdev->flags))) {
3303                 kfree_skb(skb);
3304                 return -ENXIO;
3305         }
3306
3307         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3308             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3309             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3310                 kfree_skb(skb);
3311                 return -EINVAL;
3312         }
3313
3314         /* Incoming skb */
3315         bt_cb(skb)->incoming = 1;
3316
3317         /* Time stamp */
3318         __net_timestamp(skb);
3319
3320         skb_queue_tail(&hdev->rx_q, skb);
3321         queue_work(hdev->workqueue, &hdev->rx_work);
3322
3323         return 0;
3324 }
3325 EXPORT_SYMBOL(hci_recv_frame);
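
/* Illustrative sketch (hypothetical RX completion handler): a driver
 * tags each reassembled packet with its HCI type before handing it to
 * the core:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *
 *	return hci_recv_frame(hdev, skb);
 */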
3326
3327 /* Receive diagnostic message from HCI drivers */
3328 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3329 {
3330         /* Mark as diagnostic packet */
3331         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3332
3333         /* Time stamp */
3334         __net_timestamp(skb);
3335
3336         skb_queue_tail(&hdev->rx_q, skb);
3337         queue_work(hdev->workqueue, &hdev->rx_work);
3338
3339         return 0;
3340 }
3341 EXPORT_SYMBOL(hci_recv_diag);
3342
3343 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3344 {
3345         va_list vargs;
3346
3347         va_start(vargs, fmt);
3348         kfree_const(hdev->hw_info);
3349         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3350         va_end(vargs);
3351 }
3352 EXPORT_SYMBOL(hci_set_hw_info);
3353
3354 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3355 {
3356         va_list vargs;
3357
3358         va_start(vargs, fmt);
3359         kfree_const(hdev->fw_info);
3360         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3361         va_end(vargs);
3362 }
3363 EXPORT_SYMBOL(hci_set_fw_info);
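
/* Illustrative usage: a driver records hardware and firmware
 * descriptions once it has read them from the controller (format
 * strings and values here are hypothetical):
 *
 *	hci_set_hw_info(hdev, "rev 0x%02x", rev);
 *	hci_set_fw_info(hdev, "build %u", build);
 */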
3364
3365 /* ---- Interface to upper protocols ---- */
3366
3367 int hci_register_cb(struct hci_cb *cb)
3368 {
3369         BT_DBG("%p name %s", cb, cb->name);
3370
3371         mutex_lock(&hci_cb_list_lock);
3372         list_add_tail(&cb->list, &hci_cb_list);
3373         mutex_unlock(&hci_cb_list_lock);
3374
3375         return 0;
3376 }
3377 EXPORT_SYMBOL(hci_register_cb);
3378
3379 int hci_unregister_cb(struct hci_cb *cb)
3380 {
3381         BT_DBG("%p name %s", cb, cb->name);
3382
3383         mutex_lock(&hci_cb_list_lock);
3384         list_del(&cb->list);
3385         mutex_unlock(&hci_cb_list_lock);
3386
3387         return 0;
3388 }
3389 EXPORT_SYMBOL(hci_unregister_cb);
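
/* Illustrative sketch: an upper protocol hooks connection events with
 * a static callback table (names hypothetical):
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 */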
3390
3391 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3392 {
3393         int err;
3394
3395         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3396                skb->len);
3397
3398         /* Time stamp */
3399         __net_timestamp(skb);
3400
3401         /* Send copy to monitor */
3402         hci_send_to_monitor(hdev, skb);
3403
3404         if (atomic_read(&hdev->promisc)) {
3405                 /* Send copy to the sockets */
3406                 hci_send_to_sock(hdev, skb);
3407         }
3408
3409         /* Get rid of skb owner, prior to sending to the driver. */
3410         skb_orphan(skb);
3411
3412         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3413                 kfree_skb(skb);
3414                 return;
3415         }
3416
3417         err = hdev->send(hdev, skb);
3418         if (err < 0) {
3419                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3420                 kfree_skb(skb);
3421         }
3422 }
3423
3424 /* Send HCI command */
3425 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3426                  const void *param)
3427 {
3428         struct sk_buff *skb;
3429
3430         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3431
3432         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3433         if (!skb) {
3434                 BT_ERR("%s no memory for command", hdev->name);
3435                 return -ENOMEM;
3436         }
3437
3438         /* Stand-alone HCI commands must be flagged as
3439          * single-command requests.
3440          */
3441         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3442
3443         skb_queue_tail(&hdev->cmd_q, skb);
3444         queue_work(hdev->workqueue, &hdev->cmd_work);
3445
3446         return 0;
3447 }
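
/* Illustrative usage: queue a parameterless HCI Reset; completion is
 * asynchronous and reported back through the event path:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */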
3448
3449 /* Get data from the previously sent command */
3450 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3451 {
3452         struct hci_command_hdr *hdr;
3453
3454         if (!hdev->sent_cmd)
3455                 return NULL;
3456
3457         hdr = (void *) hdev->sent_cmd->data;
3458
3459         if (hdr->opcode != cpu_to_le16(opcode))
3460                 return NULL;
3461
3462         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3463
3464         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3465 }
3466
3467 /* Send HCI command and wait for command complete event */
3468 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3469                              const void *param, u32 timeout)
3470 {
3471         struct sk_buff *skb;
3472
3473         if (!test_bit(HCI_UP, &hdev->flags))
3474                 return ERR_PTR(-ENETDOWN);
3475
3476         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3477
3478         hci_req_sync_lock(hdev);
3479         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3480         hci_req_sync_unlock(hdev);
3481
3482         return skb;
3483 }
3484 EXPORT_SYMBOL(hci_cmd_sync);
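
/* Illustrative usage: issue a command and wait for its reply; on
 * success the caller owns (and must free) the returned skb:
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */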
3485
3486 /* Send ACL data */
3487 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3488 {
3489         struct hci_acl_hdr *hdr;
3490         int len = skb->len;
3491
3492         skb_push(skb, HCI_ACL_HDR_SIZE);
3493         skb_reset_transport_header(skb);
3494         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3495         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3496         hdr->dlen   = cpu_to_le16(len);
3497 }
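
/* hci_handle_pack() above folds the packet boundary and broadcast
 * flags into the top nibble of the 16-bit field, alongside the
 * 12-bit connection handle, e.g. (illustrative values):
 *
 *	hdr->handle = cpu_to_le16(hci_handle_pack(0x002a, ACL_START));
 */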
3498
3499 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3500                           struct sk_buff *skb, __u16 flags)
3501 {
3502         struct hci_conn *conn = chan->conn;
3503         struct hci_dev *hdev = conn->hdev;
3504         struct sk_buff *list;
3505
3506         skb->len = skb_headlen(skb);
3507         skb->data_len = 0;
3508
3509         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3510
3511         switch (hdev->dev_type) {
3512         case HCI_PRIMARY:
3513                 hci_add_acl_hdr(skb, conn->handle, flags);
3514                 break;
3515         case HCI_AMP:
3516                 hci_add_acl_hdr(skb, chan->handle, flags);
3517                 break;
3518         default:
3519                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3520                 return;
3521         }
3522
3523         list = skb_shinfo(skb)->frag_list;
3524         if (!list) {
3525                 /* Non-fragmented */
3526                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3527
3528                 skb_queue_tail(queue, skb);
3529         } else {
3530                 /* Fragmented */
3531                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3532
3533                 skb_shinfo(skb)->frag_list = NULL;
3534
3535                 /* Queue all fragments atomically. We need to use spin_lock_bh
3536                  * here because for 6LoWPAN links this function can be
3537                  * called from softirq context, where taking a normal spin
3538                  * lock could cause deadlocks.
3539                  */
3540                 spin_lock_bh(&queue->lock);
3541
3542                 __skb_queue_tail(queue, skb);
3543
3544                 flags &= ~ACL_START;
3545                 flags |= ACL_CONT;
3546                 do {
3547                         skb = list; list = list->next;
3548
3549                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3550                         hci_add_acl_hdr(skb, conn->handle, flags);
3551
3552                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3553
3554                         __skb_queue_tail(queue, skb);
3555                 } while (list);
3556
3557                 spin_unlock_bh(&queue->lock);
3558         }
3559 }
3560
3561 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3562 {
3563         struct hci_dev *hdev = chan->conn->hdev;
3564
3565         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3566
3567         hci_queue_acl(chan, &chan->data_q, skb, flags);
3568
3569         queue_work(hdev->workqueue, &hdev->tx_work);
3570 }
3571
3572 /* Send SCO data */
3573 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3574 {
3575         struct hci_dev *hdev = conn->hdev;
3576         struct hci_sco_hdr hdr;
3577
3578         BT_DBG("%s len %d", hdev->name, skb->len);
3579
3580         hdr.handle = cpu_to_le16(conn->handle);
3581         hdr.dlen   = skb->len;
3582
3583         skb_push(skb, HCI_SCO_HDR_SIZE);
3584         skb_reset_transport_header(skb);
3585         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3586
3587         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3588
3589         skb_queue_tail(&conn->data_q, skb);
3590         queue_work(hdev->workqueue, &hdev->tx_work);
3591 }
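
/* Note that, unlike ACL, SCO data is not fragmented here: the SCO
 * header's single-byte dlen limits each packet to 255 bytes of
 * payload.
 */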
3592
3593 /* ---- HCI TX task (outgoing data) ---- */
3594
3595 /* HCI Connection scheduler */
3596 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3597                                      int *quote)
3598 {
3599         struct hci_conn_hash *h = &hdev->conn_hash;
3600         struct hci_conn *conn = NULL, *c;
3601         unsigned int num = 0, min = ~0;
3602
3603         /* We don't have to lock the device here. Connections are always
3604          * added and removed with TX task disabled. */
3605
3606         rcu_read_lock();
3607
3608         list_for_each_entry_rcu(c, &h->list, list) {
3609                 if (c->type != type || skb_queue_empty(&c->data_q))
3610                         continue;
3611
3612                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3613                         continue;
3614
3615                 num++;
3616
3617                 if (c->sent < min) {
3618                         min  = c->sent;
3619                         conn = c;
3620                 }
3621
3622                 if (hci_conn_num(hdev, type) == num)
3623                         break;
3624         }
3625
3626         rcu_read_unlock();
3627
3628         if (conn) {
3629                 int cnt, q;
3630
3631                 switch (conn->type) {
3632                 case ACL_LINK:
3633                         cnt = hdev->acl_cnt;
3634                         break;
3635                 case SCO_LINK:
3636                 case ESCO_LINK:
3637                         cnt = hdev->sco_cnt;
3638                         break;
3639                 case LE_LINK:
3640                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3641                         break;
3642                 default:
3643                         cnt = 0;
3644                         BT_ERR("Unknown link type");
3645                 }
3646
3647                 q = cnt / num;
3648                 *quote = q ? q : 1;
3649         } else
3650                 *quote = 0;
3651
3652         BT_DBG("conn %p quote %d", conn, *quote);
3653         return conn;
3654 }
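
/* The quota above is a simple fair share: with, say, 8 free ACL
 * buffers (cnt) spread over 3 busy connections (num), the selected
 * connection gets a quote of 8 / 3 = 2, with a floor of 1 so it is
 * never starved outright.
 */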
3655
3656 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3657 {
3658         struct hci_conn_hash *h = &hdev->conn_hash;
3659         struct hci_conn *c;
3660
3661         BT_ERR("%s link tx timeout", hdev->name);
3662
3663         rcu_read_lock();
3664
3665         /* Kill stalled connections */
3666         list_for_each_entry_rcu(c, &h->list, list) {
3667                 if (c->type == type && c->sent) {
3668                         BT_ERR("%s killing stalled connection %pMR",
3669                                hdev->name, &c->dst);
3670                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3671                 }
3672         }
3673
3674         rcu_read_unlock();
3675 }
3676
3677 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3678                                       int *quote)
3679 {
3680         struct hci_conn_hash *h = &hdev->conn_hash;
3681         struct hci_chan *chan = NULL;
3682         unsigned int num = 0, min = ~0, cur_prio = 0;
3683         struct hci_conn *conn;
3684         int cnt, q, conn_num = 0;
3685
3686         BT_DBG("%s", hdev->name);
3687
3688         rcu_read_lock();
3689
3690         list_for_each_entry_rcu(conn, &h->list, list) {
3691                 struct hci_chan *tmp;
3692
3693                 if (conn->type != type)
3694                         continue;
3695
3696                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3697                         continue;
3698
3699                 conn_num++;
3700
3701                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3702                         struct sk_buff *skb;
3703
3704                         if (skb_queue_empty(&tmp->data_q))
3705                                 continue;
3706
3707                         skb = skb_peek(&tmp->data_q);
3708                         if (skb->priority < cur_prio)
3709                                 continue;
3710
3711                         if (skb->priority > cur_prio) {
3712                                 num = 0;
3713                                 min = ~0;
3714                                 cur_prio = skb->priority;
3715                         }
3716
3717                         num++;
3718
3719                         if (conn->sent < min) {
3720                                 min  = conn->sent;
3721                                 chan = tmp;
3722                         }
3723                 }
3724
3725                 if (hci_conn_num(hdev, type) == conn_num)
3726                         break;
3727         }
3728
3729         rcu_read_unlock();
3730
3731         if (!chan)
3732                 return NULL;
3733
3734         switch (chan->conn->type) {
3735         case ACL_LINK:
3736                 cnt = hdev->acl_cnt;
3737                 break;
3738         case AMP_LINK:
3739                 cnt = hdev->block_cnt;
3740                 break;
3741         case SCO_LINK:
3742         case ESCO_LINK:
3743                 cnt = hdev->sco_cnt;
3744                 break;
3745         case LE_LINK:
3746                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3747                 break;
3748         default:
3749                 cnt = 0;
3750                 BT_ERR("Unknown link type");
3751         }
3752
3753         q = cnt / num;
3754         *quote = q ? q : 1;
3755         BT_DBG("chan %p quote %d", chan, *quote);
3756         return chan;
3757 }
3758
3759 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3760 {
3761         struct hci_conn_hash *h = &hdev->conn_hash;
3762         struct hci_conn *conn;
3763         int num = 0;
3764
3765         BT_DBG("%s", hdev->name);
3766
3767         rcu_read_lock();
3768
3769         list_for_each_entry_rcu(conn, &h->list, list) {
3770                 struct hci_chan *chan;
3771
3772                 if (conn->type != type)
3773                         continue;
3774
3775                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3776                         continue;
3777
3778                 num++;
3779
3780                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3781                         struct sk_buff *skb;
3782
3783                         if (chan->sent) {
3784                                 chan->sent = 0;
3785                                 continue;
3786                         }
3787
3788                         if (skb_queue_empty(&chan->data_q))
3789                                 continue;
3790
3791                         skb = skb_peek(&chan->data_q);
3792                         if (skb->priority >= HCI_PRIO_MAX - 1)
3793                                 continue;
3794
3795                         skb->priority = HCI_PRIO_MAX - 1;
3796
3797                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3798                                skb->priority);
3799                 }
3800
3801                 if (hci_conn_num(hdev, type) == num)
3802                         break;
3803         }
3804
3805         rcu_read_unlock();
3806
3807 }
3808
3809 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3810 {
3811         /* Calculate count of blocks used by this packet */
3812         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3813 }
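
/* Worked example: with a 16-byte block_len, a 100-byte ACL packet
 * (96 payload bytes after the 4-byte ACL header) occupies
 * DIV_ROUND_UP(96, 16) = 6 controller blocks.
 */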
3814
3815 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3816 {
3817         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3818                 /* ACL tx timeout must be longer than maximum
3819                  * link supervision timeout (40.9 seconds) */
3820                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3821                                        HCI_ACL_TX_TIMEOUT))
3822                         hci_link_tx_to(hdev, ACL_LINK);
3823         }
3824 }
3825
3826 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3827 {
3828         unsigned int cnt = hdev->acl_cnt;
3829         struct hci_chan *chan;
3830         struct sk_buff *skb;
3831         int quote;
3832
3833         __check_timeout(hdev, cnt);
3834
3835         while (hdev->acl_cnt &&
3836                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3837                 u32 priority = (skb_peek(&chan->data_q))->priority;
3838                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3839                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3840                                skb->len, skb->priority);
3841
3842                         /* Stop if priority has changed */
3843                         if (skb->priority < priority)
3844                                 break;
3845
3846                         skb = skb_dequeue(&chan->data_q);
3847
3848                         hci_conn_enter_active_mode(chan->conn,
3849                                                    bt_cb(skb)->force_active);
3850
3851                         hci_send_frame(hdev, skb);
3852                         hdev->acl_last_tx = jiffies;
3853
3854                         hdev->acl_cnt--;
3855                         chan->sent++;
3856                         chan->conn->sent++;
3857                 }
3858         }
3859
3860         if (cnt != hdev->acl_cnt)
3861                 hci_prio_recalculate(hdev, ACL_LINK);
3862 }
3863
3864 static void hci_sched_acl_blk(struct hci_dev *hdev)
3865 {
3866         unsigned int cnt = hdev->block_cnt;
3867         struct hci_chan *chan;
3868         struct sk_buff *skb;
3869         int quote;
3870         u8 type;
3871
3872         __check_timeout(hdev, cnt);
3873
3874         BT_DBG("%s", hdev->name);
3875
3876         if (hdev->dev_type == HCI_AMP)
3877                 type = AMP_LINK;
3878         else
3879                 type = ACL_LINK;
3880
3881         while (hdev->block_cnt > 0 &&
3882                (chan = hci_chan_sent(hdev, type, &quote))) {
3883                 u32 priority = (skb_peek(&chan->data_q))->priority;
3884                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3885                         int blocks;
3886
3887                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3888                                skb->len, skb->priority);
3889
3890                         /* Stop if priority has changed */
3891                         if (skb->priority < priority)
3892                                 break;
3893
3894                         skb = skb_dequeue(&chan->data_q);
3895
3896                         blocks = __get_blocks(hdev, skb);
3897                         if (blocks > hdev->block_cnt)
3898                                 return;
3899
3900                         hci_conn_enter_active_mode(chan->conn,
3901                                                    bt_cb(skb)->force_active);
3902
3903                         hci_send_frame(hdev, skb);
3904                         hdev->acl_last_tx = jiffies;
3905
3906                         hdev->block_cnt -= blocks;
3907                         quote -= blocks;
3908
3909                         chan->sent += blocks;
3910                         chan->conn->sent += blocks;
3911                 }
3912         }
3913
3914         if (cnt != hdev->block_cnt)
3915                 hci_prio_recalculate(hdev, type);
3916 }
3917
3918 static void hci_sched_acl(struct hci_dev *hdev)
3919 {
3920         BT_DBG("%s", hdev->name);
3921
3922         /* No ACL link over BR/EDR controller */
3923         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3924                 return;
3925
3926         /* No AMP link over AMP controller */
3927         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3928                 return;
3929
3930         switch (hdev->flow_ctl_mode) {
3931         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3932                 hci_sched_acl_pkt(hdev);
3933                 break;
3934
3935         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3936                 hci_sched_acl_blk(hdev);
3937                 break;
3938         }
3939 }
3940
3941 /* Schedule SCO */
3942 static void hci_sched_sco(struct hci_dev *hdev)
3943 {
3944         struct hci_conn *conn;
3945         struct sk_buff *skb;
3946         int quote;
3947
3948         BT_DBG("%s", hdev->name);
3949
3950         if (!hci_conn_num(hdev, SCO_LINK))
3951                 return;
3952
3953         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3954                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3955                         BT_DBG("skb %p len %d", skb, skb->len);
3956                         hci_send_frame(hdev, skb);
3957
3958                         conn->sent++;
3959                         if (conn->sent == ~0)
3960                                 conn->sent = 0;
3961                 }
3962         }
3963 }
3964
3965 static void hci_sched_esco(struct hci_dev *hdev)
3966 {
3967         struct hci_conn *conn;
3968         struct sk_buff *skb;
3969         int quote;
3970
3971         BT_DBG("%s", hdev->name);
3972
3973         if (!hci_conn_num(hdev, ESCO_LINK))
3974                 return;
3975
3976         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3977                                                      &quote))) {
3978                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3979                         BT_DBG("skb %p len %d", skb, skb->len);
3980                         hci_send_frame(hdev, skb);
3981
3982                         conn->sent++;
3983                         if (conn->sent == ~0)
3984                                 conn->sent = 0;
3985                 }
3986         }
3987 }
3988
3989 static void hci_sched_le(struct hci_dev *hdev)
3990 {
3991         struct hci_chan *chan;
3992         struct sk_buff *skb;
3993         int quote, cnt, tmp;
3994
3995         BT_DBG("%s", hdev->name);
3996
3997         if (!hci_conn_num(hdev, LE_LINK))
3998                 return;
3999
4000         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4001                 /* LE tx timeout must be longer than maximum
4002                  * link supervision timeout (40.9 seconds) */
4003                 if (!hdev->le_cnt && hdev->le_pkts &&
4004                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4005                         hci_link_tx_to(hdev, LE_LINK);
4006         }
4007
4008         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4009         tmp = cnt;
4010         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4011                 u32 priority = (skb_peek(&chan->data_q))->priority;
4012                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4013                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4014                                skb->len, skb->priority);
4015
4016                         /* Stop if priority has changed */
4017                         if (skb->priority < priority)
4018                                 break;
4019
4020                         skb = skb_dequeue(&chan->data_q);
4021
4022                         hci_send_frame(hdev, skb);
4023                         hdev->le_last_tx = jiffies;
4024
4025                         cnt--;
4026                         chan->sent++;
4027                         chan->conn->sent++;
4028                 }
4029         }
4030
4031         if (hdev->le_pkts)
4032                 hdev->le_cnt = cnt;
4033         else
4034                 hdev->acl_cnt = cnt;
4035
4036         if (cnt != tmp)
4037                 hci_prio_recalculate(hdev, LE_LINK);
4038 }
4039
4040 static void hci_tx_work(struct work_struct *work)
4041 {
4042         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4043         struct sk_buff *skb;
4044
4045         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4046                hdev->sco_cnt, hdev->le_cnt);
4047
4048         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4049                 /* Schedule queues and send stuff to HCI driver */
4050                 hci_sched_acl(hdev);
4051                 hci_sched_sco(hdev);
4052                 hci_sched_esco(hdev);
4053                 hci_sched_le(hdev);
4054         }
4055
4056         /* Send next queued raw (unknown type) packet */
4057         while ((skb = skb_dequeue(&hdev->raw_q)))
4058                 hci_send_frame(hdev, skb);
4059 }
4060
4061 /* ----- HCI RX task (incoming data processing) ----- */
4062
4063 /* ACL data packet */
4064 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4065 {
4066         struct hci_acl_hdr *hdr = (void *) skb->data;
4067         struct hci_conn *conn;
4068         __u16 handle, flags;
4069
4070         skb_pull(skb, HCI_ACL_HDR_SIZE);
4071
4072         handle = __le16_to_cpu(hdr->handle);
4073         flags  = hci_flags(handle);
4074         handle = hci_handle(handle);
4075
4076         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4077                handle, flags);
4078
4079         hdev->stat.acl_rx++;
4080
4081         hci_dev_lock(hdev);
4082         conn = hci_conn_hash_lookup_handle(hdev, handle);
4083         hci_dev_unlock(hdev);
4084
4085         if (conn) {
4086                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4087
4088                 /* Send to upper protocol */
4089                 l2cap_recv_acldata(conn, skb, flags);
4090                 return;
4091         } else {
4092                 BT_ERR("%s ACL packet for unknown connection handle %d",
4093                        hdev->name, handle);
4094         }
4095
4096         kfree_skb(skb);
4097 }
4098
4099 /* SCO data packet */
4100 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4101 {
4102         struct hci_sco_hdr *hdr = (void *) skb->data;
4103         struct hci_conn *conn;
4104         __u16 handle;
4105
4106         skb_pull(skb, HCI_SCO_HDR_SIZE);
4107
4108         handle = __le16_to_cpu(hdr->handle);
4109
4110         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4111
4112         hdev->stat.sco_rx++;
4113
4114         hci_dev_lock(hdev);
4115         conn = hci_conn_hash_lookup_handle(hdev, handle);
4116         hci_dev_unlock(hdev);
4117
4118         if (conn) {
4119                 /* Send to upper protocol */
4120                 sco_recv_scodata(conn, skb);
4121                 return;
4122         } else {
4123                 BT_ERR("%s SCO packet for unknown connection handle %d",
4124                        hdev->name, handle);
4125         }
4126
4127         kfree_skb(skb);
4128 }
4129
4130 static bool hci_req_is_complete(struct hci_dev *hdev)
4131 {
4132         struct sk_buff *skb;
4133
4134         skb = skb_peek(&hdev->cmd_q);
4135         if (!skb)
4136                 return true;
4137
4138         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4139 }
4140
4141 static void hci_resend_last(struct hci_dev *hdev)
4142 {
4143         struct hci_command_hdr *sent;
4144         struct sk_buff *skb;
4145         u16 opcode;
4146
4147         if (!hdev->sent_cmd)
4148                 return;
4149
4150         sent = (void *) hdev->sent_cmd->data;
4151         opcode = __le16_to_cpu(sent->opcode);
4152         if (opcode == HCI_OP_RESET)
4153                 return;
4154
4155         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4156         if (!skb)
4157                 return;
4158
4159         skb_queue_head(&hdev->cmd_q, skb);
4160         queue_work(hdev->workqueue, &hdev->cmd_work);
4161 }
4162
4163 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4164                           hci_req_complete_t *req_complete,
4165                           hci_req_complete_skb_t *req_complete_skb)
4166 {
4167         struct sk_buff *skb;
4168         unsigned long flags;
4169
4170         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4171
4172         /* If the completed command doesn't match the last one that was
4173          * sent, we need to do special handling of it.
4174          */
4175         if (!hci_sent_cmd_data(hdev, opcode)) {
4176                 /* Some CSR based controllers generate a spontaneous
4177                  * reset complete event during init and any pending
4178                  * command will never be completed. In such a case we
4179                  * need to resend whatever was the last sent
4180                  * command.
4181                  */
4182                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4183                         hci_resend_last(hdev);
4184
4185                 return;
4186         }
4187
4188         /* If the command succeeded and there's still more commands in
4189          * this request the request is not yet complete.
4190          */
4191         if (!status && !hci_req_is_complete(hdev))
4192                 return;
4193
4194         /* If this was the last command in a request, the complete
4195          * callback would be found in hdev->sent_cmd instead of the
4196          * command queue (hdev->cmd_q).
4197          */
4198         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4199                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4200                 return;
4201         }
4202
4203         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4204                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4205                 return;
4206         }
4207
4208         /* Remove all pending commands belonging to this request */
4209         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4210         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4211                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4212                         __skb_queue_head(&hdev->cmd_q, skb);
4213                         break;
4214                 }
4215
4216                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4217                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4218                 else
4219                         *req_complete = bt_cb(skb)->hci.req_complete;
4220                 dev_kfree_skb_irq(skb);
4221         }
4222         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4223 }
4224
4225 static void hci_rx_work(struct work_struct *work)
4226 {
4227         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4228         struct sk_buff *skb;
4229
4230         BT_DBG("%s", hdev->name);
4231
4232         while ((skb = skb_dequeue(&hdev->rx_q))) {
4233                 /* Send copy to monitor */
4234                 hci_send_to_monitor(hdev, skb);
4235
4236                 if (atomic_read(&hdev->promisc)) {
4237                         /* Send copy to the sockets */
4238                         hci_send_to_sock(hdev, skb);
4239                 }
4240
4241                 /* If the device has been opened in HCI_USER_CHANNEL,
4242                  * userspace has exclusive access to the device.
4243                  * While the device is in HCI_INIT, we still need to
4244                  * process incoming packets so that the driver can
4245                  * complete its setup().
4246                  */
4247                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4248                     !test_bit(HCI_INIT, &hdev->flags)) {
4249                         kfree_skb(skb);
4250                         continue;
4251                 }
4252
4253                 if (test_bit(HCI_INIT, &hdev->flags)) {
4254                         /* Don't process data packets in this state. */
4255                         switch (hci_skb_pkt_type(skb)) {
4256                         case HCI_ACLDATA_PKT:
4257                         case HCI_SCODATA_PKT:
4258                                 kfree_skb(skb);
4259                                 continue;
4260                         }
4261                 }
4262
4263                 /* Process frame */
4264                 switch (hci_skb_pkt_type(skb)) {
4265                 case HCI_EVENT_PKT:
4266                         BT_DBG("%s Event packet", hdev->name);
4267                         hci_event_packet(hdev, skb);
4268                         break;
4269
4270                 case HCI_ACLDATA_PKT:
4271                         BT_DBG("%s ACL data packet", hdev->name);
4272                         hci_acldata_packet(hdev, skb);
4273                         break;
4274
4275                 case HCI_SCODATA_PKT:
4276                         BT_DBG("%s SCO data packet", hdev->name);
4277                         hci_scodata_packet(hdev, skb);
4278                         break;
4279
4280                 default:
4281                         kfree_skb(skb);
4282                         break;
4283                 }
4284         }
4285 }
4286
4287 static void hci_cmd_work(struct work_struct *work)
4288 {
4289         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4290         struct sk_buff *skb;
4291
4292         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4293                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4294
4295         /* Send queued commands */
4296         if (atomic_read(&hdev->cmd_cnt)) {
4297                 skb = skb_dequeue(&hdev->cmd_q);
4298                 if (!skb)
4299                         return;
4300
4301                 kfree_skb(hdev->sent_cmd);
4302
4303                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4304                 if (hdev->sent_cmd) {
4305                         atomic_dec(&hdev->cmd_cnt);
4306                         hci_send_frame(hdev, skb);
4307                         if (test_bit(HCI_RESET, &hdev->flags))
4308                                 cancel_delayed_work(&hdev->cmd_timer);
4309                         else
4310                                 schedule_delayed_work(&hdev->cmd_timer,
4311                                                       HCI_CMD_TIMEOUT);
4312                 } else {
4313                         skb_queue_head(&hdev->cmd_q, skb);
4314                         queue_work(hdev->workqueue, &hdev->cmd_work);
4315                 }
4316         }
4317 }