GNU Linux-libre 5.4.207-gnu1
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or is in user channel operation, there is no need
         * for the vendor callback. Just store the desired value, and the
         * setting will be programmed when the controller is powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}
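
/* Usage sketch (illustrative, not part of the driver): with debugfs
 * mounted in the usual place, both entries behave as boolean files,
 * e.g.
 *
 *   # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   N
 *   # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * The exact mount point and the hciX index depend on the system.
 */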

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}
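
/* A note on the pattern above (sketch, based on the Core spec):
 * hdev->commands[] is a byte-wise copy of the "Supported Commands"
 * bit mask returned by Read Local Supported Commands, so octet N,
 * bit M of the spec table maps to (hdev->commands[N] & (1 << M)).
 * The test above, commands[14] & 0x20, therefore checks octet 14
 * bit 5, which the spec assigns to Read Local Supported Features.
 * The same pattern recurs throughout the init requests below.
 */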

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms = 20 s) */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
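
/* A note on the mask layout (sketch, based on the Core spec): the
 * Set Event Mask parameter is a 64-bit little-endian bit field where
 * bit (event_code - 1) enables the event with that code. In array
 * terms:
 *
 *   events[(code - 1) / 8] |= 1 << ((code - 1) % 8);
 *
 * Worked example: Disconnection Complete has event code 0x05, so
 * (5 - 1) = 4 lands in byte 0, bit 4, i.e. events[0] |= 0x10, which
 * matches the assignment above.
 */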

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However some controllers list the
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;        /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Create Connection
                 * command, enable the corresponding event.
                 */
                if (use_ext_conn(hdev))
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI specification forbids mixing legacy and
                         * extended advertising commands, and
                         * READ_ADV_TX_POWER is one of the legacy commands.
                         * So do not call it if extended advertising is
                         * supported, otherwise the controller will return
                         * COMMAND_DISALLOWED for extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

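/* Staged bring-up overview (a summary of the flow below, for
 * orientation): init1 resets the controller and reads its basic
 * identity, init2 performs BR/EDR and LE base setup, init3 programs
 * the event masks and LE settings, and init4 covers optional features
 * such as stored link keys and default PHYs. AMP controllers only run
 * the first two stages.
 */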
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}
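
/* The scan parameter is the Write Scan Enable bit field from the Core
 * spec (a sketch of the values; the SCAN_* constants in hci.h cover
 * these):
 *
 *   0x00  scans disabled
 *   0x01  inquiry scan enabled, page scan disabled
 *   0x02  inquiry scan disabled, page scan enabled
 *   0x03  both inquiry scan and page scan enabled
 */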

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
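
/* Usage sketch (illustrative): every successful hci_dev_get() must be
 * balanced by hci_dev_put() once the caller is done with the device,
 * e.g.
 *
 *   struct hci_dev *hdev = hci_dev_get(0);
 *
 *   if (hdev) {
 *           ... use hdev ...
 *           hci_dev_put(hdev);
 *   }
 */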

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
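
/* Ordering note (an inference from the loop above, not from the spec):
 * inquiry RSSI values are signed dBm and typically negative, so a
 * smaller absolute value means a stronger signal. The resolve list is
 * thus kept sorted by ascending abs(rssi), so names of the devices
 * with the strongest signal are resolved first; entries already in
 * NAME_PENDING state are skipped over and stay ahead of the new entry.
 */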

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore, allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
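
/* Userspace sketch (illustrative only; "sock" is assumed to be a raw
 * HCI socket, and the request reaches this function through the
 * HCIINQUIRY ioctl). The request header and the response buffer are
 * passed in a single allocation:
 *
 *   buf = malloc(sizeof(struct hci_inquiry_req) +
 *                255 * sizeof(struct inquiry_info));
 *   ir = (struct hci_inquiry_req *) buf;
 *   ir->dev_id  = 0;
 *   ir->flags   = IREQ_CACHE_FLUSH;
 *   ir->lap[0]  = 0x33; ir->lap[1] = 0x8b; ir->lap[2] = 0x9e; // GIAC
 *   ir->length  = 8;    // inquiry length in 1.28 s units
 *   ir->num_rsp = 255;
 *   ioctl(sock, HCIINQUIRY, (unsigned long) buf);
 */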

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for a HCI device from
 *                                     a firmware node property.
 * @hdev:       The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables but were never updated by the
 * firmware. For example, the DTS could define 'local-bd-address' with a
 * zero BD address.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
        struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
        bdaddr_t ba;
        int ret;

        ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
                                            (u8 *)&ba, sizeof(ba));
        if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
                return;

        bacpy(&hdev->public_addr, &ba);
}
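
/* Devicetree sketch (illustrative node; the compatible string is
 * hypothetical): the property holds the six address bytes in
 * little-endian order, so the address 00:11:22:33:44:55 would appear
 * as:
 *
 *   bluetooth {
 *           compatible = "vendor,example-bt";
 *           local-bd-address = [55 44 33 22 11 00];
 *   };
 */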

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP) ||
            test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
                bool invalid_bdaddr;

                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set the quirk to mark the
                 * BD_ADDR invalid before creating the HCI device or in
                 * its setup callback.
                 */
                invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
                                          &hdev->quirks);

                if (ret)
                        goto setup_failed;

                if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
                        if (!bacmp(&hdev->public_addr, BDADDR_ANY))
                                hci_dev_get_bd_addr_from_property(hdev);

                        if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                            hdev->set_bdaddr) {
                                ret = hdev->set_bdaddr(hdev,
                                                       &hdev->public_addr);

                                /* If setting of the BD_ADDR from the device
                                 * property succeeds, then treat the address
                                 * as valid even if the invalid BD_ADDR
                                 * quirk indicates otherwise.
                                 */
                                if (!ret)
                                        invalid_bdaddr = false;
                        }
                }

setup_failed:
                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * For the invalid BD_ADDR quirk it is possible that
                 * it becomes a valid address if the bootloader does
                 * provide it (see above).
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    invalid_bdaddr)
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * has completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                hci_adv_instances_set_rpa_expired(hdev, true);
1555                 set_bit(HCI_UP, &hdev->flags);
1556                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1557                 hci_leds_update_powered(hdev, true);
1558                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1559                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1560                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1561                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1562                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1563                     hdev->dev_type == HCI_PRIMARY) {
1564                         ret = __hci_req_hci_power_on(hdev);
1565                         mgmt_power_on(hdev, ret);
1566                 }
1567         } else {
1568                 /* Init failed, cleanup */
1569                 flush_work(&hdev->tx_work);
1570
1571                 /* Since hci_rx_work() can queue new cmd_work, it must be
1572                  * flushed first to avoid an unexpected call of
1573                  * hci_cmd_work().
1574                  */
1575                 flush_work(&hdev->rx_work);
1576                 flush_work(&hdev->cmd_work);
1577
1578                 skb_queue_purge(&hdev->cmd_q);
1579                 skb_queue_purge(&hdev->rx_q);
1580
1581                 if (hdev->flush)
1582                         hdev->flush(hdev);
1583
1584                 if (hdev->sent_cmd) {
1585                         kfree_skb(hdev->sent_cmd);
1586                         hdev->sent_cmd = NULL;
1587                 }
1588
1589                 clear_bit(HCI_RUNNING, &hdev->flags);
1590                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1591
1592                 hdev->close(hdev);
1593                 hdev->flags &= BIT(HCI_RAW);
1594         }
1595
1596 done:
1597         hci_req_sync_unlock(hdev);
1598         return ret;
1599 }
1600
1601 /* ---- HCI ioctl helpers ---- */
1602
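/* Power on an HCI device given its index. The reference taken with
 * hci_dev_get() is dropped again before returning, so callers pass only
 * the index. Unconfigured controllers are rejected here unless they are
 * being opened as a user channel.
 */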
1603 int hci_dev_open(__u16 dev)
1604 {
1605         struct hci_dev *hdev;
1606         int err;
1607
1608         hdev = hci_dev_get(dev);
1609         if (!hdev)
1610                 return -ENODEV;
1611
1612         /* Devices that are marked as unconfigured can only be powered
1613          * up as user channel. Trying to bring them up as normal devices
1614          * will result in a failure. Only user channel operation is
1615          * possible.
1616          *
1617          * When this function is called for a user channel, the flag
1618          * HCI_USER_CHANNEL will be set first before attempting to
1619          * open the device.
1620          */
1621         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1622             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1623                 err = -EOPNOTSUPP;
1624                 goto done;
1625         }
1626
1627         /* We need to ensure that no other power on/off work is pending
1628          * before proceeding to call hci_dev_do_open. This is
1629          * particularly important if the setup procedure has not yet
1630          * completed.
1631          */
1632         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1633                 cancel_delayed_work(&hdev->power_off);
1634
1635         /* After this call it is guaranteed that the setup procedure
1636          * has finished. This means that error conditions like RFKILL
1637          * or no valid public or static random address now apply.
1638          */
1639         flush_workqueue(hdev->req_workqueue);
1640
1641         /* For controllers not using the management interface and that
1642          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1643          * so that pairing works for them. Once the management interface
1644          * is in use this bit will be cleared again and userspace has
1645          * to explicitly enable it.
1646          */
1647         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1648             !hci_dev_test_flag(hdev, HCI_MGMT))
1649                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1650
1651         err = hci_dev_do_open(hdev);
1652
1653 done:
1654         hci_dev_put(hdev);
1655         return err;
1656 }
1657
1658 /* This function requires the caller holds hdev->lock */
1659 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1660 {
1661         struct hci_conn_params *p;
1662
1663         list_for_each_entry(p, &hdev->le_conn_params, list) {
1664                 if (p->conn) {
1665                         hci_conn_drop(p->conn);
1666                         hci_conn_put(p->conn);
1667                         p->conn = NULL;
1668                 }
1669                 list_del_init(&p->action);
1670         }
1671
1672         BT_DBG("All LE pending actions cleared");
1673 }
1674
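/* Power down an HCI device: run the vendor shutdown hook where
 * applicable, flush all pending work and queues, notify monitors and
 * mgmt, optionally issue an HCI Reset, and finally call the driver's
 * close callback.
 */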
1675 int hci_dev_do_close(struct hci_dev *hdev)
1676 {
1677         bool auto_off;
1678
1679         BT_DBG("%s %p", hdev->name, hdev);
1680
1681         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1682             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1683             test_bit(HCI_UP, &hdev->flags)) {
1684                 /* Execute vendor specific shutdown routine */
1685                 if (hdev->shutdown)
1686                         hdev->shutdown(hdev);
1687         }
1688
1689         cancel_delayed_work(&hdev->power_off);
1690
1691         hci_request_cancel_all(hdev);
1692         hci_req_sync_lock(hdev);
1693
1694         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1695                 cancel_delayed_work_sync(&hdev->cmd_timer);
1696                 hci_req_sync_unlock(hdev);
1697                 return 0;
1698         }
1699
1700         hci_leds_update_powered(hdev, false);
1701
1702         /* Flush RX and TX works */
1703         flush_work(&hdev->tx_work);
1704         flush_work(&hdev->rx_work);
1705
1706         if (hdev->discov_timeout > 0) {
1707                 hdev->discov_timeout = 0;
1708                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1709                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1710         }
1711
1712         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1713                 cancel_delayed_work(&hdev->service_cache);
1714
1715         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1716                 struct adv_info *adv_instance;
1717
1718                 cancel_delayed_work_sync(&hdev->rpa_expired);
1719
1720                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1721                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1722         }
1723
1724         /* Avoid potential lockdep warnings from the *_flush() calls by
1725          * ensuring the workqueue is empty up front.
1726          */
1727         drain_workqueue(hdev->workqueue);
1728
1729         hci_dev_lock(hdev);
1730
1731         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1732
1733         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1734
1735         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1736             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1737             hci_dev_test_flag(hdev, HCI_MGMT))
1738                 __mgmt_power_off(hdev);
1739
1740         hci_inquiry_cache_flush(hdev);
1741         hci_pend_le_actions_clear(hdev);
1742         hci_conn_hash_flush(hdev);
1743         hci_dev_unlock(hdev);
1744
1745         smp_unregister(hdev);
1746
1747         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1748
1749         if (hdev->flush)
1750                 hdev->flush(hdev);
1751
1752         /* Reset device */
1753         skb_queue_purge(&hdev->cmd_q);
1754         atomic_set(&hdev->cmd_cnt, 1);
1755         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1756             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1757                 set_bit(HCI_INIT, &hdev->flags);
1758                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1759                 clear_bit(HCI_INIT, &hdev->flags);
1760         }
1761
1762         /* flush cmd  work */
1763         flush_work(&hdev->cmd_work);
1764
1765         /* Drop queues */
1766         skb_queue_purge(&hdev->rx_q);
1767         skb_queue_purge(&hdev->cmd_q);
1768         skb_queue_purge(&hdev->raw_q);
1769
1770         /* Drop last sent command */
1771         if (hdev->sent_cmd) {
1772                 cancel_delayed_work_sync(&hdev->cmd_timer);
1773                 kfree_skb(hdev->sent_cmd);
1774                 hdev->sent_cmd = NULL;
1775         }
1776
1777         clear_bit(HCI_RUNNING, &hdev->flags);
1778         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1779
1780         /* After this point our queues are empty
1781          * and no tasks are scheduled. */
1782         hdev->close(hdev);
1783
1784         /* Clear flags */
1785         hdev->flags &= BIT(HCI_RAW);
1786         hci_dev_clear_volatile_flags(hdev);
1787
1788         /* Controller radio is available but is currently powered down */
1789         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1790
1791         memset(hdev->eir, 0, sizeof(hdev->eir));
1792         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1793         bacpy(&hdev->random_addr, BDADDR_ANY);
1794
1795         hci_req_sync_unlock(hdev);
1796
1797         hci_dev_put(hdev);
1798         return 0;
1799 }
1800
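/* ioctl counterpart of hci_dev_do_close(): looks up the device by index
 * and refuses to power down a device that is bound to a user channel.
 */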
1801 int hci_dev_close(__u16 dev)
1802 {
1803         struct hci_dev *hdev;
1804         int err;
1805
1806         hdev = hci_dev_get(dev);
1807         if (!hdev)
1808                 return -ENODEV;
1809
1810         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1811                 err = -EBUSY;
1812                 goto done;
1813         }
1814
1815         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1816                 cancel_delayed_work(&hdev->power_off);
1817
1818         err = hci_dev_do_close(hdev);
1819
1820 done:
1821         hci_dev_put(hdev);
1822         return err;
1823 }
1824
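/* Soft-reset a running device: drop all queued packets, flush the
 * inquiry cache and connection hash, and issue an HCI Reset command
 * while holding the request lock.
 */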
1825 static int hci_dev_do_reset(struct hci_dev *hdev)
1826 {
1827         int ret;
1828
1829         BT_DBG("%s %p", hdev->name, hdev);
1830
1831         hci_req_sync_lock(hdev);
1832
1833         /* Drop queues */
1834         skb_queue_purge(&hdev->rx_q);
1835         skb_queue_purge(&hdev->cmd_q);
1836
1837         /* Avoid potential lockdep warnings from the *_flush() calls by
1838          * ensuring the workqueue is empty up front.
1839          */
1840         drain_workqueue(hdev->workqueue);
1841
1842         hci_dev_lock(hdev);
1843         hci_inquiry_cache_flush(hdev);
1844         hci_conn_hash_flush(hdev);
1845         hci_dev_unlock(hdev);
1846
1847         if (hdev->flush)
1848                 hdev->flush(hdev);
1849
1850         atomic_set(&hdev->cmd_cnt, 1);
1851         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1852
1853         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1854
1855         hci_req_sync_unlock(hdev);
1856         return ret;
1857 }
1858
1859 int hci_dev_reset(__u16 dev)
1860 {
1861         struct hci_dev *hdev;
1862         int err;
1863
1864         hdev = hci_dev_get(dev);
1865         if (!hdev)
1866                 return -ENODEV;
1867
1868         if (!test_bit(HCI_UP, &hdev->flags)) {
1869                 err = -ENETDOWN;
1870                 goto done;
1871         }
1872
1873         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1874                 err = -EBUSY;
1875                 goto done;
1876         }
1877
1878         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1879                 err = -EOPNOTSUPP;
1880                 goto done;
1881         }
1882
1883         err = hci_dev_do_reset(hdev);
1884
1885 done:
1886         hci_dev_put(hdev);
1887         return err;
1888 }
1889
1890 int hci_dev_reset_stat(__u16 dev)
1891 {
1892         struct hci_dev *hdev;
1893         int ret = 0;
1894
1895         hdev = hci_dev_get(dev);
1896         if (!hdev)
1897                 return -ENODEV;
1898
1899         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1900                 ret = -EBUSY;
1901                 goto done;
1902         }
1903
1904         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1905                 ret = -EOPNOTSUPP;
1906                 goto done;
1907         }
1908
1909         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1910
1911 done:
1912         hci_dev_put(hdev);
1913         return ret;
1914 }
1915
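/* Keep the CONNECTABLE and DISCOVERABLE flags in sync with a scan mode
 * that was changed outside of mgmt (e.g. via the HCISETSCAN ioctl) and
 * send updated settings to mgmt if anything changed.
 */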
1916 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1917 {
1918         bool conn_changed, discov_changed;
1919
1920         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1921
1922         if ((scan & SCAN_PAGE))
1923                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1924                                                           HCI_CONNECTABLE);
1925         else
1926                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1927                                                            HCI_CONNECTABLE);
1928
1929         if ((scan & SCAN_INQUIRY)) {
1930                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1931                                                             HCI_DISCOVERABLE);
1932         } else {
1933                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1934                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1935                                                              HCI_DISCOVERABLE);
1936         }
1937
1938         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1939                 return;
1940
1941         if (conn_changed || discov_changed) {
1942                 /* In case this was disabled through mgmt */
1943                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1944
1945                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1946                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1947
1948                 mgmt_new_settings(hdev);
1949         }
1950 }
1951
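/* Handler for the legacy device-control ioctls. A minimal userspace
 * sketch of how these are typically driven (illustrative only; fd is a
 * hypothetical raw HCI socket):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *	ioctl(fd, HCISETSCAN, (unsigned long) &dr);
 */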
1952 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1953 {
1954         struct hci_dev *hdev;
1955         struct hci_dev_req dr;
1956         int err = 0;
1957
1958         if (copy_from_user(&dr, arg, sizeof(dr)))
1959                 return -EFAULT;
1960
1961         hdev = hci_dev_get(dr.dev_id);
1962         if (!hdev)
1963                 return -ENODEV;
1964
1965         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1966                 err = -EBUSY;
1967                 goto done;
1968         }
1969
1970         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1971                 err = -EOPNOTSUPP;
1972                 goto done;
1973         }
1974
1975         if (hdev->dev_type != HCI_PRIMARY) {
1976                 err = -EOPNOTSUPP;
1977                 goto done;
1978         }
1979
1980         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1981                 err = -EOPNOTSUPP;
1982                 goto done;
1983         }
1984
1985         switch (cmd) {
1986         case HCISETAUTH:
1987                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1988                                    HCI_INIT_TIMEOUT, NULL);
1989                 break;
1990
1991         case HCISETENCRYPT:
1992                 if (!lmp_encrypt_capable(hdev)) {
1993                         err = -EOPNOTSUPP;
1994                         break;
1995                 }
1996
1997                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1998                         /* Auth must be enabled first */
1999                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2000                                            HCI_INIT_TIMEOUT, NULL);
2001                         if (err)
2002                                 break;
2003                 }
2004
2005                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2006                                    HCI_INIT_TIMEOUT, NULL);
2007                 break;
2008
2009         case HCISETSCAN:
2010                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2011                                    HCI_INIT_TIMEOUT, NULL);
2012
2013                 /* Ensure that the connectable and discoverable states
2014                  * get correctly modified as this was a non-mgmt change.
2015                  */
2016                 if (!err)
2017                         hci_update_scan_state(hdev, dr.dev_opt);
2018                 break;
2019
2020         case HCISETLINKPOL:
2021                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2022                                    HCI_INIT_TIMEOUT, NULL);
2023                 break;
2024
2025         case HCISETLINKMODE:
2026                 hdev->link_mode = ((__u16) dr.dev_opt) &
2027                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2028                 break;
2029
2030         case HCISETPTYPE:
2031                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2032                         break;
2033
2034                 hdev->pkt_type = (__u16) dr.dev_opt;
2035                 mgmt_phy_configuration_changed(hdev, NULL);
2036                 break;
2037
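        /* For the MTU ioctls, dev_opt carries two __u16 values back to
         * back: the packet count in the first half and the MTU in the
         * second, i.e. dev_opt = pkts | (mtu << 16) on a little-endian
         * host.
         */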
2038         case HCISETACLMTU:
2039                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2040                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2041                 break;
2042
2043         case HCISETSCOMTU:
2044                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2045                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2046                 break;
2047
2048         default:
2049                 err = -EINVAL;
2050                 break;
2051         }
2052
2053 done:
2054         hci_dev_put(hdev);
2055         return err;
2056 }
2057
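/* Return the list of registered HCI devices to userspace. A minimal
 * usage sketch (illustrative only; fd is a hypothetical raw HCI
 * socket):
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	ioctl(fd, HCIGETDEVLIST, (unsigned long) dl);
 */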
2058 int hci_get_dev_list(void __user *arg)
2059 {
2060         struct hci_dev *hdev;
2061         struct hci_dev_list_req *dl;
2062         struct hci_dev_req *dr;
2063         int n = 0, size, err;
2064         __u16 dev_num;
2065
2066         if (get_user(dev_num, (__u16 __user *) arg))
2067                 return -EFAULT;
2068
2069         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2070                 return -EINVAL;
2071
2072         size = sizeof(*dl) + dev_num * sizeof(*dr);
2073
2074         dl = kzalloc(size, GFP_KERNEL);
2075         if (!dl)
2076                 return -ENOMEM;
2077
2078         dr = dl->dev_req;
2079
2080         read_lock(&hci_dev_list_lock);
2081         list_for_each_entry(hdev, &hci_dev_list, list) {
2082                 unsigned long flags = hdev->flags;
2083
2084                 /* When auto-off is configured the transport is running,
2085                  * but in that case still indicate that the device is
2086                  * actually down.
2087                  */
2088                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2089                         flags &= ~BIT(HCI_UP);
2090
2091                 (dr + n)->dev_id  = hdev->id;
2092                 (dr + n)->dev_opt = flags;
2093
2094                 if (++n >= dev_num)
2095                         break;
2096         }
2097         read_unlock(&hci_dev_list_lock);
2098
2099         dl->dev_num = n;
2100         size = sizeof(*dl) + n * sizeof(*dr);
2101
2102         err = copy_to_user(arg, dl, size);
2103         kfree(dl);
2104
2105         return err ? -EFAULT : 0;
2106 }
2107
2108 int hci_get_dev_info(void __user *arg)
2109 {
2110         struct hci_dev *hdev;
2111         struct hci_dev_info di;
2112         unsigned long flags;
2113         int err = 0;
2114
2115         if (copy_from_user(&di, arg, sizeof(di)))
2116                 return -EFAULT;
2117
2118         hdev = hci_dev_get(di.dev_id);
2119         if (!hdev)
2120                 return -ENODEV;
2121
2122         /* When auto-off is configured the transport is running,
2123          * but in that case still indicate that the device is
2124          * actually down.
2125          */
2126         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2127                 flags = hdev->flags & ~BIT(HCI_UP);
2128         else
2129                 flags = hdev->flags;
2130
2131         strcpy(di.name, hdev->name);
2132         di.bdaddr   = hdev->bdaddr;
2133         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2134         di.flags    = flags;
2135         di.pkt_type = hdev->pkt_type;
2136         if (lmp_bredr_capable(hdev)) {
2137                 di.acl_mtu  = hdev->acl_mtu;
2138                 di.acl_pkts = hdev->acl_pkts;
2139                 di.sco_mtu  = hdev->sco_mtu;
2140                 di.sco_pkts = hdev->sco_pkts;
2141         } else {
2142                 di.acl_mtu  = hdev->le_mtu;
2143                 di.acl_pkts = hdev->le_pkts;
2144                 di.sco_mtu  = 0;
2145                 di.sco_pkts = 0;
2146         }
2147         di.link_policy = hdev->link_policy;
2148         di.link_mode   = hdev->link_mode;
2149
2150         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2151         memcpy(&di.features, &hdev->features, sizeof(di.features));
2152
2153         if (copy_to_user(arg, &di, sizeof(di)))
2154                 err = -EFAULT;
2155
2156         hci_dev_put(hdev);
2157
2158         return err;
2159 }
2160
2161 /* ---- Interface to HCI drivers ---- */
2162
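/* rfkill callback: closing a blocked device is skipped during the setup
 * and config stages since those don't generate RF activity on their
 * own; the RFKILLED flag is checked again once setup completes.
 */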
2163 static int hci_rfkill_set_block(void *data, bool blocked)
2164 {
2165         struct hci_dev *hdev = data;
2166
2167         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2168
2169         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2170                 return -EBUSY;
2171
2172         if (blocked) {
2173                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2174                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2175                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2176                         hci_dev_do_close(hdev);
2177         } else {
2178                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2179         }
2180
2181         return 0;
2182 }
2183
2184 static const struct rfkill_ops hci_rfkill_ops = {
2185         .set_block = hci_rfkill_set_block,
2186 };
2187
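/* Deferred power-on work: brings the device up and then re-checks the
 * error conditions (rfkill, missing address) that were ignored while
 * the setup stage was running, powering the device back off if any of
 * them still holds.
 */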
2188 static void hci_power_on(struct work_struct *work)
2189 {
2190         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2191         int err;
2192
2193         BT_DBG("%s", hdev->name);
2194
2195         if (test_bit(HCI_UP, &hdev->flags) &&
2196             hci_dev_test_flag(hdev, HCI_MGMT) &&
2197             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2198                 cancel_delayed_work(&hdev->power_off);
2199                 hci_req_sync_lock(hdev);
2200                 err = __hci_req_hci_power_on(hdev);
2201                 hci_req_sync_unlock(hdev);
2202                 mgmt_power_on(hdev, err);
2203                 return;
2204         }
2205
2206         err = hci_dev_do_open(hdev);
2207         if (err < 0) {
2208                 hci_dev_lock(hdev);
2209                 mgmt_set_powered_failed(hdev, err);
2210                 hci_dev_unlock(hdev);
2211                 return;
2212         }
2213
2214         /* During the HCI setup phase, a few error conditions are
2215          * ignored and they need to be checked now. If they are still
2216          * valid, it is important to turn the device back off.
2217          */
2218         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2219             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2220             (hdev->dev_type == HCI_PRIMARY &&
2221              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2222              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2223                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2224                 hci_dev_do_close(hdev);
2225         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2226                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2227                                    HCI_AUTO_OFF_TIMEOUT);
2228         }
2229
2230         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2231                 /* For unconfigured devices, set the HCI_RAW flag
2232                  * so that userspace can easily identify them.
2233                  */
2234                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2235                         set_bit(HCI_RAW, &hdev->flags);
2236
2237                 /* For fully configured devices, this will send
2238                  * the Index Added event. For unconfigured devices,
2239                  * it will send the Unconfigured Index Added event.
2240                  *
2241                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2242                  * and no event will be sent.
2243                  */
2244                 mgmt_index_added(hdev);
2245         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2246                 /* When the controller is now configured, then it
2247                  * is important to clear the HCI_RAW flag.
2248                  */
2249                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2250                         clear_bit(HCI_RAW, &hdev->flags);
2251
2252                 /* Powering on the controller with HCI_CONFIG set only
2253                  * happens with the transition from unconfigured to
2254                  * configured. This will send the Index Added event.
2255                  */
2256                 mgmt_index_added(hdev);
2257         }
2258 }
2259
2260 static void hci_power_off(struct work_struct *work)
2261 {
2262         struct hci_dev *hdev = container_of(work, struct hci_dev,
2263                                             power_off.work);
2264
2265         BT_DBG("%s", hdev->name);
2266
2267         hci_dev_do_close(hdev);
2268 }
2269
2270 static void hci_error_reset(struct work_struct *work)
2271 {
2272         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2273
2274         BT_DBG("%s", hdev->name);
2275
2276         if (hdev->hw_error)
2277                 hdev->hw_error(hdev, hdev->hw_error_code);
2278         else
2279                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2280
2281         if (hci_dev_do_close(hdev))
2282                 return;
2283
2284         hci_dev_do_open(hdev);
2285 }
2286
2287 void hci_uuids_clear(struct hci_dev *hdev)
2288 {
2289         struct bt_uuid *uuid, *tmp;
2290
2291         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2292                 list_del(&uuid->list);
2293                 kfree(uuid);
2294         }
2295 }
2296
2297 void hci_link_keys_clear(struct hci_dev *hdev)
2298 {
2299         struct link_key *key;
2300
2301         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2302                 list_del_rcu(&key->list);
2303                 kfree_rcu(key, rcu);
2304         }
2305 }
2306
2307 void hci_smp_ltks_clear(struct hci_dev *hdev)
2308 {
2309         struct smp_ltk *k;
2310
2311         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2312                 list_del_rcu(&k->list);
2313                 kfree_rcu(k, rcu);
2314         }
2315 }
2316
2317 void hci_smp_irks_clear(struct hci_dev *hdev)
2318 {
2319         struct smp_irk *k;
2320
2321         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2322                 list_del_rcu(&k->list);
2323                 kfree_rcu(k, rcu);
2324         }
2325 }
2326
2327 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2328 {
2329         struct link_key *k;
2330
2331         rcu_read_lock();
2332         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2333                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2334                         rcu_read_unlock();
2335                         return k;
2336                 }
2337         }
2338         rcu_read_unlock();
2339
2340         return NULL;
2341 }
2342
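/* Decide whether a new link key should be stored persistently based on
 * its type and on the authentication requirements both sides used
 * during pairing.
 */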
2343 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2344                                u8 key_type, u8 old_key_type)
2345 {
2346         /* Legacy key */
2347         if (key_type < 0x03)
2348                 return true;
2349
2350         /* Debug keys are insecure so don't store them persistently */
2351         if (key_type == HCI_LK_DEBUG_COMBINATION)
2352                 return false;
2353
2354         /* Changed combination key and there's no previous one */
2355         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2356                 return false;
2357
2358         /* Security mode 3 case */
2359         if (!conn)
2360                 return true;
2361
2362         /* BR/EDR key derived using SC from an LE link */
2363         if (conn->type == LE_LINK)
2364                 return true;
2365
2366         /* Neither local nor remote side had no-bonding as a requirement */
2367         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2368                 return true;
2369
2370         /* Local side had dedicated bonding as requirement */
2371         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2372                 return true;
2373
2374         /* Remote side had dedicated bonding as requirement */
2375         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2376                 return true;
2377
2378         /* If none of the above criteria match, then don't store the key
2379          * persistently */
2380         return false;
2381 }
2382
2383 static u8 ltk_role(u8 type)
2384 {
2385         if (type == SMP_LTK)
2386                 return HCI_ROLE_MASTER;
2387
2388         return HCI_ROLE_SLAVE;
2389 }
2390
2391 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2392                              u8 addr_type, u8 role)
2393 {
2394         struct smp_ltk *k;
2395
2396         rcu_read_lock();
2397         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2398                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2399                         continue;
2400
2401                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2402                         rcu_read_unlock();
2403                         return k;
2404                 }
2405         }
2406         rcu_read_unlock();
2407
2408         return NULL;
2409 }
2410
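/* Resolve a Resolvable Private Address to its IRK. The cheap path
 * compares against the RPA cached in each IRK first; only when that
 * fails is the cryptographic smp_irk_matches() check run, with a
 * positive result cached for the next lookup.
 */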
2411 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2412 {
2413         struct smp_irk *irk;
2414
2415         rcu_read_lock();
2416         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2417                 if (!bacmp(&irk->rpa, rpa)) {
2418                         rcu_read_unlock();
2419                         return irk;
2420                 }
2421         }
2422
2423         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2424                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2425                         bacpy(&irk->rpa, rpa);
2426                         rcu_read_unlock();
2427                         return irk;
2428                 }
2429         }
2430         rcu_read_unlock();
2431
2432         return NULL;
2433 }
2434
2435 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2436                                      u8 addr_type)
2437 {
2438         struct smp_irk *irk;
2439
2440         /* Identity Address must be public or static random */
2441         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2442                 return NULL;
2443
2444         rcu_read_lock();
2445         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2446                 if (addr_type == irk->addr_type &&
2447                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2448                         rcu_read_unlock();
2449                         return irk;
2450                 }
2451         }
2452         rcu_read_unlock();
2453
2454         return NULL;
2455 }
2456
2457 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2458                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2459                                   u8 pin_len, bool *persistent)
2460 {
2461         struct link_key *key, *old_key;
2462         u8 old_key_type;
2463
2464         old_key = hci_find_link_key(hdev, bdaddr);
2465         if (old_key) {
2466                 old_key_type = old_key->type;
2467                 key = old_key;
2468         } else {
2469                 old_key_type = conn ? conn->key_type : 0xff;
2470                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2471                 if (!key)
2472                         return NULL;
2473                 list_add_rcu(&key->list, &hdev->link_keys);
2474         }
2475
2476         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2477
2478         /* Some buggy controller combinations generate a changed
2479          * combination key for legacy pairing even when there's no
2480          * previous key */
2481         if (type == HCI_LK_CHANGED_COMBINATION &&
2482             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2483                 type = HCI_LK_COMBINATION;
2484                 if (conn)
2485                         conn->key_type = type;
2486         }
2487
2488         bacpy(&key->bdaddr, bdaddr);
2489         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2490         key->pin_len = pin_len;
2491
2492         if (type == HCI_LK_CHANGED_COMBINATION)
2493                 key->type = old_key_type;
2494         else
2495                 key->type = type;
2496
2497         if (persistent)
2498                 *persistent = hci_persistent_key(hdev, conn, type,
2499                                                  old_key_type);
2500
2501         return key;
2502 }
2503
2504 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2505                             u8 addr_type, u8 type, u8 authenticated,
2506                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2507 {
2508         struct smp_ltk *key, *old_key;
2509         u8 role = ltk_role(type);
2510
2511         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2512         if (old_key)
2513                 key = old_key;
2514         else {
2515                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2516                 if (!key)
2517                         return NULL;
2518                 list_add_rcu(&key->list, &hdev->long_term_keys);
2519         }
2520
2521         bacpy(&key->bdaddr, bdaddr);
2522         key->bdaddr_type = addr_type;
2523         memcpy(key->val, tk, sizeof(key->val));
2524         key->authenticated = authenticated;
2525         key->ediv = ediv;
2526         key->rand = rand;
2527         key->enc_size = enc_size;
2528         key->type = type;
2529
2530         return key;
2531 }
2532
2533 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2534                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2535 {
2536         struct smp_irk *irk;
2537
2538         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2539         if (!irk) {
2540                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2541                 if (!irk)
2542                         return NULL;
2543
2544                 bacpy(&irk->bdaddr, bdaddr);
2545                 irk->addr_type = addr_type;
2546
2547                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2548         }
2549
2550         memcpy(irk->val, val, 16);
2551         bacpy(&irk->rpa, rpa);
2552
2553         return irk;
2554 }
2555
2556 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2557 {
2558         struct link_key *key;
2559
2560         key = hci_find_link_key(hdev, bdaddr);
2561         if (!key)
2562                 return -ENOENT;
2563
2564         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2565
2566         list_del_rcu(&key->list);
2567         kfree_rcu(key, rcu);
2568
2569         return 0;
2570 }
2571
2572 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2573 {
2574         struct smp_ltk *k;
2575         int removed = 0;
2576
2577         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2578                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2579                         continue;
2580
2581                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2582
2583                 list_del_rcu(&k->list);
2584                 kfree_rcu(k, rcu);
2585                 removed++;
2586         }
2587
2588         return removed ? 0 : -ENOENT;
2589 }
2590
2591 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2592 {
2593         struct smp_irk *k;
2594
2595         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2596                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2597                         continue;
2598
2599                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2600
2601                 list_del_rcu(&k->list);
2602                 kfree_rcu(k, rcu);
2603         }
2604 }
2605
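/* Check whether a bond exists for the given address: a stored link key
 * for BR/EDR, or a long term key for LE after first resolving the
 * address to its identity via any matching IRK.
 */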
2606 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2607 {
2608         struct smp_ltk *k;
2609         struct smp_irk *irk;
2610         u8 addr_type;
2611
2612         if (type == BDADDR_BREDR) {
2613                 if (hci_find_link_key(hdev, bdaddr))
2614                         return true;
2615                 return false;
2616         }
2617
2618         /* Convert to HCI addr type which struct smp_ltk uses */
2619         if (type == BDADDR_LE_PUBLIC)
2620                 addr_type = ADDR_LE_DEV_PUBLIC;
2621         else
2622                 addr_type = ADDR_LE_DEV_RANDOM;
2623
2624         irk = hci_get_irk(hdev, bdaddr, addr_type);
2625         if (irk) {
2626                 bdaddr = &irk->bdaddr;
2627                 addr_type = irk->addr_type;
2628         }
2629
2630         rcu_read_lock();
2631         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2632                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2633                         rcu_read_unlock();
2634                         return true;
2635                 }
2636         }
2637         rcu_read_unlock();
2638
2639         return false;
2640 }
2641
2642 /* HCI command timer function */
2643 static void hci_cmd_timeout(struct work_struct *work)
2644 {
2645         struct hci_dev *hdev = container_of(work, struct hci_dev,
2646                                             cmd_timer.work);
2647
2648         if (hdev->sent_cmd) {
2649                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2650                 u16 opcode = __le16_to_cpu(sent->opcode);
2651
2652                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2653         } else {
2654                 bt_dev_err(hdev, "command tx timeout");
2655         }
2656
2657         if (hdev->cmd_timeout)
2658                 hdev->cmd_timeout(hdev);
2659
2660         atomic_set(&hdev->cmd_cnt, 1);
2661         queue_work(hdev->workqueue, &hdev->cmd_work);
2662 }
2663
2664 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2665                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2666 {
2667         struct oob_data *data;
2668
2669         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2670                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2671                         continue;
2672                 if (data->bdaddr_type != bdaddr_type)
2673                         continue;
2674                 return data;
2675         }
2676
2677         return NULL;
2678 }
2679
2680 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2681                                u8 bdaddr_type)
2682 {
2683         struct oob_data *data;
2684
2685         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2686         if (!data)
2687                 return -ENOENT;
2688
2689         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2690
2691         list_del(&data->list);
2692         kfree(data);
2693
2694         return 0;
2695 }
2696
2697 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2698 {
2699         struct oob_data *data, *n;
2700
2701         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2702                 list_del(&data->list);
2703                 kfree(data);
2704         }
2705 }
2706
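/* Store remote OOB pairing data. data->present encodes which value
 * pairs are valid: 0x01 for P-192 only, 0x02 for P-256 only and 0x03
 * for both.
 */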
2707 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2708                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2709                             u8 *hash256, u8 *rand256)
2710 {
2711         struct oob_data *data;
2712
2713         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2714         if (!data) {
2715                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2716                 if (!data)
2717                         return -ENOMEM;
2718
2719                 bacpy(&data->bdaddr, bdaddr);
2720                 data->bdaddr_type = bdaddr_type;
2721                 list_add(&data->list, &hdev->remote_oob_data);
2722         }
2723
2724         if (hash192 && rand192) {
2725                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2726                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2727                 if (hash256 && rand256)
2728                         data->present = 0x03;
2729         } else {
2730                 memset(data->hash192, 0, sizeof(data->hash192));
2731                 memset(data->rand192, 0, sizeof(data->rand192));
2732                 if (hash256 && rand256)
2733                         data->present = 0x02;
2734                 else
2735                         data->present = 0x00;
2736         }
2737
2738         if (hash256 && rand256) {
2739                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2740                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2741         } else {
2742                 memset(data->hash256, 0, sizeof(data->hash256));
2743                 memset(data->rand256, 0, sizeof(data->rand256));
2744                 if (hash192 && rand192)
2745                         data->present = 0x01;
2746         }
2747
2748         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2749
2750         return 0;
2751 }
2752
2753 /* This function requires the caller holds hdev->lock */
2754 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2755 {
2756         struct adv_info *adv_instance;
2757
2758         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2759                 if (adv_instance->instance == instance)
2760                         return adv_instance;
2761         }
2762
2763         return NULL;
2764 }
2765
2766 /* This function requires the caller holds hdev->lock */
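/* Return the advertising instance following @instance, wrapping around
 * to the first entry when the end of the list is reached.
 */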
2767 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2768 {
2769         struct adv_info *cur_instance;
2770
2771         cur_instance = hci_find_adv_instance(hdev, instance);
2772         if (!cur_instance)
2773                 return NULL;
2774
2775         if (cur_instance == list_last_entry(&hdev->adv_instances,
2776                                             struct adv_info, list))
2777                 return list_first_entry(&hdev->adv_instances,
2778                                         struct adv_info, list);
2779         else
2780                 return list_next_entry(cur_instance, list);
2781 }
2782
2783 /* This function requires the caller holds hdev->lock */
2784 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2785 {
2786         struct adv_info *adv_instance;
2787
2788         adv_instance = hci_find_adv_instance(hdev, instance);
2789         if (!adv_instance)
2790                 return -ENOENT;
2791
2792         BT_DBG("%s removing instance %d", hdev->name, instance);
2793
2794         if (hdev->cur_adv_instance == instance) {
2795                 if (hdev->adv_instance_timeout) {
2796                         cancel_delayed_work(&hdev->adv_instance_expire);
2797                         hdev->adv_instance_timeout = 0;
2798                 }
2799                 hdev->cur_adv_instance = 0x00;
2800         }
2801
2802         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2803
2804         list_del(&adv_instance->list);
2805         kfree(adv_instance);
2806
2807         hdev->adv_instance_cnt--;
2808
2809         return 0;
2810 }
2811
2812 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2813 {
2814         struct adv_info *adv_instance, *n;
2815
2816         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2817                 adv_instance->rpa_expired = rpa_expired;
2818 }
2819
2820 /* This function requires the caller holds hdev->lock */
2821 void hci_adv_instances_clear(struct hci_dev *hdev)
2822 {
2823         struct adv_info *adv_instance, *n;
2824
2825         if (hdev->adv_instance_timeout) {
2826                 cancel_delayed_work(&hdev->adv_instance_expire);
2827                 hdev->adv_instance_timeout = 0;
2828         }
2829
2830         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2831                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2832                 list_del(&adv_instance->list);
2833                 kfree(adv_instance);
2834         }
2835
2836         hdev->adv_instance_cnt = 0;
2837         hdev->cur_adv_instance = 0x00;
2838 }
2839
2840 static void adv_instance_rpa_expired(struct work_struct *work)
2841 {
2842         struct adv_info *adv_instance = container_of(work, struct adv_info,
2843                                                      rpa_expired_cb.work);
2844
2845         BT_DBG("");
2846
2847         adv_instance->rpa_expired = true;
2848 }
2849
2850 /* This function requires the caller holds hdev->lock */
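/* Add a new advertising instance or update an existing one in place.
 * A zero duration falls back to HCI_DEFAULT_ADV_DURATION.
 */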
2851 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2852                          u16 adv_data_len, u8 *adv_data,
2853                          u16 scan_rsp_len, u8 *scan_rsp_data,
2854                          u16 timeout, u16 duration)
2855 {
2856         struct adv_info *adv_instance;
2857
2858         adv_instance = hci_find_adv_instance(hdev, instance);
2859         if (adv_instance) {
2860                 memset(adv_instance->adv_data, 0,
2861                        sizeof(adv_instance->adv_data));
2862                 memset(adv_instance->scan_rsp_data, 0,
2863                        sizeof(adv_instance->scan_rsp_data));
2864         } else {
2865                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2866                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2867                         return -EOVERFLOW;
2868
2869                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2870                 if (!adv_instance)
2871                         return -ENOMEM;
2872
2873                 adv_instance->pending = true;
2874                 adv_instance->instance = instance;
2875                 list_add(&adv_instance->list, &hdev->adv_instances);
2876                 hdev->adv_instance_cnt++;
2877         }
2878
2879         adv_instance->flags = flags;
2880         adv_instance->adv_data_len = adv_data_len;
2881         adv_instance->scan_rsp_len = scan_rsp_len;
2882
2883         if (adv_data_len)
2884                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2885
2886         if (scan_rsp_len)
2887                 memcpy(adv_instance->scan_rsp_data,
2888                        scan_rsp_data, scan_rsp_len);
2889
2890         adv_instance->timeout = timeout;
2891         adv_instance->remaining_time = timeout;
2892
2893         if (duration == 0)
2894                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2895         else
2896                 adv_instance->duration = duration;
2897
2898         adv_instance->tx_power = HCI_TX_POWER_INVALID;
2899
2900         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2901                           adv_instance_rpa_expired);
2902
2903         BT_DBG("%s for instance %d", hdev->name, instance);
2904
2905         return 0;
2906 }
2907
2908 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2909                                          bdaddr_t *bdaddr, u8 type)
2910 {
2911         struct bdaddr_list *b;
2912
2913         list_for_each_entry(b, bdaddr_list, list) {
2914                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2915                         return b;
2916         }
2917
2918         return NULL;
2919 }
2920
2921 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2922                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2923                                 u8 type)
2924 {
2925         struct bdaddr_list_with_irk *b;
2926
2927         list_for_each_entry(b, bdaddr_list, list) {
2928                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2929                         return b;
2930         }
2931
2932         return NULL;
2933 }
2934
2935 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2936 {
2937         struct bdaddr_list *b, *n;
2938
2939         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2940                 list_del(&b->list);
2941                 kfree(b);
2942         }
2943 }
2944
2945 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2946 {
2947         struct bdaddr_list *entry;
2948
2949         if (!bacmp(bdaddr, BDADDR_ANY))
2950                 return -EBADF;
2951
2952         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2953                 return -EEXIST;
2954
2955         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2956         if (!entry)
2957                 return -ENOMEM;
2958
2959         bacpy(&entry->bdaddr, bdaddr);
2960         entry->bdaddr_type = type;
2961
2962         list_add(&entry->list, list);
2963
2964         return 0;
2965 }
2966
2967 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2968                                         u8 type, u8 *peer_irk, u8 *local_irk)
2969 {
2970         struct bdaddr_list_with_irk *entry;
2971
2972         if (!bacmp(bdaddr, BDADDR_ANY))
2973                 return -EBADF;
2974
2975         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2976                 return -EEXIST;
2977
2978         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2979         if (!entry)
2980                 return -ENOMEM;
2981
2982         bacpy(&entry->bdaddr, bdaddr);
2983         entry->bdaddr_type = type;
2984
2985         if (peer_irk)
2986                 memcpy(entry->peer_irk, peer_irk, 16);
2987
2988         if (local_irk)
2989                 memcpy(entry->local_irk, local_irk, 16);
2990
2991         list_add(&entry->list, list);
2992
2993         return 0;
2994 }
2995
2996 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2997 {
2998         struct bdaddr_list *entry;
2999
3000         if (!bacmp(bdaddr, BDADDR_ANY)) {
3001                 hci_bdaddr_list_clear(list);
3002                 return 0;
3003         }
3004
3005         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3006         if (!entry)
3007                 return -ENOENT;
3008
3009         list_del(&entry->list);
3010         kfree(entry);
3011
3012         return 0;
3013 }
3014
3015 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3016                                                         u8 type)
3017 {
3018         struct bdaddr_list_with_irk *entry;
3019
3020         if (!bacmp(bdaddr, BDADDR_ANY)) {
3021                 hci_bdaddr_list_clear(list);
3022                 return 0;
3023         }
3024
3025         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3026         if (!entry)
3027                 return -ENOENT;
3028
3029         list_del(&entry->list);
3030         kfree(entry);
3031
3032         return 0;
3033 }
3034
3035 /* This function requires the caller holds hdev->lock */
3036 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3037                                                bdaddr_t *addr, u8 addr_type)
3038 {
3039         struct hci_conn_params *params;
3040
3041         list_for_each_entry(params, &hdev->le_conn_params, list) {
3042                 if (bacmp(&params->addr, addr) == 0 &&
3043                     params->addr_type == addr_type) {
3044                         return params;
3045                 }
3046         }
3047
3048         return NULL;
3049 }
3050
3051 /* This function requires the caller holds hdev->lock */
3052 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3053                                                   bdaddr_t *addr, u8 addr_type)
3054 {
3055         struct hci_conn_params *param;
3056
3057         list_for_each_entry(param, list, action) {
3058                 if (bacmp(&param->addr, addr) == 0 &&
3059                     param->addr_type == addr_type)
3060                         return param;
3061         }
3062
3063         return NULL;
3064 }
3065
3066 /* This function requires the caller holds hdev->lock */
3067 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3068                                             bdaddr_t *addr, u8 addr_type)
3069 {
3070         struct hci_conn_params *params;
3071
3072         params = hci_conn_params_lookup(hdev, addr, addr_type);
3073         if (params)
3074                 return params;
3075
3076         params = kzalloc(sizeof(*params), GFP_KERNEL);
3077         if (!params) {
3078                 bt_dev_err(hdev, "out of memory");
3079                 return NULL;
3080         }
3081
3082         bacpy(&params->addr, addr);
3083         params->addr_type = addr_type;
3084
3085         list_add(&params->list, &hdev->le_conn_params);
3086         INIT_LIST_HEAD(&params->action);
3087
3088         params->conn_min_interval = hdev->le_conn_min_interval;
3089         params->conn_max_interval = hdev->le_conn_max_interval;
3090         params->conn_latency = hdev->le_conn_latency;
3091         params->supervision_timeout = hdev->le_supv_timeout;
3092         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3093
3094         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3095
3096         return params;
3097 }
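
/* Usage sketch (illustrative only): callers run under hdev->lock and
 * typically adjust auto_connect after the defaults are filled in, e.g.:
 *
 *      params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *      if (!params)
 *              return -ENOMEM;
 *      params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *      hci_update_background_scan(hdev);
 */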
3098
3099 static void hci_conn_params_free(struct hci_conn_params *params)
3100 {
3101         if (params->conn) {
3102                 hci_conn_drop(params->conn);
3103                 hci_conn_put(params->conn);
3104         }
3105
3106         list_del(&params->action);
3107         list_del(&params->list);
3108         kfree(params);
3109 }
3110
3111 /* This function requires the caller holds hdev->lock */
3112 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3113 {
3114         struct hci_conn_params *params;
3115
3116         params = hci_conn_params_lookup(hdev, addr, addr_type);
3117         if (!params)
3118                 return;
3119
3120         hci_conn_params_free(params);
3121
3122         hci_update_background_scan(hdev);
3123
3124         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3125 }
3126
3127 /* This function requires the caller holds hdev->lock */
3128 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3129 {
3130         struct hci_conn_params *params, *tmp;
3131
3132         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3133                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3134                         continue;
3135
3136                 /* If trying to establish a one-time connection to a disabled
3137                  * device, leave the params but mark them as one-time only.
3138                  */
3139                 if (params->explicit_connect) {
3140                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3141                         continue;
3142                 }
3143
3144                 list_del(&params->list);
3145                 kfree(params);
3146         }
3147
3148         BT_DBG("All LE disabled connection parameters were removed");
3149 }
3150
3151 /* This function requires the caller holds hdev->lock */
3152 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3153 {
3154         struct hci_conn_params *params, *tmp;
3155
3156         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3157                 hci_conn_params_free(params);
3158
3159         BT_DBG("All LE connection parameters were removed");
3160 }
3161
3162 /* Copy the Identity Address of the controller.
3163  *
3164  * If the controller has a public BD_ADDR, then by default use that one.
3165  * If this is a LE only controller without a public address, default to
3166  * the static random address.
3167  *
3168  * For debugging purposes it is possible to force controllers with a
3169  * public address to use the static random address instead.
3170  *
3171  * In case BR/EDR has been disabled on a dual-mode controller and
3172  * userspace has configured a static address, then that address
3173  * becomes the identity address instead of the public BR/EDR address.
3174  */
3175 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3176                                u8 *bdaddr_type)
3177 {
3178         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3179             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3180             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3181              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3182                 bacpy(bdaddr, &hdev->static_addr);
3183                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3184         } else {
3185                 bacpy(bdaddr, &hdev->bdaddr);
3186                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3187         }
3188 }
3189
3190 /* Alloc HCI device */
3191 struct hci_dev *hci_alloc_dev(void)
3192 {
3193         struct hci_dev *hdev;
3194
3195         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3196         if (!hdev)
3197                 return NULL;
3198
3199         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3200         hdev->esco_type = (ESCO_HV1);
3201         hdev->link_mode = (HCI_LM_ACCEPT);
3202         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3203         hdev->io_capability = 0x03;     /* No Input No Output */
3204         hdev->manufacturer = 0xffff;    /* Default to internal use */
3205         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3206         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3207         hdev->adv_instance_cnt = 0;
3208         hdev->cur_adv_instance = 0x00;
3209         hdev->adv_instance_timeout = 0;
3210
3211         hdev->sniff_max_interval = 800;
3212         hdev->sniff_min_interval = 80;
3213
3214         hdev->le_adv_channel_map = 0x07;
3215         hdev->le_adv_min_interval = 0x0800;
3216         hdev->le_adv_max_interval = 0x0800;
3217         hdev->le_scan_interval = 0x0060;
3218         hdev->le_scan_window = 0x0030;
3219         hdev->le_conn_min_interval = 0x0018;
3220         hdev->le_conn_max_interval = 0x0028;
3221         hdev->le_conn_latency = 0x0000;
3222         hdev->le_supv_timeout = 0x002a;
3223         hdev->le_def_tx_len = 0x001b;
3224         hdev->le_def_tx_time = 0x0148;
3225         hdev->le_max_tx_len = 0x001b;
3226         hdev->le_max_tx_time = 0x0148;
3227         hdev->le_max_rx_len = 0x001b;
3228         hdev->le_max_rx_time = 0x0148;
3229         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3230         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3231         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3232         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3233         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
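
        /* Note on units: the scan interval/window and advertising
         * intervals above are in 0.625 ms slots (0x0060 = 60 ms,
         * 0x0030 = 30 ms, 0x0800 = 1.28 s), the connection interval
         * bounds in 1.25 ms units (0x0018 = 30 ms, 0x0028 = 50 ms) and
         * the supervision timeout in 10 ms units (0x002a = 420 ms).
         */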
3234
3235         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3236         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3237         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3238         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3239         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3240         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3241
3242         mutex_init(&hdev->lock);
3243         mutex_init(&hdev->req_lock);
3244
3245         INIT_LIST_HEAD(&hdev->mgmt_pending);
3246         INIT_LIST_HEAD(&hdev->blacklist);
3247         INIT_LIST_HEAD(&hdev->whitelist);
3248         INIT_LIST_HEAD(&hdev->uuids);
3249         INIT_LIST_HEAD(&hdev->link_keys);
3250         INIT_LIST_HEAD(&hdev->long_term_keys);
3251         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3252         INIT_LIST_HEAD(&hdev->remote_oob_data);
3253         INIT_LIST_HEAD(&hdev->le_white_list);
3254         INIT_LIST_HEAD(&hdev->le_resolv_list);
3255         INIT_LIST_HEAD(&hdev->le_conn_params);
3256         INIT_LIST_HEAD(&hdev->pend_le_conns);
3257         INIT_LIST_HEAD(&hdev->pend_le_reports);
3258         INIT_LIST_HEAD(&hdev->conn_hash.list);
3259         INIT_LIST_HEAD(&hdev->adv_instances);
3260
3261         INIT_WORK(&hdev->rx_work, hci_rx_work);
3262         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3263         INIT_WORK(&hdev->tx_work, hci_tx_work);
3264         INIT_WORK(&hdev->power_on, hci_power_on);
3265         INIT_WORK(&hdev->error_reset, hci_error_reset);
3266
3267         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3268
3269         skb_queue_head_init(&hdev->rx_q);
3270         skb_queue_head_init(&hdev->cmd_q);
3271         skb_queue_head_init(&hdev->raw_q);
3272
3273         init_waitqueue_head(&hdev->req_wait_q);
3274
3275         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3276
3277         hci_request_setup(hdev);
3278
3279         hci_init_sysfs(hdev);
3280         discovery_init(hdev);
3281
3282         return hdev;
3283 }
3284 EXPORT_SYMBOL(hci_alloc_dev);
3285
3286 /* Free HCI device */
3287 void hci_free_dev(struct hci_dev *hdev)
3288 {
3289         /* will free via device release */
3290         put_device(&hdev->dev);
3291 }
3292 EXPORT_SYMBOL(hci_free_dev);
3293
3294 /* Register HCI device */
3295 int hci_register_dev(struct hci_dev *hdev)
3296 {
3297         int id, error;
3298
3299         if (!hdev->open || !hdev->close || !hdev->send)
3300                 return -EINVAL;
3301
3302         /* Do not allow HCI_AMP devices to register at index 0,
3303          * so the index can be used as the AMP controller ID.
3304          */
3305         switch (hdev->dev_type) {
3306         case HCI_PRIMARY:
3307                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3308                 break;
3309         case HCI_AMP:
3310                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3311                 break;
3312         default:
3313                 return -EINVAL;
3314         }
3315
3316         if (id < 0)
3317                 return id;
3318
3319         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3320         hdev->id = id;
3321
3322         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3323
3324         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3325         if (!hdev->workqueue) {
3326                 error = -ENOMEM;
3327                 goto err;
3328         }
3329
3330         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3331                                                       hdev->name);
3332         if (!hdev->req_workqueue) {
3333                 destroy_workqueue(hdev->workqueue);
3334                 error = -ENOMEM;
3335                 goto err;
3336         }
3337
3338         if (!IS_ERR_OR_NULL(bt_debugfs))
3339                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3340
3341         dev_set_name(&hdev->dev, "%s", hdev->name);
3342
3343         error = device_add(&hdev->dev);
3344         if (error < 0)
3345                 goto err_wqueue;
3346
3347         hci_leds_init(hdev);
3348
3349         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3350                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3351                                     hdev);
3352         if (hdev->rfkill) {
3353                 if (rfkill_register(hdev->rfkill) < 0) {
3354                         rfkill_destroy(hdev->rfkill);
3355                         hdev->rfkill = NULL;
3356                 }
3357         }
3358
3359         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3360                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3361
3362         hci_dev_set_flag(hdev, HCI_SETUP);
3363         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3364
3365         if (hdev->dev_type == HCI_PRIMARY) {
3366                 /* Assume BR/EDR support until proven otherwise (such as
3367                  * through reading supported features during init).
3368                  */
3369                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3370         }
3371
3372         write_lock(&hci_dev_list_lock);
3373         list_add(&hdev->list, &hci_dev_list);
3374         write_unlock(&hci_dev_list_lock);
3375
3376         /* Devices that are marked for raw-only usage are unconfigured
3377          * and should not be included in normal operation.
3378          */
3379         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3380                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3381
3382         hci_sock_dev_event(hdev, HCI_DEV_REG);
3383         hci_dev_hold(hdev);
3384
3385         queue_work(hdev->req_workqueue, &hdev->power_on);
3386
3387         return id;
3388
3389 err_wqueue:
3390         debugfs_remove_recursive(hdev->debugfs);
3391         destroy_workqueue(hdev->workqueue);
3392         destroy_workqueue(hdev->req_workqueue);
3393 err:
3394         ida_simple_remove(&hci_index_ida, hdev->id);
3395
3396         return error;
3397 }
3398 EXPORT_SYMBOL(hci_register_dev);
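
/* Usage sketch (illustrative only, with hypothetical driver callbacks
 * my_open/my_close/my_send): a transport driver pairs the alloc and
 * register calls as follows:
 *
 *      hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 */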
3399
3400 /* Unregister HCI device */
3401 void hci_unregister_dev(struct hci_dev *hdev)
3402 {
3403         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3404
3405         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3406
3407         write_lock(&hci_dev_list_lock);
3408         list_del(&hdev->list);
3409         write_unlock(&hci_dev_list_lock);
3410
3411         cancel_work_sync(&hdev->power_on);
3412
3413         hci_dev_do_close(hdev);
3414
3415         if (!test_bit(HCI_INIT, &hdev->flags) &&
3416             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3417             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3418                 hci_dev_lock(hdev);
3419                 mgmt_index_removed(hdev);
3420                 hci_dev_unlock(hdev);
3421         }
3422
3423         /* mgmt_index_removed should take care of emptying the
3424          * pending list */
3425         BUG_ON(!list_empty(&hdev->mgmt_pending));
3426
3427         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3428
3429         if (hdev->rfkill) {
3430                 rfkill_unregister(hdev->rfkill);
3431                 rfkill_destroy(hdev->rfkill);
3432         }
3433
3434         device_del(&hdev->dev);
3435         /* Actual cleanup is deferred until hci_cleanup_dev(). */
3436         hci_dev_put(hdev);
3437 }
3438 EXPORT_SYMBOL(hci_unregister_dev);
3439
3440 /* Cleanup HCI device */
3441 void hci_cleanup_dev(struct hci_dev *hdev)
3442 {
3443         debugfs_remove_recursive(hdev->debugfs);
3444         kfree_const(hdev->hw_info);
3445         kfree_const(hdev->fw_info);
3446
3447         destroy_workqueue(hdev->workqueue);
3448         destroy_workqueue(hdev->req_workqueue);
3449
3450         hci_dev_lock(hdev);
3451         hci_bdaddr_list_clear(&hdev->blacklist);
3452         hci_bdaddr_list_clear(&hdev->whitelist);
3453         hci_uuids_clear(hdev);
3454         hci_link_keys_clear(hdev);
3455         hci_smp_ltks_clear(hdev);
3456         hci_smp_irks_clear(hdev);
3457         hci_remote_oob_data_clear(hdev);
3458         hci_adv_instances_clear(hdev);
3459         hci_bdaddr_list_clear(&hdev->le_white_list);
3460         hci_bdaddr_list_clear(&hdev->le_resolv_list);
3461         hci_conn_params_clear_all(hdev);
3462         hci_discovery_filter_clear(hdev);
3463         hci_dev_unlock(hdev);
3464
3465         ida_simple_remove(&hci_index_ida, hdev->id);
3466 }
3467
3468 /* Suspend HCI device */
3469 int hci_suspend_dev(struct hci_dev *hdev)
3470 {
3471         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3472         return 0;
3473 }
3474 EXPORT_SYMBOL(hci_suspend_dev);
3475
3476 /* Resume HCI device */
3477 int hci_resume_dev(struct hci_dev *hdev)
3478 {
3479         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3480         return 0;
3481 }
3482 EXPORT_SYMBOL(hci_resume_dev);
3483
3484 /* Reset HCI device */
3485 int hci_reset_dev(struct hci_dev *hdev)
3486 {
3487         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
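        /* Event code HCI_EV_HARDWARE_ERROR, parameter length 1, hardware code 0x00 */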
3488         struct sk_buff *skb;
3489
3490         skb = bt_skb_alloc(3, GFP_ATOMIC);
3491         if (!skb)
3492                 return -ENOMEM;
3493
3494         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3495         skb_put_data(skb, hw_err, 3);
3496
3497         /* Send Hardware Error to upper stack */
3498         return hci_recv_frame(hdev, skb);
3499 }
3500 EXPORT_SYMBOL(hci_reset_dev);
3501
3502 /* Receive frame from HCI drivers */
3503 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3504 {
3505         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3506                       !test_bit(HCI_INIT, &hdev->flags))) {
3507                 kfree_skb(skb);
3508                 return -ENXIO;
3509         }
3510
3511         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3512             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3513             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3514                 kfree_skb(skb);
3515                 return -EINVAL;
3516         }
3517
3518         /* Incoming skb */
3519         bt_cb(skb)->incoming = 1;
3520
3521         /* Time stamp */
3522         __net_timestamp(skb);
3523
3524         skb_queue_tail(&hdev->rx_q, skb);
3525         queue_work(hdev->workqueue, &hdev->rx_work);
3526
3527         return 0;
3528 }
3529 EXPORT_SYMBOL(hci_recv_frame);
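
/* RX path sketch (illustrative only, assuming a hypothetical driver
 * buffer buf/count holding one complete event packet):
 *
 *      skb = bt_skb_alloc(count, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *      skb_put_data(skb, buf, count);
 *      return hci_recv_frame(hdev, skb);
 */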
3530
3531 /* Receive diagnostic message from HCI drivers */
3532 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3533 {
3534         /* Mark as diagnostic packet */
3535         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3536
3537         /* Time stamp */
3538         __net_timestamp(skb);
3539
3540         skb_queue_tail(&hdev->rx_q, skb);
3541         queue_work(hdev->workqueue, &hdev->rx_work);
3542
3543         return 0;
3544 }
3545 EXPORT_SYMBOL(hci_recv_diag);
3546
3547 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3548 {
3549         va_list vargs;
3550
3551         va_start(vargs, fmt);
3552         kfree_const(hdev->hw_info);
3553         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3554         va_end(vargs);
3555 }
3556 EXPORT_SYMBOL(hci_set_hw_info);
3557
3558 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3559 {
3560         va_list vargs;
3561
3562         va_start(vargs, fmt);
3563         kfree_const(hdev->fw_info);
3564         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3565         va_end(vargs);
3566 }
3567 EXPORT_SYMBOL(hci_set_fw_info);
3568
3569 /* ---- Interface to upper protocols ---- */
3570
3571 int hci_register_cb(struct hci_cb *cb)
3572 {
3573         BT_DBG("%p name %s", cb, cb->name);
3574
3575         mutex_lock(&hci_cb_list_lock);
3576         list_add_tail(&cb->list, &hci_cb_list);
3577         mutex_unlock(&hci_cb_list_lock);
3578
3579         return 0;
3580 }
3581 EXPORT_SYMBOL(hci_register_cb);
3582
3583 int hci_unregister_cb(struct hci_cb *cb)
3584 {
3585         BT_DBG("%p name %s", cb, cb->name);
3586
3587         mutex_lock(&hci_cb_list_lock);
3588         list_del(&cb->list);
3589         mutex_unlock(&hci_cb_list_lock);
3590
3591         return 0;
3592 }
3593 EXPORT_SYMBOL(hci_unregister_cb);
3594
3595 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3596 {
3597         int err;
3598
3599         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3600                skb->len);
3601
3602         /* Time stamp */
3603         __net_timestamp(skb);
3604
3605         /* Send copy to monitor */
3606         hci_send_to_monitor(hdev, skb);
3607
3608         if (atomic_read(&hdev->promisc)) {
3609                 /* Send copy to the sockets */
3610                 hci_send_to_sock(hdev, skb);
3611         }
3612
3613         /* Get rid of the skb owner prior to sending to the driver. */
3614         skb_orphan(skb);
3615
3616         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3617                 kfree_skb(skb);
3618                 return;
3619         }
3620
3621         err = hdev->send(hdev, skb);
3622         if (err < 0) {
3623                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3624                 kfree_skb(skb);
3625         }
3626 }
3627
3628 /* Send HCI command */
3629 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3630                  const void *param)
3631 {
3632         struct sk_buff *skb;
3633
3634         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3635
3636         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3637         if (!skb) {
3638                 bt_dev_err(hdev, "no memory for command");
3639                 return -ENOMEM;
3640         }
3641
3642         /* Stand-alone HCI commands must be flagged as
3643          * single-command requests.
3644          */
3645         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3646
3647         skb_queue_tail(&hdev->cmd_q, skb);
3648         queue_work(hdev->workqueue, &hdev->cmd_work);
3649
3650         return 0;
3651 }
3652
3653 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3654                    const void *param)
3655 {
3656         struct sk_buff *skb;
3657
3658         if (hci_opcode_ogf(opcode) != 0x3f) {
3659                 /* A controller receiving a command shall respond with either
3660                  * a Command Status Event or a Command Complete Event.
3661                  * Therefore, all standard HCI commands must be sent via the
3662                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3663                  * Some vendors do not comply with this rule for vendor-specific
3664                  * commands and do not return any event. We want to support
3665                  * unresponded commands for such cases only.
3666                  */
3667                 bt_dev_err(hdev, "unresponded command not supported");
3668                 return -EINVAL;
3669         }
3670
3671         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3672         if (!skb) {
3673                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3674                            opcode);
3675                 return -ENOMEM;
3676         }
3677
3678         hci_send_frame(hdev, skb);
3679
3680         return 0;
3681 }
3682 EXPORT_SYMBOL(__hci_cmd_send);
3683
3684 /* Get data from the previously sent command */
3685 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3686 {
3687         struct hci_command_hdr *hdr;
3688
3689         if (!hdev->sent_cmd)
3690                 return NULL;
3691
3692         hdr = (void *) hdev->sent_cmd->data;
3693
3694         if (hdr->opcode != cpu_to_le16(opcode))
3695                 return NULL;
3696
3697         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3698
3699         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3700 }
3701
3702 /* Send HCI command and wait for command complete event */
3703 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3704                              const void *param, u32 timeout)
3705 {
3706         struct sk_buff *skb;
3707
3708         if (!test_bit(HCI_UP, &hdev->flags))
3709                 return ERR_PTR(-ENETDOWN);
3710
3711         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3712
3713         hci_req_sync_lock(hdev);
3714         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3715         hci_req_sync_unlock(hdev);
3716
3717         return skb;
3718 }
3719 EXPORT_SYMBOL(hci_cmd_sync);
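
/* Usage sketch (illustrative only): the returned skb must be checked
 * with IS_ERR() and released by the caller, e.g.:
 *
 *      skb = hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 */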
3720
3721 /* Send ACL data */
3722 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3723 {
3724         struct hci_acl_hdr *hdr;
3725         int len = skb->len;
3726
3727         skb_push(skb, HCI_ACL_HDR_SIZE);
3728         skb_reset_transport_header(skb);
3729         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3730         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3731         hdr->dlen   = cpu_to_le16(len);
3732 }
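
/* hci_handle_pack() folds the 2-bit packet boundary and broadcast flags
 * into the top four bits of the 12-bit connection handle; hci_handle()
 * and hci_flags() in the RX path below undo the split.
 */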
3733
3734 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3735                           struct sk_buff *skb, __u16 flags)
3736 {
3737         struct hci_conn *conn = chan->conn;
3738         struct hci_dev *hdev = conn->hdev;
3739         struct sk_buff *list;
3740
3741         skb->len = skb_headlen(skb);
3742         skb->data_len = 0;
3743
3744         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3745
3746         switch (hdev->dev_type) {
3747         case HCI_PRIMARY:
3748                 hci_add_acl_hdr(skb, conn->handle, flags);
3749                 break;
3750         case HCI_AMP:
3751                 hci_add_acl_hdr(skb, chan->handle, flags);
3752                 break;
3753         default:
3754                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3755                 return;
3756         }
3757
3758         list = skb_shinfo(skb)->frag_list;
3759         if (!list) {
3760                 /* Non-fragmented */
3761                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3762
3763                 skb_queue_tail(queue, skb);
3764         } else {
3765                 /* Fragmented */
3766                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3767
3768                 skb_shinfo(skb)->frag_list = NULL;
3769
3770                 /* Queue all fragments atomically. We need to use spin_lock_bh
3771                  * here because of 6LoWPAN links, as there this function is
3772                  * called from softirq and using normal spin lock could cause
3773                  * deadlocks.
3774                  */
3775                 spin_lock_bh(&queue->lock);
3776
3777                 __skb_queue_tail(queue, skb);
3778
3779                 flags &= ~ACL_START;
3780                 flags |= ACL_CONT;
3781                 do {
3782                         skb = list; list = list->next;
3783
3784                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3785                         hci_add_acl_hdr(skb, conn->handle, flags);
3786
3787                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3788
3789                         __skb_queue_tail(queue, skb);
3790                 } while (list);
3791
3792                 spin_unlock_bh(&queue->lock);
3793         }
3794 }
3795
3796 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3797 {
3798         struct hci_dev *hdev = chan->conn->hdev;
3799
3800         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3801
3802         hci_queue_acl(chan, &chan->data_q, skb, flags);
3803
3804         queue_work(hdev->workqueue, &hdev->tx_work);
3805 }
3806
3807 /* Send SCO data */
3808 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3809 {
3810         struct hci_dev *hdev = conn->hdev;
3811         struct hci_sco_hdr hdr;
3812
3813         BT_DBG("%s len %d", hdev->name, skb->len);
3814
3815         hdr.handle = cpu_to_le16(conn->handle);
3816         hdr.dlen   = skb->len;
3817
3818         skb_push(skb, HCI_SCO_HDR_SIZE);
3819         skb_reset_transport_header(skb);
3820         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3821
3822         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3823
3824         skb_queue_tail(&conn->data_q, skb);
3825         queue_work(hdev->workqueue, &hdev->tx_work);
3826 }
3827
3828 /* ---- HCI TX task (outgoing data) ---- */
3829
3830 /* HCI Connection scheduler */
3831 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3832                                      int *quote)
3833 {
3834         struct hci_conn_hash *h = &hdev->conn_hash;
3835         struct hci_conn *conn = NULL, *c;
3836         unsigned int num = 0, min = ~0;
3837
3838         /* We don't have to lock the device here. Connections are always
3839          * added and removed with the TX task disabled. */
3840
3841         rcu_read_lock();
3842
3843         list_for_each_entry_rcu(c, &h->list, list) {
3844                 if (c->type != type || skb_queue_empty(&c->data_q))
3845                         continue;
3846
3847                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3848                         continue;
3849
3850                 num++;
3851
3852                 if (c->sent < min) {
3853                         min  = c->sent;
3854                         conn = c;
3855                 }
3856
3857                 if (hci_conn_num(hdev, type) == num)
3858                         break;
3859         }
3860
3861         rcu_read_unlock();
3862
3863         if (conn) {
3864                 int cnt, q;
3865
3866                 switch (conn->type) {
3867                 case ACL_LINK:
3868                         cnt = hdev->acl_cnt;
3869                         break;
3870                 case SCO_LINK:
3871                 case ESCO_LINK:
3872                         cnt = hdev->sco_cnt;
3873                         break;
3874                 case LE_LINK:
3875                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3876                         break;
3877                 default:
3878                         cnt = 0;
3879                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3880                 }
3881
3882                 q = cnt / num;
3883                 *quote = q ? q : 1;
3884         } else
3885                 *quote = 0;
3886
3887         BT_DBG("conn %p quote %d", conn, *quote);
3888         return conn;
3889 }
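
/* Example: with hdev->acl_cnt == 8 and three ACL connections that have
 * queued data, the least-busy connection is picked and given a quote of
 * 8 / 3 = 2 packets for this round (the quote never drops below 1).
 */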
3890
3891 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3892 {
3893         struct hci_conn_hash *h = &hdev->conn_hash;
3894         struct hci_conn *c;
3895
3896         bt_dev_err(hdev, "link tx timeout");
3897
3898         rcu_read_lock();
3899
3900         /* Kill stalled connections */
3901         list_for_each_entry_rcu(c, &h->list, list) {
3902                 if (c->type == type && c->sent) {
3903                         bt_dev_err(hdev, "killing stalled connection %pMR",
3904                                    &c->dst);
3905                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3906                 }
3907         }
3908
3909         rcu_read_unlock();
3910 }
3911
3912 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3913                                       int *quote)
3914 {
3915         struct hci_conn_hash *h = &hdev->conn_hash;
3916         struct hci_chan *chan = NULL;
3917         unsigned int num = 0, min = ~0, cur_prio = 0;
3918         struct hci_conn *conn;
3919         int cnt, q, conn_num = 0;
3920
3921         BT_DBG("%s", hdev->name);
3922
3923         rcu_read_lock();
3924
3925         list_for_each_entry_rcu(conn, &h->list, list) {
3926                 struct hci_chan *tmp;
3927
3928                 if (conn->type != type)
3929                         continue;
3930
3931                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3932                         continue;
3933
3934                 conn_num++;
3935
3936                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3937                         struct sk_buff *skb;
3938
3939                         if (skb_queue_empty(&tmp->data_q))
3940                                 continue;
3941
3942                         skb = skb_peek(&tmp->data_q);
3943                         if (skb->priority < cur_prio)
3944                                 continue;
3945
3946                         if (skb->priority > cur_prio) {
3947                                 num = 0;
3948                                 min = ~0;
3949                                 cur_prio = skb->priority;
3950                         }
3951
3952                         num++;
3953
3954                         if (conn->sent < min) {
3955                                 min  = conn->sent;
3956                                 chan = tmp;
3957                         }
3958                 }
3959
3960                 if (hci_conn_num(hdev, type) == conn_num)
3961                         break;
3962         }
3963
3964         rcu_read_unlock();
3965
3966         if (!chan)
3967                 return NULL;
3968
3969         switch (chan->conn->type) {
3970         case ACL_LINK:
3971                 cnt = hdev->acl_cnt;
3972                 break;
3973         case AMP_LINK:
3974                 cnt = hdev->block_cnt;
3975                 break;
3976         case SCO_LINK:
3977         case ESCO_LINK:
3978                 cnt = hdev->sco_cnt;
3979                 break;
3980         case LE_LINK:
3981                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3982                 break;
3983         default:
3984                 cnt = 0;
3985                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3986         }
3987
3988         q = cnt / num;
3989         *quote = q ? q : 1;
3990         BT_DBG("chan %p quote %d", chan, *quote);
3991         return chan;
3992 }
3993
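/* Promote channels that were starved in the previous round: a channel
 * that sent nothing but still has queued data gets its head skb bumped
 * to HCI_PRIO_MAX - 1 so it wins the next arbitration; channels that
 * did send simply have their sent counter reset.
 */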
3994 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3995 {
3996         struct hci_conn_hash *h = &hdev->conn_hash;
3997         struct hci_conn *conn;
3998         int num = 0;
3999
4000         BT_DBG("%s", hdev->name);
4001
4002         rcu_read_lock();
4003
4004         list_for_each_entry_rcu(conn, &h->list, list) {
4005                 struct hci_chan *chan;
4006
4007                 if (conn->type != type)
4008                         continue;
4009
4010                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4011                         continue;
4012
4013                 num++;
4014
4015                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4016                         struct sk_buff *skb;
4017
4018                         if (chan->sent) {
4019                                 chan->sent = 0;
4020                                 continue;
4021                         }
4022
4023                         if (skb_queue_empty(&chan->data_q))
4024                                 continue;
4025
4026                         skb = skb_peek(&chan->data_q);
4027                         if (skb->priority >= HCI_PRIO_MAX - 1)
4028                                 continue;
4029
4030                         skb->priority = HCI_PRIO_MAX - 1;
4031
4032                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4033                                skb->priority);
4034                 }
4035
4036                 if (hci_conn_num(hdev, type) == num)
4037                         break;
4038         }
4039
4040         rcu_read_unlock();
4041
4042 }
4043
4044 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4045 {
4046         /* Calculate count of blocks used by this packet */
4047         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4048 }
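
/* Example: with hdev->block_len == 27, an skb of 104 bytes (100 bytes
 * of ACL payload behind the 4 byte ACL header) needs
 * DIV_ROUND_UP(100, 27) = 4 data blocks.
 */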
4049
4050 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4051 {
4052         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4053                 /* ACL tx timeout must be longer than maximum
4054                  * link supervision timeout (40.9 seconds) */
4055                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4056                                        HCI_ACL_TX_TIMEOUT))
4057                         hci_link_tx_to(hdev, ACL_LINK);
4058         }
4059 }
4060
4061 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4062 {
4063         unsigned int cnt = hdev->acl_cnt;
4064         struct hci_chan *chan;
4065         struct sk_buff *skb;
4066         int quote;
4067
4068         __check_timeout(hdev, cnt);
4069
4070         while (hdev->acl_cnt &&
4071                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4072                 u32 priority = (skb_peek(&chan->data_q))->priority;
4073                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4074                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4075                                skb->len, skb->priority);
4076
4077                         /* Stop if priority has changed */
4078                         if (skb->priority < priority)
4079                                 break;
4080
4081                         skb = skb_dequeue(&chan->data_q);
4082
4083                         hci_conn_enter_active_mode(chan->conn,
4084                                                    bt_cb(skb)->force_active);
4085
4086                         hci_send_frame(hdev, skb);
4087                         hdev->acl_last_tx = jiffies;
4088
4089                         hdev->acl_cnt--;
4090                         chan->sent++;
4091                         chan->conn->sent++;
4092                 }
4093         }
4094
4095         if (cnt != hdev->acl_cnt)
4096                 hci_prio_recalculate(hdev, ACL_LINK);
4097 }
4098
4099 static void hci_sched_acl_blk(struct hci_dev *hdev)
4100 {
4101         unsigned int cnt = hdev->block_cnt;
4102         struct hci_chan *chan;
4103         struct sk_buff *skb;
4104         int quote;
4105         u8 type;
4106
4107         __check_timeout(hdev, cnt);
4108
4109         BT_DBG("%s", hdev->name);
4110
4111         if (hdev->dev_type == HCI_AMP)
4112                 type = AMP_LINK;
4113         else
4114                 type = ACL_LINK;
4115
4116         while (hdev->block_cnt > 0 &&
4117                (chan = hci_chan_sent(hdev, type, &quote))) {
4118                 u32 priority = (skb_peek(&chan->data_q))->priority;
4119                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4120                         int blocks;
4121
4122                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4123                                skb->len, skb->priority);
4124
4125                         /* Stop if priority has changed */
4126                         if (skb->priority < priority)
4127                                 break;
4128
4129                         skb = skb_dequeue(&chan->data_q);
4130
4131                         blocks = __get_blocks(hdev, skb);
4132                         if (blocks > hdev->block_cnt)
4133                                 return;
4134
4135                         hci_conn_enter_active_mode(chan->conn,
4136                                                    bt_cb(skb)->force_active);
4137
4138                         hci_send_frame(hdev, skb);
4139                         hdev->acl_last_tx = jiffies;
4140
4141                         hdev->block_cnt -= blocks;
4142                         quote -= blocks;
4143
4144                         chan->sent += blocks;
4145                         chan->conn->sent += blocks;
4146                 }
4147         }
4148
4149         if (cnt != hdev->block_cnt)
4150                 hci_prio_recalculate(hdev, type);
4151 }
4152
4153 static void hci_sched_acl(struct hci_dev *hdev)
4154 {
4155         BT_DBG("%s", hdev->name);
4156
4157         /* No ACL link over BR/EDR controller */
4158         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4159                 return;
4160
4161         /* No AMP link over AMP controller */
4162         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4163                 return;
4164
4165         switch (hdev->flow_ctl_mode) {
4166         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4167                 hci_sched_acl_pkt(hdev);
4168                 break;
4169
4170         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4171                 hci_sched_acl_blk(hdev);
4172                 break;
4173         }
4174 }
4175
4176 /* Schedule SCO */
4177 static void hci_sched_sco(struct hci_dev *hdev)
4178 {
4179         struct hci_conn *conn;
4180         struct sk_buff *skb;
4181         int quote;
4182
4183         BT_DBG("%s", hdev->name);
4184
4185         if (!hci_conn_num(hdev, SCO_LINK))
4186                 return;
4187
4188         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4189                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4190                         BT_DBG("skb %p len %d", skb, skb->len);
4191                         hci_send_frame(hdev, skb);
4192
4193                         conn->sent++;
4194                         if (conn->sent == ~0)
4195                                 conn->sent = 0;
4196                 }
4197         }
4198 }
4199
4200 static void hci_sched_esco(struct hci_dev *hdev)
4201 {
4202         struct hci_conn *conn;
4203         struct sk_buff *skb;
4204         int quote;
4205
4206         BT_DBG("%s", hdev->name);
4207
4208         if (!hci_conn_num(hdev, ESCO_LINK))
4209                 return;
4210
4211         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4212                                                      &quote))) {
4213                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4214                         BT_DBG("skb %p len %d", skb, skb->len);
4215                         hci_send_frame(hdev, skb);
4216
4217                         conn->sent++;
4218                         if (conn->sent == ~0)
4219                                 conn->sent = 0;
4220                 }
4221         }
4222 }
4223
4224 static void hci_sched_le(struct hci_dev *hdev)
4225 {
4226         struct hci_chan *chan;
4227         struct sk_buff *skb;
4228         int quote, cnt, tmp;
4229
4230         BT_DBG("%s", hdev->name);
4231
4232         if (!hci_conn_num(hdev, LE_LINK))
4233                 return;
4234
4235         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4236                 /* LE tx timeout must be longer than maximum
4237                  * link supervision timeout (40.9 seconds) */
4238                 if (!hdev->le_cnt && hdev->le_pkts &&
4239                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4240                         hci_link_tx_to(hdev, LE_LINK);
4241         }
4242
4243         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4244         tmp = cnt;
4245         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4246                 u32 priority = (skb_peek(&chan->data_q))->priority;
4247                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4248                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4249                                skb->len, skb->priority);
4250
4251                         /* Stop if priority has changed */
4252                         if (skb->priority < priority)
4253                                 break;
4254
4255                         skb = skb_dequeue(&chan->data_q);
4256
4257                         hci_send_frame(hdev, skb);
4258                         hdev->le_last_tx = jiffies;
4259
4260                         cnt--;
4261                         chan->sent++;
4262                         chan->conn->sent++;
4263                 }
4264         }
4265
4266         if (hdev->le_pkts)
4267                 hdev->le_cnt = cnt;
4268         else
4269                 hdev->acl_cnt = cnt;
4270
4271         if (cnt != tmp)
4272                 hci_prio_recalculate(hdev, LE_LINK);
4273 }
4274
4275 static void hci_tx_work(struct work_struct *work)
4276 {
4277         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4278         struct sk_buff *skb;
4279
4280         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4281                hdev->sco_cnt, hdev->le_cnt);
4282
4283         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4284                 /* Schedule queues and send stuff to HCI driver */
4285                 hci_sched_acl(hdev);
4286                 hci_sched_sco(hdev);
4287                 hci_sched_esco(hdev);
4288                 hci_sched_le(hdev);
4289         }
4290
4291         /* Send next queued raw (unknown type) packet */
4292         while ((skb = skb_dequeue(&hdev->raw_q)))
4293                 hci_send_frame(hdev, skb);
4294 }
4295
4296 /* ----- HCI RX task (incoming data processing) ----- */
4297
4298 /* ACL data packet */
4299 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4300 {
4301         struct hci_acl_hdr *hdr = (void *) skb->data;
4302         struct hci_conn *conn;
4303         __u16 handle, flags;
4304
4305         skb_pull(skb, HCI_ACL_HDR_SIZE);
4306
4307         handle = __le16_to_cpu(hdr->handle);
4308         flags  = hci_flags(handle);
4309         handle = hci_handle(handle);
4310
4311         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4312                handle, flags);
4313
4314         hdev->stat.acl_rx++;
4315
4316         hci_dev_lock(hdev);
4317         conn = hci_conn_hash_lookup_handle(hdev, handle);
4318         hci_dev_unlock(hdev);
4319
4320         if (conn) {
4321                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4322
4323                 /* Send to upper protocol */
4324                 l2cap_recv_acldata(conn, skb, flags);
4325                 return;
4326         } else {
4327                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4328                            handle);
4329         }
4330
4331         kfree_skb(skb);
4332 }
4333
4334 /* SCO data packet */
4335 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4336 {
4337         struct hci_sco_hdr *hdr = (void *) skb->data;
4338         struct hci_conn *conn;
4339         __u16 handle;
4340
4341         skb_pull(skb, HCI_SCO_HDR_SIZE);
4342
4343         handle = __le16_to_cpu(hdr->handle);
4344
4345         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4346
4347         hdev->stat.sco_rx++;
4348
4349         hci_dev_lock(hdev);
4350         conn = hci_conn_hash_lookup_handle(hdev, handle);
4351         hci_dev_unlock(hdev);
4352
4353         if (conn) {
4354                 /* Send to upper protocol */
4355                 sco_recv_scodata(conn, skb);
4356                 return;
4357         } else {
4358                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4359                            handle);
4360         }
4361
4362         kfree_skb(skb);
4363 }
4364
4365 static bool hci_req_is_complete(struct hci_dev *hdev)
4366 {
4367         struct sk_buff *skb;
4368
4369         skb = skb_peek(&hdev->cmd_q);
4370         if (!skb)
4371                 return true;
4372
4373         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4374 }
4375
4376 static void hci_resend_last(struct hci_dev *hdev)
4377 {
4378         struct hci_command_hdr *sent;
4379         struct sk_buff *skb;
4380         u16 opcode;
4381
4382         if (!hdev->sent_cmd)
4383                 return;
4384
4385         sent = (void *) hdev->sent_cmd->data;
4386         opcode = __le16_to_cpu(sent->opcode);
4387         if (opcode == HCI_OP_RESET)
4388                 return;
4389
4390         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4391         if (!skb)
4392                 return;
4393
4394         skb_queue_head(&hdev->cmd_q, skb);
4395         queue_work(hdev->workqueue, &hdev->cmd_work);
4396 }
4397
4398 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4399                           hci_req_complete_t *req_complete,
4400                           hci_req_complete_skb_t *req_complete_skb)
4401 {
4402         struct sk_buff *skb;
4403         unsigned long flags;
4404
4405         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4406
4407         /* If the completed command doesn't match the last one that was
4408          * sent, we need to do special handling of it.
4409          */
4410         if (!hci_sent_cmd_data(hdev, opcode)) {
4411                 /* Some CSR based controllers generate a spontaneous
4412                  * reset complete event during init and any pending
4413                  * command will never be completed. In such a case we
4414                  * need to resend whatever was the last sent
4415                  * command.
4416                  */
4417                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4418                         hci_resend_last(hdev);
4419
4420                 return;
4421         }
4422
4423         /* If we reach this point, this event matches the last command sent */
4424         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4425
4426         /* If the command succeeded and there's still more commands in
4427          * this request the request is not yet complete.
4428          */
4429         if (!status && !hci_req_is_complete(hdev))
4430                 return;
4431
4432         /* If this was the last command in a request the complete
4433          * callback would be found in hdev->sent_cmd instead of the
4434          * command queue (hdev->cmd_q).
4435          */
4436         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4437                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4438                 return;
4439         }
4440
4441         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4442                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4443                 return;
4444         }
4445
4446         /* Remove all pending commands belonging to this request */
4447         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4448         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4449                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4450                         __skb_queue_head(&hdev->cmd_q, skb);
4451                         break;
4452                 }
4453
4454                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4455                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4456                 else
4457                         *req_complete = bt_cb(skb)->hci.req_complete;
4458                 kfree_skb(skb);
4459         }
4460         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4461 }
4462
4463 static void hci_rx_work(struct work_struct *work)
4464 {
4465         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4466         struct sk_buff *skb;
4467
4468         BT_DBG("%s", hdev->name);
4469
4470         while ((skb = skb_dequeue(&hdev->rx_q))) {
4471                 /* Send copy to monitor */
4472                 hci_send_to_monitor(hdev, skb);
4473
4474                 if (atomic_read(&hdev->promisc)) {
4475                         /* Send copy to the sockets */
4476                         hci_send_to_sock(hdev, skb);
4477                 }
4478
4479                 /* If the device has been opened in HCI_USER_CHANNEL,
4480                  * userspace has exclusive access to the device.
4481                  * When device is HCI_INIT, we still need to process
4482                  * the data packets to the driver in order
4483                  * to complete its setup().
4484                  */
4485                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4486                     !test_bit(HCI_INIT, &hdev->flags)) {
4487                         kfree_skb(skb);
4488                         continue;
4489                 }
4490
4491                 if (test_bit(HCI_INIT, &hdev->flags)) {
4492                         /* Don't process data packets in this state. */
4493                         switch (hci_skb_pkt_type(skb)) {
4494                         case HCI_ACLDATA_PKT:
4495                         case HCI_SCODATA_PKT:
4496                                 kfree_skb(skb);
4497                                 continue;
4498                         }
4499                 }
4500
4501                 /* Process frame */
4502                 switch (hci_skb_pkt_type(skb)) {
4503                 case HCI_EVENT_PKT:
4504                         BT_DBG("%s Event packet", hdev->name);
4505                         hci_event_packet(hdev, skb);
4506                         break;
4507
4508                 case HCI_ACLDATA_PKT:
4509                         BT_DBG("%s ACL data packet", hdev->name);
4510                         hci_acldata_packet(hdev, skb);
4511                         break;
4512
4513                 case HCI_SCODATA_PKT:
4514                         BT_DBG("%s SCO data packet", hdev->name);
4515                         hci_scodata_packet(hdev, skb);
4516                         break;
4517
4518                 default:
4519                         kfree_skb(skb);
4520                         break;
4521                 }
4522         }
4523 }
4524
4525 static void hci_cmd_work(struct work_struct *work)
4526 {
4527         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4528         struct sk_buff *skb;
4529
4530         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4531                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4532
4533         /* Send queued commands */
4534         if (atomic_read(&hdev->cmd_cnt)) {
4535                 skb = skb_dequeue(&hdev->cmd_q);
4536                 if (!skb)
4537                         return;
4538
4539                 kfree_skb(hdev->sent_cmd);
4540
4541                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4542                 if (hdev->sent_cmd) {
4543                         if (hci_req_status_pend(hdev))
4544                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4545                         atomic_dec(&hdev->cmd_cnt);
4546                         hci_send_frame(hdev, skb);
4547                         if (test_bit(HCI_RESET, &hdev->flags))
4548                                 cancel_delayed_work(&hdev->cmd_timer);
4549                         else
4550                                 schedule_delayed_work(&hdev->cmd_timer,
4551                                                       HCI_CMD_TIMEOUT);
4552                 } else {
4553                         skb_queue_head(&hdev->cmd_q, skb);
4554                         queue_work(hdev->workqueue, &hdev->cmd_work);
4555                 }
4556         }
4557 }