/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

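/* Report whether Device Under Test mode is active as a boolean
 * debugfs attribute ("Y\n" or "N\n").
 */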
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

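/* Toggle Device Under Test mode: enabling it sends the
 * HCI_Enable_Device_Under_Test_Mode command, while disabling it
 * resets the controller to leave test mode.
 */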
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

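/* Toggle the vendor specific diagnostic setting through the driver's
 * set_diag() callback.
 */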
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

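/* Request helper: mark the device as resetting and queue an HCI_Reset
 * command.
 */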
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

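/* Stage-1 init for BR/EDR capable controllers: select packet-based
 * flow control and read the basic controller information.
 */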
static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

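/* Stage-1 init for AMP controllers, which use block-based flow
 * control and expose AMP specific information.
 */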
static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

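/* Build the first page of the event mask from the controller's
 * capabilities. Only events for features the controller actually
 * supports are unmasked.
 */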
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

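/* Advertise every link policy mode (role switch, hold, sniff, park)
 * that the controller's LMP features allow as the default link policy.
 */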
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

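/* Program the LE Host Support feature bit on dual-mode controllers,
 * but only when the desired value differs from what the host features
 * currently report.
 */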
static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        if (hdev->commands[18] & 0x04 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports LL Privacy feature, enable
                 * the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI TS spec forbids mixing legacy and extended
                         * advertising commands, and READ_ADV_TX_POWER is
                         * among them. So do not call it if extended
                         * advertising is supported, otherwise the controller
                         * will return COMMAND_DISALLOWED for extended
                         * commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE Accept List Size */
                        hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE Accept List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->commands[35] & 0x04) {
                        __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

                        /* Set RPA timeout */
                        hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
                                    &rpa_timeout);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Read local pairing options if the HCI command is supported */
        if (hdev->commands[41] & 0x08)
                hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* If supported, set the erroneous data reporting mode to match
         * the wideband speech setting value.
         */
        if (hdev->commands[18] & 0x08 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
                bool enabled = hci_dev_test_flag(hdev,
                                                 HCI_WIDEBAND_SPEECH_ENABLED);

                if (enabled !=
                    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
                        struct hci_cp_write_def_err_data_reporting cp;

                        cp.err_data_reporting = enabled ?
                                                ERR_DATA_REPORTING_ENABLED :
                                                ERR_DATA_REPORTING_DISABLED;

                        hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                                    sizeof(cp), &cp);
                }
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

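/* Run the full controller initialization as up to four synchronous
 * request stages (hci_init1_req through hci_init4_req above).
 */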
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

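/* A device counts as discovering while it is actively finding remote
 * devices or resolving their names.
 */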
bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

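/* Free every cached inquiry entry and reset the unknown and resolve
 * sub-lists. Called with the hdev lock held.
 */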
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

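/* Re-insert @ie at the right position in the resolve list, which is
 * kept ordered by signal strength. Entries with a pending name request
 * keep their position.
 */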
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

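/* Copy up to @num cached entries into @buf as struct inquiry_info
 * records and return how many were copied. Runs under the hdev lock
 * and must not sleep.
 */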
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

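/* Request helper: start an inquiry with the LAP, length and response
 * limit taken from the ioctl request, unless one is already active.
 */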
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

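/* HCIINQUIRY ioctl handler: run a new inquiry if the cache is stale
 * (or a flush was requested), then copy the cached results back to
 * user space.
 */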
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary
         * buffer and copy it to user space afterwards.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for a HCI device from
 *                                     a firmware node property.
 * @hdev:       The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables but were not updated by the firmware.
 * For example, the DTS could define 'local-bd-address' with an all-zero
 * value.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
        struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
        bdaddr_t ba;
        int ret;

        ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
                                            (u8 *)&ba, sizeof(ba));
        if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
                return;

        bacpy(&hdev->public_addr, &ba);
}

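/* Bring the device up: open the transport, run the driver's setup()
 * stage on first power-on and handle the address configuration quirks.
 */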
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

1476         if (test_bit(HCI_UP, &hdev->flags)) {
1477                 ret = -EALREADY;
1478                 goto done;
1479         }
1480
1481         if (hdev->open(hdev)) {
1482                 ret = -EIO;
1483                 goto done;
1484         }
1485
1486         set_bit(HCI_RUNNING, &hdev->flags);
1487         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1488
1489         atomic_set(&hdev->cmd_cnt, 1);
1490         set_bit(HCI_INIT, &hdev->flags);
1491
1492         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1493             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1494                 bool invalid_bdaddr;
1495
1496                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1497
1498                 if (hdev->setup)
1499                         ret = hdev->setup(hdev);
1500
1501                 /* The transport driver can set the quirk to mark the
1502                  * BD_ADDR invalid before creating the HCI device or in
1503                  * its setup callback.
1504                  */
1505                 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1506                                           &hdev->quirks);
1507
1508                 if (ret)
1509                         goto setup_failed;
1510
1511                 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1512                         if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1513                                 hci_dev_get_bd_addr_from_property(hdev);
1514
1515                         if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1516                             hdev->set_bdaddr) {
1517                                 ret = hdev->set_bdaddr(hdev,
1518                                                        &hdev->public_addr);
1519
1520                                 /* If setting the BD_ADDR from the device
1521                                  * property succeeds, then treat the address
1522                                  * as valid even if the invalid BD_ADDR
1523                                  * quirk indicates otherwise.
1524                                  */
1525                                 if (!ret)
1526                                         invalid_bdaddr = false;
1527                         }
1528                 }
1529
1530 setup_failed:
1531                 /* The transport driver can set these quirks before
1532                  * creating the HCI device or in its setup callback.
1533                  *
1534                  * For the invalid BD_ADDR quirk, the address may have
1535                  * become valid by now if the bootloader provided it
1536                  * (see above).
1537                  *
1538                  * In case any of them is set, the controller has to
1539                  * start up as unconfigured.
1540                  */
1541                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1542                     invalid_bdaddr)
1543                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1544
1545                 /* For an unconfigured controller it is required to
1546                  * read at least the version information provided by
1547                  * the Read Local Version Information command.
1548                  *
1549                  * If the set_bdaddr driver callback is provided, then
1550                  * also the original Bluetooth public device address
1551                  * will be read using the Read BD Address command.
1552                  */
1553                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1554                         ret = __hci_unconf_init(hdev);
1555         }
1556
1557         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1558                 /* If public address change is configured, ensure that
1559                  * the address gets programmed. If the driver does not
1560                  * support changing the public address, fail the power
1561                  * on procedure.
1562                  */
1563                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1564                     hdev->set_bdaddr)
1565                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1566                 else
1567                         ret = -EADDRNOTAVAIL;
1568         }
1569
1570         if (!ret) {
1571                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1572                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1573                         ret = __hci_init(hdev);
1574                         if (!ret && hdev->post_init)
1575                                 ret = hdev->post_init(hdev);
1576                 }
1577         }
1578
1579         /* If the HCI Reset command is clearing all diagnostic settings,
1580          * then they need to be reprogrammed after the init procedure
1581          * has completed.
1582          */
1583         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1584             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1585             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1586                 ret = hdev->set_diag(hdev, true);
1587
1588         msft_do_open(hdev);
1589
1590         clear_bit(HCI_INIT, &hdev->flags);
1591
1592         if (!ret) {
1593                 hci_dev_hold(hdev);
1594                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1595                 hci_adv_instances_set_rpa_expired(hdev, true);
1596                 set_bit(HCI_UP, &hdev->flags);
1597                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1598                 hci_leds_update_powered(hdev, true);
1599                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1600                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1601                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1602                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1603                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1604                     hdev->dev_type == HCI_PRIMARY) {
1605                         ret = __hci_req_hci_power_on(hdev);
1606                         mgmt_power_on(hdev, ret);
1607                 }
1608         } else {
1609                 /* Init failed, cleanup */
1610                 flush_work(&hdev->tx_work);
1611
1612                 /* Since hci_rx_work() can schedule new cmd_work, it
1613                  * should be flushed first to avoid an unexpected call
1614                  * of hci_cmd_work().
1615                  */
1616                 flush_work(&hdev->rx_work);
1617                 flush_work(&hdev->cmd_work);
1618
1619                 skb_queue_purge(&hdev->cmd_q);
1620                 skb_queue_purge(&hdev->rx_q);
1621
1622                 if (hdev->flush)
1623                         hdev->flush(hdev);
1624
1625                 if (hdev->sent_cmd) {
1626                         kfree_skb(hdev->sent_cmd);
1627                         hdev->sent_cmd = NULL;
1628                 }
1629
1630                 clear_bit(HCI_RUNNING, &hdev->flags);
1631                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1632
1633                 hdev->close(hdev);
1634                 hdev->flags &= BIT(HCI_RAW);
1635         }
1636
1637 done:
1638         hci_req_sync_unlock(hdev);
1639         return ret;
1640 }
1641
1642 /* ---- HCI ioctl helpers ---- */
1643
1644 int hci_dev_open(__u16 dev)
1645 {
1646         struct hci_dev *hdev;
1647         int err;
1648
1649         hdev = hci_dev_get(dev);
1650         if (!hdev)
1651                 return -ENODEV;
1652
1653         /* Devices that are marked as unconfigured can only be powered
1654          * up as user channel. Trying to bring them up as normal devices
1655          * will result in a failure; only user channel operation is
1656          * possible.
1657          *
1658          * When this function is called for a user channel, the flag
1659          * HCI_USER_CHANNEL will be set first before attempting to
1660          * open the device.
1661          */
1662         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1663             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1664                 err = -EOPNOTSUPP;
1665                 goto done;
1666         }
1667
1668         /* We need to ensure that no other power on/off work is pending
1669          * before proceeding to call hci_dev_do_open. This is
1670          * particularly important if the setup procedure has not yet
1671          * completed.
1672          */
1673         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1674                 cancel_delayed_work(&hdev->power_off);
1675
1676         /* After this call it is guaranteed that the setup procedure
1677          * has finished. This means that error conditions like RFKILL
1678          * or no valid public or static random address can now be detected.
1679          */
1680         flush_workqueue(hdev->req_workqueue);
1681
1682         /* For controllers not using the management interface and that
1683          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1684          * so that pairing works for them. Once the management interface
1685          * is in use this bit will be cleared again and userspace has
1686          * to explicitly enable it.
1687          */
1688         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1689             !hci_dev_test_flag(hdev, HCI_MGMT))
1690                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1691
1692         err = hci_dev_do_open(hdev);
1693
1694 done:
1695         hci_dev_put(hdev);
1696         return err;
1697 }
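
/* Usage sketch (userspace, illustrative only): hci_dev_open() is reached
 * through the HCIDEVUP ioctl on a raw HCI socket, which requires
 * CAP_NET_ADMIN:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0)	// bring up hci0
 *		perror("HCIDEVUP");
 *
 * Errors seen by the caller map to the checks above, e.g. EOPNOTSUPP for
 * an unconfigured controller brought up without user channel.
 */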
1698
1699 /* This function requires the caller holds hdev->lock */
1700 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1701 {
1702         struct hci_conn_params *p;
1703
1704         list_for_each_entry(p, &hdev->le_conn_params, list) {
1705                 if (p->conn) {
1706                         hci_conn_drop(p->conn);
1707                         hci_conn_put(p->conn);
1708                         p->conn = NULL;
1709                 }
1710                 list_del_init(&p->action);
1711         }
1712
1713         BT_DBG("All LE pending actions cleared");
1714 }
1715
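/* Power down an HCI controller: run the vendor shutdown hook while the
 * device is still up, flush pending work and queues, stop discovery,
 * clear pending LE actions and open connections, optionally send HCI
 * Reset (HCI_QUIRK_RESET_ON_CLOSE) and finally call the transport
 * close() callback. Calling it on a device that is already down is safe
 * and only cancels the command timer.
 */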
1716 int hci_dev_do_close(struct hci_dev *hdev)
1717 {
1718         bool auto_off;
1719
1720         BT_DBG("%s %p", hdev->name, hdev);
1721
1722         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1723             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1724             test_bit(HCI_UP, &hdev->flags)) {
1725                 /* Execute vendor specific shutdown routine */
1726                 if (hdev->shutdown)
1727                         hdev->shutdown(hdev);
1728         }
1729
1730         cancel_delayed_work(&hdev->power_off);
1731
1732         hci_request_cancel_all(hdev);
1733         hci_req_sync_lock(hdev);
1734
1735         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1736                 cancel_delayed_work_sync(&hdev->cmd_timer);
1737                 hci_req_sync_unlock(hdev);
1738                 return 0;
1739         }
1740
1741         hci_leds_update_powered(hdev, false);
1742
1743         /* Flush RX and TX work items */
1744         flush_work(&hdev->tx_work);
1745         flush_work(&hdev->rx_work);
1746
1747         if (hdev->discov_timeout > 0) {
1748                 hdev->discov_timeout = 0;
1749                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1750                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1751         }
1752
1753         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1754                 cancel_delayed_work(&hdev->service_cache);
1755
1756         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1757                 struct adv_info *adv_instance;
1758
1759                 cancel_delayed_work_sync(&hdev->rpa_expired);
1760
1761                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1762                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1763         }
1764
1765         /* Avoid potential lockdep warnings from the *_flush() calls by
1766          * ensuring the workqueue is empty up front.
1767          */
1768         drain_workqueue(hdev->workqueue);
1769
1770         hci_dev_lock(hdev);
1771
1772         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1773
1774         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1775
1776         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1777             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1778             hci_dev_test_flag(hdev, HCI_MGMT))
1779                 __mgmt_power_off(hdev);
1780
1781         hci_inquiry_cache_flush(hdev);
1782         hci_pend_le_actions_clear(hdev);
1783         hci_conn_hash_flush(hdev);
1784         hci_dev_unlock(hdev);
1785
1786         smp_unregister(hdev);
1787
1788         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1789
1790         msft_do_close(hdev);
1791
1792         if (hdev->flush)
1793                 hdev->flush(hdev);
1794
1795         /* Reset device */
1796         skb_queue_purge(&hdev->cmd_q);
1797         atomic_set(&hdev->cmd_cnt, 1);
1798         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1799             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1800                 set_bit(HCI_INIT, &hdev->flags);
1801                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1802                 clear_bit(HCI_INIT, &hdev->flags);
1803         }
1804
1805         /* Flush cmd work */
1806         flush_work(&hdev->cmd_work);
1807
1808         /* Drop queues */
1809         skb_queue_purge(&hdev->rx_q);
1810         skb_queue_purge(&hdev->cmd_q);
1811         skb_queue_purge(&hdev->raw_q);
1812
1813         /* Drop last sent command */
1814         if (hdev->sent_cmd) {
1815                 cancel_delayed_work_sync(&hdev->cmd_timer);
1816                 kfree_skb(hdev->sent_cmd);
1817                 hdev->sent_cmd = NULL;
1818         }
1819
1820         clear_bit(HCI_RUNNING, &hdev->flags);
1821         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1822
1823         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1824                 wake_up(&hdev->suspend_wait_q);
1825
1826         /* After this point our queues are empty
1827          * and no tasks are scheduled. */
1828         hdev->close(hdev);
1829
1830         /* Clear flags */
1831         hdev->flags &= BIT(HCI_RAW);
1832         hci_dev_clear_volatile_flags(hdev);
1833
1834         /* Controller radio is available but is currently powered down */
1835         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1836
1837         memset(hdev->eir, 0, sizeof(hdev->eir));
1838         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1839         bacpy(&hdev->random_addr, BDADDR_ANY);
1840
1841         hci_req_sync_unlock(hdev);
1842
1843         hci_dev_put(hdev);
1844         return 0;
1845 }
1846
1847 int hci_dev_close(__u16 dev)
1848 {
1849         struct hci_dev *hdev;
1850         int err;
1851
1852         hdev = hci_dev_get(dev);
1853         if (!hdev)
1854                 return -ENODEV;
1855
1856         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1857                 err = -EBUSY;
1858                 goto done;
1859         }
1860
1861         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1862                 cancel_delayed_work(&hdev->power_off);
1863
1864         err = hci_dev_do_close(hdev);
1865
1866 done:
1867         hci_dev_put(hdev);
1868         return err;
1869 }
1870
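/* Soft-reset a running controller: drop the RX and command queues, flush
 * the inquiry cache and the connection hash, reset the flow control
 * counters and issue a synchronous HCI Reset. The device stays up; this
 * is the body of hci_dev_reset() below.
 */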
1871 static int hci_dev_do_reset(struct hci_dev *hdev)
1872 {
1873         int ret;
1874
1875         BT_DBG("%s %p", hdev->name, hdev);
1876
1877         hci_req_sync_lock(hdev);
1878
1879         /* Drop queues */
1880         skb_queue_purge(&hdev->rx_q);
1881         skb_queue_purge(&hdev->cmd_q);
1882
1883         /* Avoid potential lockdep warnings from the *_flush() calls by
1884          * ensuring the workqueue is empty up front.
1885          */
1886         drain_workqueue(hdev->workqueue);
1887
1888         hci_dev_lock(hdev);
1889         hci_inquiry_cache_flush(hdev);
1890         hci_conn_hash_flush(hdev);
1891         hci_dev_unlock(hdev);
1892
1893         if (hdev->flush)
1894                 hdev->flush(hdev);
1895
1896         atomic_set(&hdev->cmd_cnt, 1);
1897         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1898
1899         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1900
1901         hci_req_sync_unlock(hdev);
1902         return ret;
1903 }
1904
1905 int hci_dev_reset(__u16 dev)
1906 {
1907         struct hci_dev *hdev;
1908         int err;
1909
1910         hdev = hci_dev_get(dev);
1911         if (!hdev)
1912                 return -ENODEV;
1913
1914         if (!test_bit(HCI_UP, &hdev->flags)) {
1915                 err = -ENETDOWN;
1916                 goto done;
1917         }
1918
1919         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1920                 err = -EBUSY;
1921                 goto done;
1922         }
1923
1924         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1925                 err = -EOPNOTSUPP;
1926                 goto done;
1927         }
1928
1929         err = hci_dev_do_reset(hdev);
1930
1931 done:
1932         hci_dev_put(hdev);
1933         return err;
1934 }
1935
1936 int hci_dev_reset_stat(__u16 dev)
1937 {
1938         struct hci_dev *hdev;
1939         int ret = 0;
1940
1941         hdev = hci_dev_get(dev);
1942         if (!hdev)
1943                 return -ENODEV;
1944
1945         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1946                 ret = -EBUSY;
1947                 goto done;
1948         }
1949
1950         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1951                 ret = -EOPNOTSUPP;
1952                 goto done;
1953         }
1954
1955         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1956
1957 done:
1958         hci_dev_put(hdev);
1959         return ret;
1960 }
1961
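/* Keep the mgmt flags in sync with a scan mode change made through the
 * legacy HCISETSCAN ioctl: SCAN_PAGE maps to HCI_CONNECTABLE and
 * SCAN_INQUIRY to HCI_DISCOVERABLE. If either changed while mgmt is in
 * use, BR/EDR is re-enabled, the advertising data refreshed and a New
 * Settings event emitted.
 */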
1962 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1963 {
1964         bool conn_changed, discov_changed;
1965
1966         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1967
1968         if ((scan & SCAN_PAGE))
1969                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1970                                                           HCI_CONNECTABLE);
1971         else
1972                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1973                                                            HCI_CONNECTABLE);
1974
1975         if ((scan & SCAN_INQUIRY)) {
1976                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1977                                                             HCI_DISCOVERABLE);
1978         } else {
1979                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1980                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1981                                                              HCI_DISCOVERABLE);
1982         }
1983
1984         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1985                 return;
1986
1987         if (conn_changed || discov_changed) {
1988                 /* In case this was disabled through mgmt */
1989                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1990
1991                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1992                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1993
1994                 mgmt_new_settings(hdev);
1995         }
1996 }
1997
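/* Handler for the legacy device-control ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETPTYPE, ...). Only permitted on a BR/EDR-enabled primary
 * controller that is neither unconfigured nor bound to a user channel;
 * each setting is applied through a synchronous HCI request or by
 * updating hdev directly.
 */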
1998 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1999 {
2000         struct hci_dev *hdev;
2001         struct hci_dev_req dr;
2002         int err = 0;
2003
2004         if (copy_from_user(&dr, arg, sizeof(dr)))
2005                 return -EFAULT;
2006
2007         hdev = hci_dev_get(dr.dev_id);
2008         if (!hdev)
2009                 return -ENODEV;
2010
2011         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2012                 err = -EBUSY;
2013                 goto done;
2014         }
2015
2016         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2017                 err = -EOPNOTSUPP;
2018                 goto done;
2019         }
2020
2021         if (hdev->dev_type != HCI_PRIMARY) {
2022                 err = -EOPNOTSUPP;
2023                 goto done;
2024         }
2025
2026         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2027                 err = -EOPNOTSUPP;
2028                 goto done;
2029         }
2030
2031         switch (cmd) {
2032         case HCISETAUTH:
2033                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2034                                    HCI_INIT_TIMEOUT, NULL);
2035                 break;
2036
2037         case HCISETENCRYPT:
2038                 if (!lmp_encrypt_capable(hdev)) {
2039                         err = -EOPNOTSUPP;
2040                         break;
2041                 }
2042
2043                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2044                         /* Auth must be enabled first */
2045                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2046                                            HCI_INIT_TIMEOUT, NULL);
2047                         if (err)
2048                                 break;
2049                 }
2050
2051                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2052                                    HCI_INIT_TIMEOUT, NULL);
2053                 break;
2054
2055         case HCISETSCAN:
2056                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2057                                    HCI_INIT_TIMEOUT, NULL);
2058
2059                 /* Ensure that the connectable and discoverable states
2060                  * get correctly modified as this was a non-mgmt change.
2061                  */
2062                 if (!err)
2063                         hci_update_scan_state(hdev, dr.dev_opt);
2064                 break;
2065
2066         case HCISETLINKPOL:
2067                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2068                                    HCI_INIT_TIMEOUT, NULL);
2069                 break;
2070
2071         case HCISETLINKMODE:
2072                 hdev->link_mode = ((__u16) dr.dev_opt) &
2073                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2074                 break;
2075
2076         case HCISETPTYPE:
2077                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2078                         break;
2079
2080                 hdev->pkt_type = (__u16) dr.dev_opt;
2081                 mgmt_phy_configuration_changed(hdev, NULL);
2082                 break;
2083
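        /* For the two MTU ioctls below, dev_opt carries two packed __u16
         * values: the element at index 0 is the packet count and the
         * element at index 1 the MTU (so on a little-endian host the low
         * half holds the count and the high half the MTU).
         */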
2084         case HCISETACLMTU:
2085                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2086                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2087                 break;
2088
2089         case HCISETSCOMTU:
2090                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2091                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2092                 break;
2093
2094         default:
2095                 err = -EINVAL;
2096                 break;
2097         }
2098
2099 done:
2100         hci_dev_put(hdev);
2101         return err;
2102 }
2103
2104 int hci_get_dev_list(void __user *arg)
2105 {
2106         struct hci_dev *hdev;
2107         struct hci_dev_list_req *dl;
2108         struct hci_dev_req *dr;
2109         int n = 0, size, err;
2110         __u16 dev_num;
2111
2112         if (get_user(dev_num, (__u16 __user *) arg))
2113                 return -EFAULT;
2114
2115         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2116                 return -EINVAL;
2117
2118         size = sizeof(*dl) + dev_num * sizeof(*dr);
2119
2120         dl = kzalloc(size, GFP_KERNEL);
2121         if (!dl)
2122                 return -ENOMEM;
2123
2124         dr = dl->dev_req;
2125
2126         read_lock(&hci_dev_list_lock);
2127         list_for_each_entry(hdev, &hci_dev_list, list) {
2128                 unsigned long flags = hdev->flags;
2129
2130                 /* When auto-off is configured the transport is running,
2131                  * but the device should still be reported as being
2132                  * down.
2133                  */
2134                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2135                         flags &= ~BIT(HCI_UP);
2136
2137                 (dr + n)->dev_id  = hdev->id;
2138                 (dr + n)->dev_opt = flags;
2139
2140                 if (++n >= dev_num)
2141                         break;
2142         }
2143         read_unlock(&hci_dev_list_lock);
2144
2145         dl->dev_num = n;
2146         size = sizeof(*dl) + n * sizeof(*dr);
2147
2148         err = copy_to_user(arg, dl, size);
2149         kfree(dl);
2150
2151         return err ? -EFAULT : 0;
2152 }
2153
2154 int hci_get_dev_info(void __user *arg)
2155 {
2156         struct hci_dev *hdev;
2157         struct hci_dev_info di;
2158         unsigned long flags;
2159         int err = 0;
2160
2161         if (copy_from_user(&di, arg, sizeof(di)))
2162                 return -EFAULT;
2163
2164         hdev = hci_dev_get(di.dev_id);
2165         if (!hdev)
2166                 return -ENODEV;
2167
2168         /* When auto-off is configured the transport is running,
2169          * but the device should still be reported as being
2170          * down.
2171          */
2172         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2173                 flags = hdev->flags & ~BIT(HCI_UP);
2174         else
2175                 flags = hdev->flags;
2176
2177         strscpy(di.name, hdev->name, sizeof(di.name));
2178         di.bdaddr   = hdev->bdaddr;
2179         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2180         di.flags    = flags;
2181         di.pkt_type = hdev->pkt_type;
2182         if (lmp_bredr_capable(hdev)) {
2183                 di.acl_mtu  = hdev->acl_mtu;
2184                 di.acl_pkts = hdev->acl_pkts;
2185                 di.sco_mtu  = hdev->sco_mtu;
2186                 di.sco_pkts = hdev->sco_pkts;
2187         } else {
2188                 di.acl_mtu  = hdev->le_mtu;
2189                 di.acl_pkts = hdev->le_pkts;
2190                 di.sco_mtu  = 0;
2191                 di.sco_pkts = 0;
2192         }
2193         di.link_policy = hdev->link_policy;
2194         di.link_mode   = hdev->link_mode;
2195
2196         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2197         memcpy(&di.features, &hdev->features, sizeof(di.features));
2198
2199         if (copy_to_user(arg, &di, sizeof(di)))
2200                 err = -EFAULT;
2201
2202         hci_dev_put(hdev);
2203
2204         return err;
2205 }
2206
2207 /* ---- Interface to HCI drivers ---- */
2208
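/* rfkill set_block callback: when the switch blocks the radio, mark the
 * device HCI_RFKILLED and power it down unless setup or configuration is
 * still in progress. Unblocking only clears the flag; the device is not
 * powered back up automatically. For user-channel devices the request is
 * rejected with -EBUSY.
 */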
2209 static int hci_rfkill_set_block(void *data, bool blocked)
2210 {
2211         struct hci_dev *hdev = data;
2212
2213         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2214
2215         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2216                 return -EBUSY;
2217
2218         if (blocked) {
2219                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2220                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2221                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2222                         hci_dev_do_close(hdev);
2223         } else {
2224                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2225         }
2226
2227         return 0;
2228 }
2229
2230 static const struct rfkill_ops hci_rfkill_ops = {
2231         .set_block = hci_rfkill_set_block,
2232 };
2233
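/* Worker for hdev->power_on. If the device is already up with auto-off
 * pending, only the mgmt power-on sequence is re-run. Otherwise the
 * device is opened, conditions that were deliberately ignored during
 * setup (rfkill, unconfigured controller, missing address) are
 * re-checked, and on the first power-on the index is announced to
 * userspace via mgmt_index_added().
 */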
2234 static void hci_power_on(struct work_struct *work)
2235 {
2236         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2237         int err;
2238
2239         BT_DBG("%s", hdev->name);
2240
2241         if (test_bit(HCI_UP, &hdev->flags) &&
2242             hci_dev_test_flag(hdev, HCI_MGMT) &&
2243             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2244                 cancel_delayed_work(&hdev->power_off);
2245                 hci_req_sync_lock(hdev);
2246                 err = __hci_req_hci_power_on(hdev);
2247                 hci_req_sync_unlock(hdev);
2248                 mgmt_power_on(hdev, err);
2249                 return;
2250         }
2251
2252         err = hci_dev_do_open(hdev);
2253         if (err < 0) {
2254                 hci_dev_lock(hdev);
2255                 mgmt_set_powered_failed(hdev, err);
2256                 hci_dev_unlock(hdev);
2257                 return;
2258         }
2259
2260         /* During the HCI setup phase, a few error conditions are
2261          * ignored and they need to be checked now. If they are still
2262          * valid, it is important to turn the device back off.
2263          */
2264         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2265             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2266             (hdev->dev_type == HCI_PRIMARY &&
2267              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2268              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2269                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2270                 hci_dev_do_close(hdev);
2271         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2272                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2273                                    HCI_AUTO_OFF_TIMEOUT);
2274         }
2275
2276         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2277                 /* For unconfigured devices, set the HCI_RAW flag
2278                  * so that userspace can easily identify them.
2279                  */
2280                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2281                         set_bit(HCI_RAW, &hdev->flags);
2282
2283                 /* For fully configured devices, this will send
2284                  * the Index Added event. For unconfigured devices,
2285                  * it will send the Unconfigured Index Added event.
2286                  *
2287                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2288                  * and no event will be sent.
2289                  */
2290                 mgmt_index_added(hdev);
2291         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2292                 /* Now that the controller is configured, it is
2293                  * important to clear the HCI_RAW flag.
2294                  */
2295                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2296                         clear_bit(HCI_RAW, &hdev->flags);
2297
2298                 /* Powering on the controller with HCI_CONFIG set only
2299                  * happens with the transition from unconfigured to
2300                  * configured. This will send the Index Added event.
2301                  */
2302                 mgmt_index_added(hdev);
2303         }
2304 }
2305
2306 static void hci_power_off(struct work_struct *work)
2307 {
2308         struct hci_dev *hdev = container_of(work, struct hci_dev,
2309                                             power_off.work);
2310
2311         BT_DBG("%s", hdev->name);
2312
2313         hci_dev_do_close(hdev);
2314 }
2315
2316 static void hci_error_reset(struct work_struct *work)
2317 {
2318         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2319
2320         BT_DBG("%s", hdev->name);
2321
2322         if (hdev->hw_error)
2323                 hdev->hw_error(hdev, hdev->hw_error_code);
2324         else
2325                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2326
2327         if (hci_dev_do_close(hdev))
2328                 return;
2329
2330         hci_dev_do_open(hdev);
2331 }
2332
2333 void hci_uuids_clear(struct hci_dev *hdev)
2334 {
2335         struct bt_uuid *uuid, *tmp;
2336
2337         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2338                 list_del(&uuid->list);
2339                 kfree(uuid);
2340         }
2341 }
2342
2343 void hci_link_keys_clear(struct hci_dev *hdev)
2344 {
2345         struct link_key *key;
2346
2347         list_for_each_entry(key, &hdev->link_keys, list) {
2348                 list_del_rcu(&key->list);
2349                 kfree_rcu(key, rcu);
2350         }
2351 }
2352
2353 void hci_smp_ltks_clear(struct hci_dev *hdev)
2354 {
2355         struct smp_ltk *k;
2356
2357         list_for_each_entry(k, &hdev->long_term_keys, list) {
2358                 list_del_rcu(&k->list);
2359                 kfree_rcu(k, rcu);
2360         }
2361 }
2362
2363 void hci_smp_irks_clear(struct hci_dev *hdev)
2364 {
2365         struct smp_irk *k;
2366
2367         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2368                 list_del_rcu(&k->list);
2369                 kfree_rcu(k, rcu);
2370         }
2371 }
2372
2373 void hci_blocked_keys_clear(struct hci_dev *hdev)
2374 {
2375         struct blocked_key *b;
2376
2377         list_for_each_entry(b, &hdev->blocked_keys, list) {
2378                 list_del_rcu(&b->list);
2379                 kfree_rcu(b, rcu);
2380         }
2381 }
2382
2383 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2384 {
2385         bool blocked = false;
2386         struct blocked_key *b;
2387
2388         rcu_read_lock();
2389         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2390                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2391                         blocked = true;
2392                         break;
2393                 }
2394         }
2395
2396         rcu_read_unlock();
2397         return blocked;
2398 }
2399
2400 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2401 {
2402         struct link_key *k;
2403
2404         rcu_read_lock();
2405         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2406                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2407                         rcu_read_unlock();
2408
2409                         if (hci_is_blocked_key(hdev,
2410                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2411                                                k->val)) {
2412                                 bt_dev_warn_ratelimited(hdev,
2413                                                         "Link key blocked for %pMR",
2414                                                         &k->bdaddr);
2415                                 return NULL;
2416                         }
2417
2418                         return k;
2419                 }
2420         }
2421         rcu_read_unlock();
2422
2423         return NULL;
2424 }
2425
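/* Decide whether a new BR/EDR link key should be stored persistently.
 * Legacy (pre-SSP) keys and keys derived over an LE link are always
 * kept, debug keys never are; for the remaining types the decision
 * depends on the bonding requirements both sides announced during
 * pairing.
 */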
2426 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2427                                u8 key_type, u8 old_key_type)
2428 {
2429         /* Legacy key */
2430         if (key_type < 0x03)
2431                 return true;
2432
2433         /* Debug keys are insecure so don't store them persistently */
2434         if (key_type == HCI_LK_DEBUG_COMBINATION)
2435                 return false;
2436
2437         /* Changed combination key and there's no previous one */
2438         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2439                 return false;
2440
2441         /* Security mode 3 case */
2442         if (!conn)
2443                 return true;
2444
2445         /* BR/EDR key derived using SC from an LE link */
2446         if (conn->type == LE_LINK)
2447                 return true;
2448
2449         /* Neither local nor remote side requested no-bonding */
2450         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2451                 return true;
2452
2453         /* Local side had dedicated bonding as requirement */
2454         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2455                 return true;
2456
2457         /* Remote side had dedicated bonding as requirement */
2458         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2459                 return true;
2460
2461         /* If none of the above criteria match, then don't store the key
2462          * persistently */
2463         return false;
2464 }
2465
2466 static u8 ltk_role(u8 type)
2467 {
2468         if (type == SMP_LTK)
2469                 return HCI_ROLE_MASTER;
2470
2471         return HCI_ROLE_SLAVE;
2472 }
2473
2474 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2475                              u8 addr_type, u8 role)
2476 {
2477         struct smp_ltk *k;
2478
2479         rcu_read_lock();
2480         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2481                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2482                         continue;
2483
2484                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2485                         rcu_read_unlock();
2486
2487                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2488                                                k->val)) {
2489                                 bt_dev_warn_ratelimited(hdev,
2490                                                         "LTK blocked for %pMR",
2491                                                         &k->bdaddr);
2492                                 return NULL;
2493                         }
2494
2495                         return k;
2496                 }
2497         }
2498         rcu_read_unlock();
2499
2500         return NULL;
2501 }
2502
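/* Resolve a Resolvable Private Address to its IRK. The first pass is a
 * cheap comparison against the last RPA cached in each IRK; the second
 * pass performs the actual cryptographic match (smp_irk_matches) and
 * caches the RPA on success. Blocked identity keys are filtered out
 * before returning.
 */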
2503 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2504 {
2505         struct smp_irk *irk_to_return = NULL;
2506         struct smp_irk *irk;
2507
2508         rcu_read_lock();
2509         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2510                 if (!bacmp(&irk->rpa, rpa)) {
2511                         irk_to_return = irk;
2512                         goto done;
2513                 }
2514         }
2515
2516         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2517                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2518                         bacpy(&irk->rpa, rpa);
2519                         irk_to_return = irk;
2520                         goto done;
2521                 }
2522         }
2523
2524 done:
2525         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2526                                                 irk_to_return->val)) {
2527                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2528                                         &irk_to_return->bdaddr);
2529                 irk_to_return = NULL;
2530         }
2531
2532         rcu_read_unlock();
2533
2534         return irk_to_return;
2535 }
2536
2537 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2538                                      u8 addr_type)
2539 {
2540         struct smp_irk *irk_to_return = NULL;
2541         struct smp_irk *irk;
2542
2543         /* Identity Address must be public or static random */
2544         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2545                 return NULL;
2546
2547         rcu_read_lock();
2548         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2549                 if (addr_type == irk->addr_type &&
2550                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2551                         irk_to_return = irk;
2552                         goto done;
2553                 }
2554         }
2555
2556 done:
2558         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2559                                                 irk_to_return->val)) {
2560                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2561                                         &irk_to_return->bdaddr);
2562                 irk_to_return = NULL;
2563         }
2564
2565         rcu_read_unlock();
2566
2567         return irk_to_return;
2568 }
2569
2570 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2571                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2572                                   u8 pin_len, bool *persistent)
2573 {
2574         struct link_key *key, *old_key;
2575         u8 old_key_type;
2576
2577         old_key = hci_find_link_key(hdev, bdaddr);
2578         if (old_key) {
2579                 old_key_type = old_key->type;
2580                 key = old_key;
2581         } else {
2582                 old_key_type = conn ? conn->key_type : 0xff;
2583                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2584                 if (!key)
2585                         return NULL;
2586                 list_add_rcu(&key->list, &hdev->link_keys);
2587         }
2588
2589         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2590
2591         /* Some buggy controller combinations generate a changed
2592          * combination key for legacy pairing even when there's no
2593          * previous key */
2594         if (type == HCI_LK_CHANGED_COMBINATION &&
2595             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2596                 type = HCI_LK_COMBINATION;
2597                 if (conn)
2598                         conn->key_type = type;
2599         }
2600
2601         bacpy(&key->bdaddr, bdaddr);
2602         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2603         key->pin_len = pin_len;
2604
2605         if (type == HCI_LK_CHANGED_COMBINATION)
2606                 key->type = old_key_type;
2607         else
2608                 key->type = type;
2609
2610         if (persistent)
2611                 *persistent = hci_persistent_key(hdev, conn, type,
2612                                                  old_key_type);
2613
2614         return key;
2615 }
2616
2617 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2618                             u8 addr_type, u8 type, u8 authenticated,
2619                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2620 {
2621         struct smp_ltk *key, *old_key;
2622         u8 role = ltk_role(type);
2623
2624         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2625         if (old_key)
2626                 key = old_key;
2627         else {
2628                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2629                 if (!key)
2630                         return NULL;
2631                 list_add_rcu(&key->list, &hdev->long_term_keys);
2632         }
2633
2634         bacpy(&key->bdaddr, bdaddr);
2635         key->bdaddr_type = addr_type;
2636         memcpy(key->val, tk, sizeof(key->val));
2637         key->authenticated = authenticated;
2638         key->ediv = ediv;
2639         key->rand = rand;
2640         key->enc_size = enc_size;
2641         key->type = type;
2642
2643         return key;
2644 }
2645
2646 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2647                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2648 {
2649         struct smp_irk *irk;
2650
2651         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2652         if (!irk) {
2653                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2654                 if (!irk)
2655                         return NULL;
2656
2657                 bacpy(&irk->bdaddr, bdaddr);
2658                 irk->addr_type = addr_type;
2659
2660                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2661         }
2662
2663         memcpy(irk->val, val, 16);
2664         bacpy(&irk->rpa, rpa);
2665
2666         return irk;
2667 }
2668
2669 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2670 {
2671         struct link_key *key;
2672
2673         key = hci_find_link_key(hdev, bdaddr);
2674         if (!key)
2675                 return -ENOENT;
2676
2677         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2678
2679         list_del_rcu(&key->list);
2680         kfree_rcu(key, rcu);
2681
2682         return 0;
2683 }
2684
2685 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2686 {
2687         struct smp_ltk *k;
2688         int removed = 0;
2689
2690         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2691                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2692                         continue;
2693
2694                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2695
2696                 list_del_rcu(&k->list);
2697                 kfree_rcu(k, rcu);
2698                 removed++;
2699         }
2700
2701         return removed ? 0 : -ENOENT;
2702 }
2703
2704 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2705 {
2706         struct smp_irk *k;
2707
2708         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2709                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2710                         continue;
2711
2712                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2713
2714                 list_del_rcu(&k->list);
2715                 kfree_rcu(k, rcu);
2716         }
2717 }
2718
2719 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2720 {
2721         struct smp_ltk *k;
2722         struct smp_irk *irk;
2723         u8 addr_type;
2724
2725         if (type == BDADDR_BREDR) {
2726                 if (hci_find_link_key(hdev, bdaddr))
2727                         return true;
2728                 return false;
2729         }
2730
2731         /* Convert to HCI addr type which struct smp_ltk uses */
2732         if (type == BDADDR_LE_PUBLIC)
2733                 addr_type = ADDR_LE_DEV_PUBLIC;
2734         else
2735                 addr_type = ADDR_LE_DEV_RANDOM;
2736
2737         irk = hci_get_irk(hdev, bdaddr, addr_type);
2738         if (irk) {
2739                 bdaddr = &irk->bdaddr;
2740                 addr_type = irk->addr_type;
2741         }
2742
2743         rcu_read_lock();
2744         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2745                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2746                         rcu_read_unlock();
2747                         return true;
2748                 }
2749         }
2750         rcu_read_unlock();
2751
2752         return false;
2753 }
2754
2755 /* HCI command timer function */
2756 static void hci_cmd_timeout(struct work_struct *work)
2757 {
2758         struct hci_dev *hdev = container_of(work, struct hci_dev,
2759                                             cmd_timer.work);
2760
2761         if (hdev->sent_cmd) {
2762                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2763                 u16 opcode = __le16_to_cpu(sent->opcode);
2764
2765                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2766         } else {
2767                 bt_dev_err(hdev, "command tx timeout");
2768         }
2769
2770         if (hdev->cmd_timeout)
2771                 hdev->cmd_timeout(hdev);
2772
2773         atomic_set(&hdev->cmd_cnt, 1);
2774         queue_work(hdev->workqueue, &hdev->cmd_work);
2775 }
2776
2777 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2778                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2779 {
2780         struct oob_data *data;
2781
2782         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2783                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2784                         continue;
2785                 if (data->bdaddr_type != bdaddr_type)
2786                         continue;
2787                 return data;
2788         }
2789
2790         return NULL;
2791 }
2792
2793 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2794                                u8 bdaddr_type)
2795 {
2796         struct oob_data *data;
2797
2798         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2799         if (!data)
2800                 return -ENOENT;
2801
2802         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2803
2804         list_del(&data->list);
2805         kfree(data);
2806
2807         return 0;
2808 }
2809
2810 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2811 {
2812         struct oob_data *data, *n;
2813
2814         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2815                 list_del(&data->list);
2816                 kfree(data);
2817         }
2818 }
2819
2820 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2821                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2822                             u8 *hash256, u8 *rand256)
2823 {
2824         struct oob_data *data;
2825
2826         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2827         if (!data) {
2828                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2829                 if (!data)
2830                         return -ENOMEM;
2831
2832                 bacpy(&data->bdaddr, bdaddr);
2833                 data->bdaddr_type = bdaddr_type;
2834                 list_add(&data->list, &hdev->remote_oob_data);
2835         }
2836
2837         if (hash192 && rand192) {
2838                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2839                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2840                 if (hash256 && rand256)
2841                         data->present = 0x03;
2842         } else {
2843                 memset(data->hash192, 0, sizeof(data->hash192));
2844                 memset(data->rand192, 0, sizeof(data->rand192));
2845                 if (hash256 && rand256)
2846                         data->present = 0x02;
2847                 else
2848                         data->present = 0x00;
2849         }
2850
2851         if (hash256 && rand256) {
2852                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2853                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2854         } else {
2855                 memset(data->hash256, 0, sizeof(data->hash256));
2856                 memset(data->rand256, 0, sizeof(data->rand256));
2857                 if (hash192 && rand192)
2858                         data->present = 0x01;
2859         }
2860
2861         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2862
2863         return 0;
2864 }
2865
2866 /* This function requires the caller holds hdev->lock */
2867 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2868 {
2869         struct adv_info *adv_instance;
2870
2871         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2872                 if (adv_instance->instance == instance)
2873                         return adv_instance;
2874         }
2875
2876         return NULL;
2877 }
2878
2879 /* This function requires the caller holds hdev->lock */
2880 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2881 {
2882         struct adv_info *cur_instance;
2883
2884         cur_instance = hci_find_adv_instance(hdev, instance);
2885         if (!cur_instance)
2886                 return NULL;
2887
2888         if (cur_instance == list_last_entry(&hdev->adv_instances,
2889                                             struct adv_info, list))
2890                 return list_first_entry(&hdev->adv_instances,
2891                                                  struct adv_info, list);
2892         else
2893                 return list_next_entry(cur_instance, list);
2894 }
2895
2896 /* This function requires the caller holds hdev->lock */
2897 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2898 {
2899         struct adv_info *adv_instance;
2900
2901         adv_instance = hci_find_adv_instance(hdev, instance);
2902         if (!adv_instance)
2903                 return -ENOENT;
2904
2905         BT_DBG("%s removing instance %d", hdev->name, instance);
2906
2907         if (hdev->cur_adv_instance == instance) {
2908                 if (hdev->adv_instance_timeout) {
2909                         cancel_delayed_work(&hdev->adv_instance_expire);
2910                         hdev->adv_instance_timeout = 0;
2911                 }
2912                 hdev->cur_adv_instance = 0x00;
2913         }
2914
2915         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2916
2917         list_del(&adv_instance->list);
2918         kfree(adv_instance);
2919
2920         hdev->adv_instance_cnt--;
2921
2922         return 0;
2923 }
2924
2925 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2926 {
2927         struct adv_info *adv_instance, *n;
2928
2929         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2930                 adv_instance->rpa_expired = rpa_expired;
2931 }
2932
2933 /* This function requires the caller holds hdev->lock */
2934 void hci_adv_instances_clear(struct hci_dev *hdev)
2935 {
2936         struct adv_info *adv_instance, *n;
2937
2938         if (hdev->adv_instance_timeout) {
2939                 cancel_delayed_work(&hdev->adv_instance_expire);
2940                 hdev->adv_instance_timeout = 0;
2941         }
2942
2943         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2944                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2945                 list_del(&adv_instance->list);
2946                 kfree(adv_instance);
2947         }
2948
2949         hdev->adv_instance_cnt = 0;
2950         hdev->cur_adv_instance = 0x00;
2951 }
2952
2953 static void adv_instance_rpa_expired(struct work_struct *work)
2954 {
2955         struct adv_info *adv_instance = container_of(work, struct adv_info,
2956                                                      rpa_expired_cb.work);
2957
2958         BT_DBG("");
2959
2960         adv_instance->rpa_expired = true;
2961 }
2962
2963 /* This function requires the caller holds hdev->lock */
2964 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2965                          u16 adv_data_len, u8 *adv_data,
2966                          u16 scan_rsp_len, u8 *scan_rsp_data,
2967                          u16 timeout, u16 duration)
2968 {
2969         struct adv_info *adv_instance;
2970
2971         adv_instance = hci_find_adv_instance(hdev, instance);
2972         if (adv_instance) {
2973                 memset(adv_instance->adv_data, 0,
2974                        sizeof(adv_instance->adv_data));
2975                 memset(adv_instance->scan_rsp_data, 0,
2976                        sizeof(adv_instance->scan_rsp_data));
2977         } else {
2978                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2979                     instance < 1 || instance > hdev->le_num_of_adv_sets)
2980                         return -EOVERFLOW;
2981
2982                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2983                 if (!adv_instance)
2984                         return -ENOMEM;
2985
2986                 adv_instance->pending = true;
2987                 adv_instance->instance = instance;
2988                 list_add(&adv_instance->list, &hdev->adv_instances);
2989                 hdev->adv_instance_cnt++;
2990         }
2991
2992         adv_instance->flags = flags;
2993         adv_instance->adv_data_len = adv_data_len;
2994         adv_instance->scan_rsp_len = scan_rsp_len;
2995
2996         if (adv_data_len)
2997                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2998
2999         if (scan_rsp_len)
3000                 memcpy(adv_instance->scan_rsp_data,
3001                        scan_rsp_data, scan_rsp_len);
3002
3003         adv_instance->timeout = timeout;
3004         adv_instance->remaining_time = timeout;
3005
3006         if (duration == 0)
3007                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3008         else
3009                 adv_instance->duration = duration;
3010
3011         adv_instance->tx_power = HCI_TX_POWER_INVALID;
3012
3013         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3014                           adv_instance_rpa_expired);
3015
3016         BT_DBG("%s for instance %d", hdev->name, instance);
3017
3018         return 0;
3019 }
3020
3021 /* This function requires the caller holds hdev->lock */
3022 void hci_adv_monitors_clear(struct hci_dev *hdev)
3023 {
3024         struct adv_monitor *monitor;
3025         int handle;
3026
3027         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3028                 hci_free_adv_monitor(monitor);
3029
3030         idr_destroy(&hdev->adv_monitors_idr);
3031 }
3032
3033 void hci_free_adv_monitor(struct adv_monitor *monitor)
3034 {
3035         struct adv_pattern *pattern;
3036         struct adv_pattern *tmp;
3037
3038         if (!monitor)
3039                 return;
3040
3041         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list)
3042                 kfree(pattern);
3043
3044         kfree(monitor);
3045 }
3046
3047 /* This function requires the caller holds hdev->lock */
3048 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3049 {
3050         int min, max, handle;
3051
3052         if (!monitor)
3053                 return -EINVAL;
3054
3055         min = HCI_MIN_ADV_MONITOR_HANDLE;
3056         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3057         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3058                            GFP_KERNEL);
3059         if (handle < 0)
3060                 return handle;
3061
3062         hdev->adv_monitors_cnt++;
3063         monitor->handle = handle;
3064
3065         hci_update_background_scan(hdev);
3066
3067         return 0;
3068 }
3069
3070 static int free_adv_monitor(int id, void *ptr, void *data)
3071 {
3072         struct hci_dev *hdev = data;
3073         struct adv_monitor *monitor = ptr;
3074
3075         idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3076         hci_free_adv_monitor(monitor);
3077         hdev->adv_monitors_cnt--;
3078
3079         return 0;
3080 }
3081
3082 /* This function requires the caller holds hdev->lock */
3083 int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle)
3084 {
3085         struct adv_monitor *monitor;
3086
3087         if (handle) {
3088                 monitor = idr_find(&hdev->adv_monitors_idr, handle);
3089                 if (!monitor)
3090                         return -ENOENT;
3091
3092                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3093                 hci_free_adv_monitor(monitor);
3094                 hdev->adv_monitors_cnt--;
3095         } else {
3096                 /* Remove all monitors if handle is 0. */
3097                 idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
3098         }
3099
3100         hci_update_background_scan(hdev);
3101
3102         return 0;
3103 }
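/* Editorial sketch (not part of the original file): since a zero handle is
 * interpreted as "remove everything", a hypothetical caller holding
 * hdev->lock can tear down one monitor or all of them:
 *
 *	err = hci_remove_adv_monitor(hdev, monitor_handle);	// one monitor
 *	err = hci_remove_adv_monitor(hdev, 0);			// all monitors
 *
 * Both paths finish with hci_update_background_scan(), so the controller's
 * scan state is re-evaluated after removal.
 */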
3104
3105 /* This function requires the caller holds hdev->lock */
3106 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3107 {
3108         return !idr_is_empty(&hdev->adv_monitors_idr);
3109 }
3110
3111 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3112                                          bdaddr_t *bdaddr, u8 type)
3113 {
3114         struct bdaddr_list *b;
3115
3116         list_for_each_entry(b, bdaddr_list, list) {
3117                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3118                         return b;
3119         }
3120
3121         return NULL;
3122 }
3123
3124 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3125                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3126                                 u8 type)
3127 {
3128         struct bdaddr_list_with_irk *b;
3129
3130         list_for_each_entry(b, bdaddr_list, list) {
3131                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3132                         return b;
3133         }
3134
3135         return NULL;
3136 }
3137
3138 struct bdaddr_list_with_flags *
3139 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3140                                   bdaddr_t *bdaddr, u8 type)
3141 {
3142         struct bdaddr_list_with_flags *b;
3143
3144         list_for_each_entry(b, bdaddr_list, list) {
3145                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3146                         return b;
3147         }
3148
3149         return NULL;
3150 }
3151
3152 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3153 {
3154         struct bdaddr_list *b, *n;
3155
3156         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3157                 list_del(&b->list);
3158                 kfree(b);
3159         }
3160 }
3161
3162 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3163 {
3164         struct bdaddr_list *entry;
3165
3166         if (!bacmp(bdaddr, BDADDR_ANY))
3167                 return -EBADF;
3168
3169         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3170                 return -EEXIST;
3171
3172         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3173         if (!entry)
3174                 return -ENOMEM;
3175
3176         bacpy(&entry->bdaddr, bdaddr);
3177         entry->bdaddr_type = type;
3178
3179         list_add(&entry->list, list);
3180
3181         return 0;
3182 }
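/* Editorial sketch (not part of the original file): hci_bdaddr_list_add()
 * and its lookup/del counterparts implement a simple set keyed on
 * (bdaddr, type). A hypothetical round trip:
 *
 *	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 *	if (hci_bdaddr_list_lookup(&hdev->reject_list, &bdaddr, BDADDR_BREDR))
 *		err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr,
 *					  BDADDR_BREDR);
 *
 * BDADDR_ANY is rejected on add (-EBADF) but acts as a wildcard on delete,
 * clearing the whole list.
 */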
3183
3184 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3185                                         u8 type, u8 *peer_irk, u8 *local_irk)
3186 {
3187         struct bdaddr_list_with_irk *entry;
3188
3189         if (!bacmp(bdaddr, BDADDR_ANY))
3190                 return -EBADF;
3191
3192         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3193                 return -EEXIST;
3194
3195         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3196         if (!entry)
3197                 return -ENOMEM;
3198
3199         bacpy(&entry->bdaddr, bdaddr);
3200         entry->bdaddr_type = type;
3201
3202         if (peer_irk)
3203                 memcpy(entry->peer_irk, peer_irk, 16);
3204
3205         if (local_irk)
3206                 memcpy(entry->local_irk, local_irk, 16);
3207
3208         list_add(&entry->list, list);
3209
3210         return 0;
3211 }
3212
3213 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3214                                    u8 type, u32 flags)
3215 {
3216         struct bdaddr_list_with_flags *entry;
3217
3218         if (!bacmp(bdaddr, BDADDR_ANY))
3219                 return -EBADF;
3220
3221         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3222                 return -EEXIST;
3223
3224         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3225         if (!entry)
3226                 return -ENOMEM;
3227
3228         bacpy(&entry->bdaddr, bdaddr);
3229         entry->bdaddr_type = type;
3230         entry->current_flags = flags;
3231
3232         list_add(&entry->list, list);
3233
3234         return 0;
3235 }
3236
3237 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3238 {
3239         struct bdaddr_list *entry;
3240
3241         if (!bacmp(bdaddr, BDADDR_ANY)) {
3242                 hci_bdaddr_list_clear(list);
3243                 return 0;
3244         }
3245
3246         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3247         if (!entry)
3248                 return -ENOENT;
3249
3250         list_del(&entry->list);
3251         kfree(entry);
3252
3253         return 0;
3254 }
3255
3256 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3257                                                         u8 type)
3258 {
3259         struct bdaddr_list_with_irk *entry;
3260
3261         if (!bacmp(bdaddr, BDADDR_ANY)) {
3262                 hci_bdaddr_list_clear(list);
3263                 return 0;
3264         }
3265
3266         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3267         if (!entry)
3268                 return -ENOENT;
3269
3270         list_del(&entry->list);
3271         kfree(entry);
3272
3273         return 0;
3274 }
3275
3276 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3277                                    u8 type)
3278 {
3279         struct bdaddr_list_with_flags *entry;
3280
3281         if (!bacmp(bdaddr, BDADDR_ANY)) {
3282                 hci_bdaddr_list_clear(list);
3283                 return 0;
3284         }
3285
3286         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3287         if (!entry)
3288                 return -ENOENT;
3289
3290         list_del(&entry->list);
3291         kfree(entry);
3292
3293         return 0;
3294 }
3295
3296 /* This function requires the caller holds hdev->lock */
3297 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3298                                                bdaddr_t *addr, u8 addr_type)
3299 {
3300         struct hci_conn_params *params;
3301
3302         list_for_each_entry(params, &hdev->le_conn_params, list) {
3303                 if (bacmp(&params->addr, addr) == 0 &&
3304                     params->addr_type == addr_type) {
3305                         return params;
3306                 }
3307         }
3308
3309         return NULL;
3310 }
3311
3312 /* This function requires the caller holds hdev->lock */
3313 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3314                                                   bdaddr_t *addr, u8 addr_type)
3315 {
3316         struct hci_conn_params *param;
3317
3318         switch (addr_type) {
3319         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3320                 addr_type = ADDR_LE_DEV_PUBLIC;
3321                 break;
3322         case ADDR_LE_DEV_RANDOM_RESOLVED:
3323                 addr_type = ADDR_LE_DEV_RANDOM;
3324                 break;
3325         }
3326
3327         list_for_each_entry(param, list, action) {
3328                 if (bacmp(&param->addr, addr) == 0 &&
3329                     param->addr_type == addr_type)
3330                         return param;
3331         }
3332
3333         return NULL;
3334 }
3335
3336 /* This function requires the caller holds hdev->lock */
3337 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3338                                             bdaddr_t *addr, u8 addr_type)
3339 {
3340         struct hci_conn_params *params;
3341
3342         params = hci_conn_params_lookup(hdev, addr, addr_type);
3343         if (params)
3344                 return params;
3345
3346         params = kzalloc(sizeof(*params), GFP_KERNEL);
3347         if (!params) {
3348                 bt_dev_err(hdev, "out of memory");
3349                 return NULL;
3350         }
3351
3352         bacpy(&params->addr, addr);
3353         params->addr_type = addr_type;
3354
3355         list_add(&params->list, &hdev->le_conn_params);
3356         INIT_LIST_HEAD(&params->action);
3357
3358         params->conn_min_interval = hdev->le_conn_min_interval;
3359         params->conn_max_interval = hdev->le_conn_max_interval;
3360         params->conn_latency = hdev->le_conn_latency;
3361         params->supervision_timeout = hdev->le_supv_timeout;
3362         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3363
3364         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3365
3366         return params;
3367 }
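/* Editorial sketch (not part of the original file): hci_conn_params_add()
 * is get-or-create, so a hypothetical caller under hdev->lock can use it
 * unconditionally and then adjust the defaults it filled in:
 *
 *	struct hci_conn_params *params;
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *
 * Calling it again with the same (addr, type) returns the existing entry
 * rather than allocating a duplicate.
 */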
3368
3369 static void hci_conn_params_free(struct hci_conn_params *params)
3370 {
3371         if (params->conn) {
3372                 hci_conn_drop(params->conn);
3373                 hci_conn_put(params->conn);
3374         }
3375
3376         list_del(&params->action);
3377         list_del(&params->list);
3378         kfree(params);
3379 }
3380
3381 /* This function requires the caller holds hdev->lock */
3382 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3383 {
3384         struct hci_conn_params *params;
3385
3386         params = hci_conn_params_lookup(hdev, addr, addr_type);
3387         if (!params)
3388                 return;
3389
3390         hci_conn_params_free(params);
3391
3392         hci_update_background_scan(hdev);
3393
3394         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3395 }
3396
3397 /* This function requires the caller holds hdev->lock */
3398 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3399 {
3400         struct hci_conn_params *params, *tmp;
3401
3402         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3403                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3404                         continue;
3405
3406                 /* When trying to establish a one-time connection to a
3407                  * disabled device, keep the params but mark them explicit.
3408                  */
3409                 if (params->explicit_connect) {
3410                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3411                         continue;
3412                 }
3413
3414                 list_del(&params->list);
3415                 kfree(params);
3416         }
3417
3418         BT_DBG("All LE disabled connection parameters were removed");
3419 }
3420
3421 /* This function requires the caller holds hdev->lock */
3422 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3423 {
3424         struct hci_conn_params *params, *tmp;
3425
3426         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3427                 hci_conn_params_free(params);
3428
3429         BT_DBG("All LE connection parameters were removed");
3430 }
3431
3432 /* Copy the Identity Address of the controller.
3433  *
3434  * If the controller has a public BD_ADDR, then by default use that one.
3435  * If this is an LE-only controller without a public address, default to
3436  * the static random address.
3437  *
3438  * For debugging purposes it is possible to force controllers with a
3439  * public address to use the static random address instead.
3440  *
3441  * In case BR/EDR has been disabled on a dual-mode controller and
3442  * userspace has configured a static address, then that address
3443  * becomes the identity address instead of the public BR/EDR address.
3444  */
3445 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3446                                u8 *bdaddr_type)
3447 {
3448         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3449             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3450             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3451              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3452                 bacpy(bdaddr, &hdev->static_addr);
3453                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3454         } else {
3455                 bacpy(bdaddr, &hdev->bdaddr);
3456                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3457         }
3458 }
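/* Editorial sketch (not part of the original file): a hypothetical caller
 * consumes the copied identity address together with its type:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *
 * Per the rules documented above, id_addr_type is ADDR_LE_DEV_RANDOM when
 * the static random address is used and ADDR_LE_DEV_PUBLIC otherwise.
 */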
3459
3460 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3461 {
3462         int i;
3463
3464         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3465                 clear_bit(i, hdev->suspend_tasks);
3466
3467         wake_up(&hdev->suspend_wait_q);
3468 }
3469
3470 static int hci_suspend_wait_event(struct hci_dev *hdev)
3471 {
3472 #define WAKE_COND                                                              \
3473         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3474          __SUSPEND_NUM_TASKS)
3475
3476         int i;
3477         int ret = wait_event_timeout(hdev->suspend_wait_q,
3478                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3479
3480         if (ret == 0) {
3481                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3482                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3483                         if (test_bit(i, hdev->suspend_tasks))
3484                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3485                         clear_bit(i, hdev->suspend_tasks);
3486                 }
3487
3488                 ret = -ETIMEDOUT;
3489         } else {
3490                 ret = 0;
3491         }
3492
3493         return ret;
3494 }
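/* Editorial note with a worked example (not part of the original file):
 * WAKE_COND holds once no bit below __SUSPEND_NUM_TASKS is set, because
 * find_first_bit() returns the bitmap size when the bitmap is empty. With
 * a hypothetical __SUSPEND_NUM_TASKS of 3 and suspend_tasks 0b010, the
 * wait continues (find_first_bit() == 1); once bit 1 is cleared and
 * suspend_wait_q is woken, find_first_bit() == 3 and the waiter proceeds.
 */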
3495
3496 static void hci_prepare_suspend(struct work_struct *work)
3497 {
3498         struct hci_dev *hdev =
3499                 container_of(work, struct hci_dev, suspend_prepare);
3500
3501         hci_dev_lock(hdev);
3502         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3503         hci_dev_unlock(hdev);
3504 }
3505
3506 static int hci_change_suspend_state(struct hci_dev *hdev,
3507                                     enum suspended_state next)
3508 {
3509         hdev->suspend_state_next = next;
3510         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3511         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3512         return hci_suspend_wait_event(hdev);
3513 }
3514
3515 static void hci_clear_wake_reason(struct hci_dev *hdev)
3516 {
3517         hci_dev_lock(hdev);
3518
3519         hdev->wake_reason = 0;
3520         bacpy(&hdev->wake_addr, BDADDR_ANY);
3521         hdev->wake_addr_type = 0;
3522
3523         hci_dev_unlock(hdev);
3524 }
3525
3526 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3527                                 void *data)
3528 {
3529         struct hci_dev *hdev =
3530                 container_of(nb, struct hci_dev, suspend_notifier);
3531         int ret = 0;
3532         u8 state = BT_RUNNING;
3533
3534         /* If powering down, wait for completion. */
3535         if (mgmt_powering_down(hdev)) {
3536                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3537                 ret = hci_suspend_wait_event(hdev);
3538                 if (ret)
3539                         goto done;
3540         }
3541
3542         /* Suspend notifier should only act on events when powered. */
3543         if (!hdev_is_powered(hdev) ||
3544             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3545                 goto done;
3546
3547         if (action == PM_SUSPEND_PREPARE) {
3548                 /* Suspend consists of two actions:
3549                  *  - First, disconnect everything and make the controller not
3550                  *    connectable (disabling scanning)
3551                  *  - Second, program event filter/accept list and enable scan
3552                  */
3553                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3554                 if (!ret)
3555                         state = BT_SUSPEND_DISCONNECT;
3556
3557                 /* Only configure accept list if disconnect succeeded and wake
3558                  * isn't being prevented.
3559                  */
3560                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3561                         ret = hci_change_suspend_state(hdev,
3562                                                 BT_SUSPEND_CONFIGURE_WAKE);
3563                         if (!ret)
3564                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3565                 }
3566
3567                 hci_clear_wake_reason(hdev);
3568                 mgmt_suspending(hdev, state);
3569
3570         } else if (action == PM_POST_SUSPEND) {
3571                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3572
3573                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3574                               hdev->wake_addr_type);
3575         }
3576
3577 done:
3578         /* We always allow suspend even if suspend preparation failed, and
3579          * attempt to recover on resume.
3580          */
3581         if (ret)
3582                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3583                            action, ret);
3584
3585         return NOTIFY_DONE;
3586 }
3587
3588 /* Alloc HCI device */
3589 struct hci_dev *hci_alloc_dev(void)
3590 {
3591         struct hci_dev *hdev;
3592
3593         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3594         if (!hdev)
3595                 return NULL;
3596
3597         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3598         hdev->esco_type = (ESCO_HV1);
3599         hdev->link_mode = (HCI_LM_ACCEPT);
3600         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3601         hdev->io_capability = 0x03;     /* No Input No Output */
3602         hdev->manufacturer = 0xffff;    /* Default to internal use */
3603         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3604         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3605         hdev->adv_instance_cnt = 0;
3606         hdev->cur_adv_instance = 0x00;
3607         hdev->adv_instance_timeout = 0;
3608
3609         hdev->advmon_allowlist_duration = 300;
3610         hdev->advmon_no_filter_duration = 500;
3611
3612         hdev->sniff_max_interval = 800;
3613         hdev->sniff_min_interval = 80;
3614
3615         hdev->le_adv_channel_map = 0x07;
3616         hdev->le_adv_min_interval = 0x0800;
3617         hdev->le_adv_max_interval = 0x0800;
3618         hdev->le_scan_interval = 0x0060;
3619         hdev->le_scan_window = 0x0030;
3620         hdev->le_scan_int_suspend = 0x0400;
3621         hdev->le_scan_window_suspend = 0x0012;
3622         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3623         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3624         hdev->le_scan_int_connect = 0x0060;
3625         hdev->le_scan_window_connect = 0x0060;
3626         hdev->le_conn_min_interval = 0x0018;
3627         hdev->le_conn_max_interval = 0x0028;
3628         hdev->le_conn_latency = 0x0000;
3629         hdev->le_supv_timeout = 0x002a;
3630         hdev->le_def_tx_len = 0x001b;
3631         hdev->le_def_tx_time = 0x0148;
3632         hdev->le_max_tx_len = 0x001b;
3633         hdev->le_max_tx_time = 0x0148;
3634         hdev->le_max_rx_len = 0x001b;
3635         hdev->le_max_rx_time = 0x0148;
3636         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3637         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3638         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3639         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3640         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3641         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3642         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3643
3644         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3645         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3646         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3647         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3648         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3649         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3650
3651         /* default 1.28 sec page scan */
3652         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3653         hdev->def_page_scan_int = 0x0800;
3654         hdev->def_page_scan_window = 0x0012;
3655
3656         mutex_init(&hdev->lock);
3657         mutex_init(&hdev->req_lock);
3658
3659         INIT_LIST_HEAD(&hdev->mgmt_pending);
3660         INIT_LIST_HEAD(&hdev->reject_list);
3661         INIT_LIST_HEAD(&hdev->accept_list);
3662         INIT_LIST_HEAD(&hdev->uuids);
3663         INIT_LIST_HEAD(&hdev->link_keys);
3664         INIT_LIST_HEAD(&hdev->long_term_keys);
3665         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3666         INIT_LIST_HEAD(&hdev->remote_oob_data);
3667         INIT_LIST_HEAD(&hdev->le_accept_list);
3668         INIT_LIST_HEAD(&hdev->le_resolv_list);
3669         INIT_LIST_HEAD(&hdev->le_conn_params);
3670         INIT_LIST_HEAD(&hdev->pend_le_conns);
3671         INIT_LIST_HEAD(&hdev->pend_le_reports);
3672         INIT_LIST_HEAD(&hdev->conn_hash.list);
3673         INIT_LIST_HEAD(&hdev->adv_instances);
3674         INIT_LIST_HEAD(&hdev->blocked_keys);
3675
3676         INIT_WORK(&hdev->rx_work, hci_rx_work);
3677         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3678         INIT_WORK(&hdev->tx_work, hci_tx_work);
3679         INIT_WORK(&hdev->power_on, hci_power_on);
3680         INIT_WORK(&hdev->error_reset, hci_error_reset);
3681         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3682
3683         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3684
3685         skb_queue_head_init(&hdev->rx_q);
3686         skb_queue_head_init(&hdev->cmd_q);
3687         skb_queue_head_init(&hdev->raw_q);
3688
3689         init_waitqueue_head(&hdev->req_wait_q);
3690         init_waitqueue_head(&hdev->suspend_wait_q);
3691
3692         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3693
3694         hci_request_setup(hdev);
3695
3696         hci_init_sysfs(hdev);
3697         discovery_init(hdev);
3698
3699         return hdev;
3700 }
3701 EXPORT_SYMBOL(hci_alloc_dev);
3702
3703 /* Free HCI device */
3704 void hci_free_dev(struct hci_dev *hdev)
3705 {
3706         /* will be freed via the device release callback */
3707         put_device(&hdev->dev);
3708 }
3709 EXPORT_SYMBOL(hci_free_dev);
3710
3711 /* Register HCI device */
3712 int hci_register_dev(struct hci_dev *hdev)
3713 {
3714         int id, error;
3715
3716         if (!hdev->open || !hdev->close || !hdev->send)
3717                 return -EINVAL;
3718
3719         /* Do not allow HCI_AMP devices to register at index 0,
3720          * so the index can be used as the AMP controller ID.
3721          */
3722         switch (hdev->dev_type) {
3723         case HCI_PRIMARY:
3724                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3725                 break;
3726         case HCI_AMP:
3727                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3728                 break;
3729         default:
3730                 return -EINVAL;
3731         }
3732
3733         if (id < 0)
3734                 return id;
3735
3736         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3737         hdev->id = id;
3738
3739         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3740
3741         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3742         if (!hdev->workqueue) {
3743                 error = -ENOMEM;
3744                 goto err;
3745         }
3746
3747         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3748                                                       hdev->name);
3749         if (!hdev->req_workqueue) {
3750                 destroy_workqueue(hdev->workqueue);
3751                 error = -ENOMEM;
3752                 goto err;
3753         }
3754
3755         if (!IS_ERR_OR_NULL(bt_debugfs))
3756                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3757
3758         dev_set_name(&hdev->dev, "%s", hdev->name);
3759
3760         error = device_add(&hdev->dev);
3761         if (error < 0)
3762                 goto err_wqueue;
3763
3764         hci_leds_init(hdev);
3765
3766         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3767                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3768                                     hdev);
3769         if (hdev->rfkill) {
3770                 if (rfkill_register(hdev->rfkill) < 0) {
3771                         rfkill_destroy(hdev->rfkill);
3772                         hdev->rfkill = NULL;
3773                 }
3774         }
3775
3776         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3777                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3778
3779         hci_dev_set_flag(hdev, HCI_SETUP);
3780         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3781
3782         if (hdev->dev_type == HCI_PRIMARY) {
3783                 /* Assume BR/EDR support until proven otherwise (such as
3784                  * through reading the supported features during init).
3785                  */
3786                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3787         }
3788
3789         write_lock(&hci_dev_list_lock);
3790         list_add(&hdev->list, &hci_dev_list);
3791         write_unlock(&hci_dev_list_lock);
3792
3793         /* Devices that are marked for raw-only usage are unconfigured
3794          * and should not be included in normal operation.
3795          */
3796         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3797                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3798
3799         hci_sock_dev_event(hdev, HCI_DEV_REG);
3800         hci_dev_hold(hdev);
3801
3802         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3803                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3804                 error = register_pm_notifier(&hdev->suspend_notifier);
3805                 if (error)
3806                         goto err_wqueue;
3807         }
3808
3809         queue_work(hdev->req_workqueue, &hdev->power_on);
3810
3811         idr_init(&hdev->adv_monitors_idr);
3812
3813         return id;
3814
3815 err_wqueue:
3816         debugfs_remove_recursive(hdev->debugfs);
3817         destroy_workqueue(hdev->workqueue);
3818         destroy_workqueue(hdev->req_workqueue);
3819 err:
3820         ida_simple_remove(&hci_index_ida, hdev->id);
3821
3822         return error;
3823 }
3824 EXPORT_SYMBOL(hci_register_dev);
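/* Editorial sketch (not part of the original file): a minimal driver-side
 * registration sequence, with hypothetical example_open/close/send
 * callbacks, could look like this:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	int err;
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_VIRTUAL;
 *	hdev->open  = example_open;	// all three callbacks are mandatory
 *	hdev->close = example_close;
 *	hdev->send  = example_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);	// undo the allocation on failure
 *
 * On success hci_register_dev() returns the allocated index (>= 0).
 */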
3825
3826 /* Unregister HCI device */
3827 void hci_unregister_dev(struct hci_dev *hdev)
3828 {
3829         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3830
3831         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3832
3833         write_lock(&hci_dev_list_lock);
3834         list_del(&hdev->list);
3835         write_unlock(&hci_dev_list_lock);
3836
3837         cancel_work_sync(&hdev->power_on);
3838
3839         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3840                 hci_suspend_clear_tasks(hdev);
3841                 unregister_pm_notifier(&hdev->suspend_notifier);
3842                 cancel_work_sync(&hdev->suspend_prepare);
3843         }
3844
3845         hci_dev_do_close(hdev);
3846
3847         if (!test_bit(HCI_INIT, &hdev->flags) &&
3848             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3849             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3850                 hci_dev_lock(hdev);
3851                 mgmt_index_removed(hdev);
3852                 hci_dev_unlock(hdev);
3853         }
3854
3855         /* mgmt_index_removed should take care of emptying the
3856          * pending list */
3857         BUG_ON(!list_empty(&hdev->mgmt_pending));
3858
3859         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3860
3861         if (hdev->rfkill) {
3862                 rfkill_unregister(hdev->rfkill);
3863                 rfkill_destroy(hdev->rfkill);
3864         }
3865
3866         device_del(&hdev->dev);
3867         /* Actual cleanup is deferred until hci_cleanup_dev(). */
3868         hci_dev_put(hdev);
3869 }
3870 EXPORT_SYMBOL(hci_unregister_dev);
3871
3872 /* Cleanup HCI device */
3873 void hci_cleanup_dev(struct hci_dev *hdev)
3874 {
3875         debugfs_remove_recursive(hdev->debugfs);
3876         kfree_const(hdev->hw_info);
3877         kfree_const(hdev->fw_info);
3878
3879         destroy_workqueue(hdev->workqueue);
3880         destroy_workqueue(hdev->req_workqueue);
3881
3882         hci_dev_lock(hdev);
3883         hci_bdaddr_list_clear(&hdev->reject_list);
3884         hci_bdaddr_list_clear(&hdev->accept_list);
3885         hci_uuids_clear(hdev);
3886         hci_link_keys_clear(hdev);
3887         hci_smp_ltks_clear(hdev);
3888         hci_smp_irks_clear(hdev);
3889         hci_remote_oob_data_clear(hdev);
3890         hci_adv_instances_clear(hdev);
3891         hci_adv_monitors_clear(hdev);
3892         hci_bdaddr_list_clear(&hdev->le_accept_list);
3893         hci_bdaddr_list_clear(&hdev->le_resolv_list);
3894         hci_conn_params_clear_all(hdev);
3895         hci_discovery_filter_clear(hdev);
3896         hci_blocked_keys_clear(hdev);
3897         hci_dev_unlock(hdev);
3898
3899         ida_simple_remove(&hci_index_ida, hdev->id);
3900 }
3901
3902 /* Suspend HCI device */
3903 int hci_suspend_dev(struct hci_dev *hdev)
3904 {
3905         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3906         return 0;
3907 }
3908 EXPORT_SYMBOL(hci_suspend_dev);
3909
3910 /* Resume HCI device */
3911 int hci_resume_dev(struct hci_dev *hdev)
3912 {
3913         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3914         return 0;
3915 }
3916 EXPORT_SYMBOL(hci_resume_dev);
3917
3918 /* Reset HCI device */
3919 int hci_reset_dev(struct hci_dev *hdev)
3920 {
3921         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3922         struct sk_buff *skb;
3923
3924         skb = bt_skb_alloc(3, GFP_ATOMIC);
3925         if (!skb)
3926                 return -ENOMEM;
3927
3928         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3929         skb_put_data(skb, hw_err, 3);
3930
3931         /* Send Hardware Error to upper stack */
3932         return hci_recv_frame(hdev, skb);
3933 }
3934 EXPORT_SYMBOL(hci_reset_dev);
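/* Editorial note (not part of the original file): the three bytes above
 * form a complete HCI event: event code HCI_EV_HARDWARE_ERROR, parameter
 * length 0x01 and a hardware code of 0x00. Injecting it through
 * hci_recv_frame() makes the reset take the same path as a genuine
 * controller-reported hardware error.
 */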
3935
3936 /* Receive frame from HCI drivers */
3937 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3938 {
3939         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3940                       && !test_bit(HCI_INIT, &hdev->flags))) {
3941                 kfree_skb(skb);
3942                 return -ENXIO;
3943         }
3944
3945         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3946             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3947             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3948             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
3949                 kfree_skb(skb);
3950                 return -EINVAL;
3951         }
3952
3953         /* Incoming skb */
3954         bt_cb(skb)->incoming = 1;
3955
3956         /* Time stamp */
3957         __net_timestamp(skb);
3958
3959         skb_queue_tail(&hdev->rx_q, skb);
3960         queue_work(hdev->workqueue, &hdev->rx_work);
3961
3962         return 0;
3963 }
3964 EXPORT_SYMBOL(hci_recv_frame);
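/* Editorial sketch (not part of the original file): a hypothetical driver
 * receive path hands a completed packet to the core like this:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	int err;
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;	// or ACL/SCO/ISO data
 *	skb_put_data(skb, buf, len);
 *	err = hci_recv_frame(hdev, skb);
 *
 * hci_recv_frame() frees the skb itself when it rejects it, so the caller
 * must not touch the skb afterwards.
 */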
3965
3966 /* Receive diagnostic message from HCI drivers */
3967 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3968 {
3969         /* Mark as diagnostic packet */
3970         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3971
3972         /* Time stamp */
3973         __net_timestamp(skb);
3974
3975         skb_queue_tail(&hdev->rx_q, skb);
3976         queue_work(hdev->workqueue, &hdev->rx_work);
3977
3978         return 0;
3979 }
3980 EXPORT_SYMBOL(hci_recv_diag);
3981
3982 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3983 {
3984         va_list vargs;
3985
3986         va_start(vargs, fmt);
3987         kfree_const(hdev->hw_info);
3988         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3989         va_end(vargs);
3990 }
3991 EXPORT_SYMBOL(hci_set_hw_info);
3992
3993 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3994 {
3995         va_list vargs;
3996
3997         va_start(vargs, fmt);
3998         kfree_const(hdev->fw_info);
3999         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4000         va_end(vargs);
4001 }
4002 EXPORT_SYMBOL(hci_set_fw_info);
4003
4004 /* ---- Interface to upper protocols ---- */
4005
4006 int hci_register_cb(struct hci_cb *cb)
4007 {
4008         BT_DBG("%p name %s", cb, cb->name);
4009
4010         mutex_lock(&hci_cb_list_lock);
4011         list_add_tail(&cb->list, &hci_cb_list);
4012         mutex_unlock(&hci_cb_list_lock);
4013
4014         return 0;
4015 }
4016 EXPORT_SYMBOL(hci_register_cb);
4017
4018 int hci_unregister_cb(struct hci_cb *cb)
4019 {
4020         BT_DBG("%p name %s", cb, cb->name);
4021
4022         mutex_lock(&hci_cb_list_lock);
4023         list_del(&cb->list);
4024         mutex_unlock(&hci_cb_list_lock);
4025
4026         return 0;
4027 }
4028 EXPORT_SYMBOL(hci_unregister_cb);
4029
4030 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4031 {
4032         int err;
4033
4034         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4035                skb->len);
4036
4037         /* Time stamp */
4038         __net_timestamp(skb);
4039
4040         /* Send copy to monitor */
4041         hci_send_to_monitor(hdev, skb);
4042
4043         if (atomic_read(&hdev->promisc)) {
4044                 /* Send copy to the sockets */
4045                 hci_send_to_sock(hdev, skb);
4046         }
4047
4048         /* Get rid of skb owner, prior to sending to the driver. */
4049         skb_orphan(skb);
4050
4051         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4052                 kfree_skb(skb);
4053                 return;
4054         }
4055
4056         err = hdev->send(hdev, skb);
4057         if (err < 0) {
4058                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4059                 kfree_skb(skb);
4060         }
4061 }
4062
4063 /* Send HCI command */
4064 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4065                  const void *param)
4066 {
4067         struct sk_buff *skb;
4068
4069         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4070
4071         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4072         if (!skb) {
4073                 bt_dev_err(hdev, "no memory for command");
4074                 return -ENOMEM;
4075         }
4076
4077         /* Stand-alone HCI commands must be flagged as
4078          * single-command requests.
4079          */
4080         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4081
4082         skb_queue_tail(&hdev->cmd_q, skb);
4083         queue_work(hdev->workqueue, &hdev->cmd_work);
4084
4085         return 0;
4086 }
4087
4088 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4089                    const void *param)
4090 {
4091         struct sk_buff *skb;
4092
4093         if (hci_opcode_ogf(opcode) != 0x3f) {
4094                 /* A controller receiving a command shall respond with either
4095                  * a Command Status Event or a Command Complete Event.
4096                  * Therefore, all standard HCI commands must be sent via the
4097                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4098                  * Some vendors do not comply with this rule for vendor-specific
4099                  * commands and do not return any event. We want to support
4100                  * unresponded commands for such cases only.
4101                  */
4102                 bt_dev_err(hdev, "unresponded command not supported");
4103                 return -EINVAL;
4104         }
4105
4106         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4107         if (!skb) {
4108                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4109                            opcode);
4110                 return -ENOMEM;
4111         }
4112
4113         hci_send_frame(hdev, skb);
4114
4115         return 0;
4116 }
4117 EXPORT_SYMBOL(__hci_cmd_send);
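/* Editorial sketch (not part of the original file): per the check above,
 * only vendor commands (OGF 0x3f) may be sent without waiting for an
 * event. With a purely hypothetical vendor opcode:
 *
 *	u8 param[] = { 0x01 };
 *	int err;
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), param);
 *
 * Any opcode outside OGF 0x3f is rejected here with -EINVAL.
 */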
4118
4119 /* Get data from the previously sent command */
4120 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4121 {
4122         struct hci_command_hdr *hdr;
4123
4124         if (!hdev->sent_cmd)
4125                 return NULL;
4126
4127         hdr = (void *) hdev->sent_cmd->data;
4128
4129         if (hdr->opcode != cpu_to_le16(opcode))
4130                 return NULL;
4131
4132         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4133
4134         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4135 }
4136
4137 /* Send HCI command and wait for command complete event */
4138 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4139                              const void *param, u32 timeout)
4140 {
4141         struct sk_buff *skb;
4142
4143         if (!test_bit(HCI_UP, &hdev->flags))
4144                 return ERR_PTR(-ENETDOWN);
4145
4146         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4147
4148         hci_req_sync_lock(hdev);
4149         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4150         hci_req_sync_unlock(hdev);
4151
4152         return skb;
4153 }
4154 EXPORT_SYMBOL(hci_cmd_sync);
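/* Editorial sketch (not part of the original file): a typical synchronous
 * exchange reads a controller parameter and frees the returned skb:
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// skb->data holds the command complete parameters
 *	kfree_skb(skb);
 *
 * The device must be up (HCI_UP), otherwise ERR_PTR(-ENETDOWN) is returned.
 */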
4155
4156 /* Send ACL data */
4157 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4158 {
4159         struct hci_acl_hdr *hdr;
4160         int len = skb->len;
4161
4162         skb_push(skb, HCI_ACL_HDR_SIZE);
4163         skb_reset_transport_header(skb);
4164         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4165         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4166         hdr->dlen   = cpu_to_le16(len);
4167 }
4168
4169 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4170                           struct sk_buff *skb, __u16 flags)
4171 {
4172         struct hci_conn *conn = chan->conn;
4173         struct hci_dev *hdev = conn->hdev;
4174         struct sk_buff *list;
4175
4176         skb->len = skb_headlen(skb);
4177         skb->data_len = 0;
4178
4179         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4180
4181         switch (hdev->dev_type) {
4182         case HCI_PRIMARY:
4183                 hci_add_acl_hdr(skb, conn->handle, flags);
4184                 break;
4185         case HCI_AMP:
4186                 hci_add_acl_hdr(skb, chan->handle, flags);
4187                 break;
4188         default:
4189                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4190                 return;
4191         }
4192
4193         list = skb_shinfo(skb)->frag_list;
4194         if (!list) {
4195                 /* Non fragmented */
4196                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4197
4198                 skb_queue_tail(queue, skb);
4199         } else {
4200                 /* Fragmented */
4201                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4202
4203                 skb_shinfo(skb)->frag_list = NULL;
4204
4205                 /* Queue all fragments atomically. We need to use spin_lock_bh
4206                  * here because with 6LoWPAN links this function can be
4207                  * called from softirq context, and taking a normal spin
4208                  * lock there could deadlock.
4209                  */
4210                 spin_lock_bh(&queue->lock);
4211
4212                 __skb_queue_tail(queue, skb);
4213
4214                 flags &= ~ACL_START;
4215                 flags |= ACL_CONT;
4216                 do {
4217                         skb = list; list = list->next;
4218
4219                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4220                         hci_add_acl_hdr(skb, conn->handle, flags);
4221
4222                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4223
4224                         __skb_queue_tail(queue, skb);
4225                 } while (list);
4226
4227                 spin_unlock_bh(&queue->lock);
4228         }
4229 }
4230
4231 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4232 {
4233         struct hci_dev *hdev = chan->conn->hdev;
4234
4235         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4236
4237         hci_queue_acl(chan, &chan->data_q, skb, flags);
4238
4239         queue_work(hdev->workqueue, &hdev->tx_work);
4240 }
4241
4242 /* Send SCO data */
4243 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4244 {
4245         struct hci_dev *hdev = conn->hdev;
4246         struct hci_sco_hdr hdr;
4247
4248         BT_DBG("%s len %d", hdev->name, skb->len);
4249
4250         hdr.handle = cpu_to_le16(conn->handle);
4251         hdr.dlen   = skb->len;
4252
4253         skb_push(skb, HCI_SCO_HDR_SIZE);
4254         skb_reset_transport_header(skb);
4255         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4256
4257         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4258
4259         skb_queue_tail(&conn->data_q, skb);
4260         queue_work(hdev->workqueue, &hdev->tx_work);
4261 }
4262
4263 /* ---- HCI TX task (outgoing data) ---- */
4264
4265 /* HCI Connection scheduler */
4266 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4267                                      int *quote)
4268 {
4269         struct hci_conn_hash *h = &hdev->conn_hash;
4270         struct hci_conn *conn = NULL, *c;
4271         unsigned int num = 0, min = ~0;
4272
4273         /* We don't have to lock the device here. Connections are always
4274          * added and removed with the TX task disabled. */
4275
4276         rcu_read_lock();
4277
4278         list_for_each_entry_rcu(c, &h->list, list) {
4279                 if (c->type != type || skb_queue_empty(&c->data_q))
4280                         continue;
4281
4282                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4283                         continue;
4284
4285                 num++;
4286
4287                 if (c->sent < min) {
4288                         min  = c->sent;
4289                         conn = c;
4290                 }
4291
4292                 if (hci_conn_num(hdev, type) == num)
4293                         break;
4294         }
4295
4296         rcu_read_unlock();
4297
4298         if (conn) {
4299                 int cnt, q;
4300
4301                 switch (conn->type) {
4302                 case ACL_LINK:
4303                         cnt = hdev->acl_cnt;
4304                         break;
4305                 case SCO_LINK:
4306                 case ESCO_LINK:
4307                         cnt = hdev->sco_cnt;
4308                         break;
4309                 case LE_LINK:
4310                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4311                         break;
4312                 default:
4313                         cnt = 0;
4314                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4315                 }
4316
4317                 q = cnt / num;
4318                 *quote = q ? q : 1;
4319         } else
4320                 *quote = 0;
4321
4322         BT_DBG("conn %p quote %d", conn, *quote);
4323         return conn;
4324 }
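/* Editorial note with a worked example (not part of the original file):
 * the quote is a fair share of the free controller buffers. With
 * hdev->acl_cnt == 8 and three ACL connections holding queued data,
 * q = 8 / 3 = 2 packets per pick; when cnt / num rounds down to zero, the
 * chosen connection still receives a quote of 1 so it can make progress.
 */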
4325
4326 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4327 {
4328         struct hci_conn_hash *h = &hdev->conn_hash;
4329         struct hci_conn *c;
4330
4331         bt_dev_err(hdev, "link tx timeout");
4332
4333         rcu_read_lock();
4334
4335         /* Kill stalled connections */
4336         list_for_each_entry_rcu(c, &h->list, list) {
4337                 if (c->type == type && c->sent) {
4338                         bt_dev_err(hdev, "killing stalled connection %pMR",
4339                                    &c->dst);
4340                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4341                 }
4342         }
4343
4344         rcu_read_unlock();
4345 }
4346
4347 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4348                                       int *quote)
4349 {
4350         struct hci_conn_hash *h = &hdev->conn_hash;
4351         struct hci_chan *chan = NULL;
4352         unsigned int num = 0, min = ~0, cur_prio = 0;
4353         struct hci_conn *conn;
4354         int cnt, q, conn_num = 0;
4355
4356         BT_DBG("%s", hdev->name);
4357
4358         rcu_read_lock();
4359
4360         list_for_each_entry_rcu(conn, &h->list, list) {
4361                 struct hci_chan *tmp;
4362
4363                 if (conn->type != type)
4364                         continue;
4365
4366                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4367                         continue;
4368
4369                 conn_num++;
4370
4371                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4372                         struct sk_buff *skb;
4373
4374                         if (skb_queue_empty(&tmp->data_q))
4375                                 continue;
4376
4377                         skb = skb_peek(&tmp->data_q);
4378                         if (skb->priority < cur_prio)
4379                                 continue;
4380
4381                         if (skb->priority > cur_prio) {
4382                                 num = 0;
4383                                 min = ~0;
4384                                 cur_prio = skb->priority;
4385                         }
4386
4387                         num++;
4388
4389                         if (conn->sent < min) {
4390                                 min  = conn->sent;
4391                                 chan = tmp;
4392                         }
4393                 }
4394
4395                 if (hci_conn_num(hdev, type) == conn_num)
4396                         break;
4397         }
4398
4399         rcu_read_unlock();
4400
4401         if (!chan)
4402                 return NULL;
4403
4404         switch (chan->conn->type) {
4405         case ACL_LINK:
4406                 cnt = hdev->acl_cnt;
4407                 break;
4408         case AMP_LINK:
4409                 cnt = hdev->block_cnt;
4410                 break;
4411         case SCO_LINK:
4412         case ESCO_LINK:
4413                 cnt = hdev->sco_cnt;
4414                 break;
4415         case LE_LINK:
4416                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4417                 break;
4418         default:
4419                 cnt = 0;
4420                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4421         }
4422
4423         q = cnt / num;
4424         *quote = q ? q : 1;
4425         BT_DBG("chan %p quote %d", chan, *quote);
4426         return chan;
4427 }
4428
4429 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4430 {
4431         struct hci_conn_hash *h = &hdev->conn_hash;
4432         struct hci_conn *conn;
4433         int num = 0;
4434
4435         BT_DBG("%s", hdev->name);
4436
4437         rcu_read_lock();
4438
4439         list_for_each_entry_rcu(conn, &h->list, list) {
4440                 struct hci_chan *chan;
4441
4442                 if (conn->type != type)
4443                         continue;
4444
4445                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4446                         continue;
4447
4448                 num++;
4449
4450                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4451                         struct sk_buff *skb;
4452
4453                         if (chan->sent) {
4454                                 chan->sent = 0;
4455                                 continue;
4456                         }
4457
4458                         if (skb_queue_empty(&chan->data_q))
4459                                 continue;
4460
4461                         skb = skb_peek(&chan->data_q);
4462                         if (skb->priority >= HCI_PRIO_MAX - 1)
4463                                 continue;
4464
4465                         skb->priority = HCI_PRIO_MAX - 1;
4466
4467                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4468                                skb->priority);
4469                 }
4470
4471                 if (hci_conn_num(hdev, type) == num)
4472                         break;
4473         }
4474
4475         rcu_read_unlock();
4476
4477 }
4478
4479 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4480 {
4481         /* Calculate count of blocks used by this packet */
4482         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4483 }
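/* Editorial note with a worked example (not part of the original file):
 * only the payload counts against the block budget. With a hypothetical
 * hdev->block_len of 64 and a 260 byte skb (256 bytes of data after the
 * 4 byte ACL header), DIV_ROUND_UP(256, 64) charges 4 blocks.
 */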
4484
4485 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
4486 {
4487         unsigned long last_tx;
4488
4489         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4490                 return;
4491
4492         switch (type) {
4493         case LE_LINK:
4494                 last_tx = hdev->le_last_tx;
4495                 break;
4496         default:
4497                 last_tx = hdev->acl_last_tx;
4498                 break;
4499         }
4500
4501         /* tx timeout must be longer than maximum link supervision timeout
4502          * (40.9 seconds)
4503          */
4504         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
4505                 hci_link_tx_to(hdev, type);
4506 }
4507
4508 /* Schedule SCO */
4509 static void hci_sched_sco(struct hci_dev *hdev)
4510 {
4511         struct hci_conn *conn;
4512         struct sk_buff *skb;
4513         int quote;
4514
4515         BT_DBG("%s", hdev->name);
4516
4517         if (!hci_conn_num(hdev, SCO_LINK))
4518                 return;
4519
4520         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4521                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4522                         BT_DBG("skb %p len %d", skb, skb->len);
4523                         hci_send_frame(hdev, skb);
4524
4525                         conn->sent++;
4526                         if (conn->sent == ~0)
4527                                 conn->sent = 0;
4528                 }
4529         }
4530 }
4531
4532 static void hci_sched_esco(struct hci_dev *hdev)
4533 {
4534         struct hci_conn *conn;
4535         struct sk_buff *skb;
4536         int quote;
4537
4538         BT_DBG("%s", hdev->name);
4539
4540         if (!hci_conn_num(hdev, ESCO_LINK))
4541                 return;
4542
4543         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4544                                                      &quote))) {
4545                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4546                         BT_DBG("skb %p len %d", skb, skb->len);
4547                         hci_send_frame(hdev, skb);
4548
4549                         conn->sent++;
4550                         if (conn->sent == ~0)
4551                                 conn->sent = 0;
4552                 }
4553         }
4554 }
4555
4556 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4557 {
4558         unsigned int cnt = hdev->acl_cnt;
4559         struct hci_chan *chan;
4560         struct sk_buff *skb;
4561         int quote;
4562
4563         __check_timeout(hdev, cnt, ACL_LINK);
4564
4565         while (hdev->acl_cnt &&
4566                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4567                 u32 priority = (skb_peek(&chan->data_q))->priority;
4568                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4569                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4570                                skb->len, skb->priority);
4571
4572                         /* Stop if priority has changed */
4573                         if (skb->priority < priority)
4574                                 break;
4575
4576                         skb = skb_dequeue(&chan->data_q);
4577
4578                         hci_conn_enter_active_mode(chan->conn,
4579                                                    bt_cb(skb)->force_active);
4580
4581                         hci_send_frame(hdev, skb);
4582                         hdev->acl_last_tx = jiffies;
4583
4584                         hdev->acl_cnt--;
4585                         chan->sent++;
4586                         chan->conn->sent++;
4587
4588                         /* Send pending SCO packets right away */
4589                         hci_sched_sco(hdev);
4590                         hci_sched_esco(hdev);
4591                 }
4592         }
4593
4594         if (cnt != hdev->acl_cnt)
4595                 hci_prio_recalculate(hdev, ACL_LINK);
4596 }
4597
4598 static void hci_sched_acl_blk(struct hci_dev *hdev)
4599 {
4600         unsigned int cnt = hdev->block_cnt;
4601         struct hci_chan *chan;
4602         struct sk_buff *skb;
4603         int quote;
4604         u8 type;
4605
4606         BT_DBG("%s", hdev->name);
4607
4608         if (hdev->dev_type == HCI_AMP)
4609                 type = AMP_LINK;
4610         else
4611                 type = ACL_LINK;
4612
4613         __check_timeout(hdev, cnt, type);
4614
4615         while (hdev->block_cnt > 0 &&
4616                (chan = hci_chan_sent(hdev, type, &quote))) {
4617                 u32 priority = (skb_peek(&chan->data_q))->priority;
4618                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4619                         int blocks;
4620
4621                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4622                                skb->len, skb->priority);
4623
4624                         /* Stop if priority has changed */
4625                         if (skb->priority < priority)
4626                                 break;
4627
4628                         skb = skb_dequeue(&chan->data_q);
4629
4630                         blocks = __get_blocks(hdev, skb);
4631                         if (blocks > hdev->block_cnt)
4632                                 return;
4633
4634                         hci_conn_enter_active_mode(chan->conn,
4635                                                    bt_cb(skb)->force_active);
4636
4637                         hci_send_frame(hdev, skb);
4638                         hdev->acl_last_tx = jiffies;
4639
4640                         hdev->block_cnt -= blocks;
4641                         quote -= blocks;
4642
4643                         chan->sent += blocks;
4644                         chan->conn->sent += blocks;
4645                 }
4646         }
4647
4648         if (cnt != hdev->block_cnt)
4649                 hci_prio_recalculate(hdev, type);
4650 }
4651
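/* Dispatch ACL scheduling according to the flow control mode the
 * controller reported: packet-based (the BR/EDR default) or block-based.
 */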
4652 static void hci_sched_acl(struct hci_dev *hdev)
4653 {
4654         BT_DBG("%s", hdev->name);
4655
4656         /* Nothing to schedule if a BR/EDR (primary) controller has no ACL links */
4657         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4658                 return;
4659
4660         /* Nothing to schedule if an AMP controller has no AMP links */
4661         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4662                 return;
4663
4664         switch (hdev->flow_ctl_mode) {
4665         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4666                 hci_sched_acl_pkt(hdev);
4667                 break;
4668
4669         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4670                 hci_sched_acl_blk(hdev);
4671                 break;
4672         }
4673 }
4674
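/* LE scheduling: controllers that report a dedicated LE buffer pool
 * (hdev->le_pkts, from LE Read Buffer Size) get their own credit count;
 * otherwise LE traffic shares the ACL credits, which is why acl_cnt may
 * be written back at the end.
 */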
4675 static void hci_sched_le(struct hci_dev *hdev)
4676 {
4677         struct hci_chan *chan;
4678         struct sk_buff *skb;
4679         int quote, cnt, tmp;
4680
4681         BT_DBG("%s", hdev->name);
4682
4683         if (!hci_conn_num(hdev, LE_LINK))
4684                 return;
4685
4686         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4687
4688         __check_timeout(hdev, cnt, LE_LINK);
4689
4690         tmp = cnt;
4691         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4692                 u32 priority = (skb_peek(&chan->data_q))->priority;
4693                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4694                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4695                                skb->len, skb->priority);
4696
4697                         /* Stop once we hit a lower-priority packet */
4698                         if (skb->priority < priority)
4699                                 break;
4700
4701                         skb = skb_dequeue(&chan->data_q);
4702
4703                         hci_send_frame(hdev, skb);
4704                         hdev->le_last_tx = jiffies;
4705
4706                         cnt--;
4707                         chan->sent++;
4708                         chan->conn->sent++;
4709
4710                         /* Send pending SCO packets right away */
4711                         hci_sched_sco(hdev);
4712                         hci_sched_esco(hdev);
4713                 }
4714         }
4715
4716         if (hdev->le_pkts)
4717                 hdev->le_cnt = cnt;
4718         else
4719                 hdev->acl_cnt = cnt;
4720
4721         if (cnt != tmp)
4722                 hci_prio_recalculate(hdev, LE_LINK);
4723 }
4724
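/* TX work: queued on hdev->workqueue whenever there is something to send.
 * (e)SCO is scheduled first since it is isochronous, then ACL, then LE;
 * raw packets bypass the schedulers entirely.
 */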
4725 static void hci_tx_work(struct work_struct *work)
4726 {
4727         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4728         struct sk_buff *skb;
4729
4730         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4731                hdev->sco_cnt, hdev->le_cnt);
4732
4733         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4734                 /* Run the schedulers and push queued packets to the driver */
4735                 hci_sched_sco(hdev);
4736                 hci_sched_esco(hdev);
4737                 hci_sched_acl(hdev);
4738                 hci_sched_le(hdev);
4739         }
4740
4741         /* Send next queued raw (unknown type) packet */
4742         while ((skb = skb_dequeue(&hdev->raw_q)))
4743                 hci_send_frame(hdev, skb);
4744 }
4745
4746 /* ----- HCI RX task (incoming data processing) ----- */
4747
4748 /* ACL data packet */
4749 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4750 {
4751         struct hci_acl_hdr *hdr = (void *) skb->data;
4752         struct hci_conn *conn;
4753         __u16 handle, flags;
4754
4755         skb_pull(skb, HCI_ACL_HDR_SIZE);
4756
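        /* The 16-bit header field packs a 12-bit connection handle with
         * 4 flag bits (packet boundary and broadcast): hci_handle()
         * masks with 0x0fff and hci_flags() shifts the flag bits down.
         */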
4757         handle = __le16_to_cpu(hdr->handle);
4758         flags  = hci_flags(handle);
4759         handle = hci_handle(handle);
4760
4761         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4762                handle, flags);
4763
4764         hdev->stat.acl_rx++;
4765
4766         hci_dev_lock(hdev);
4767         conn = hci_conn_hash_lookup_handle(hdev, handle);
4768         hci_dev_unlock(hdev);
4769
4770         if (conn) {
4771                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4772
4773                 /* Send to upper protocol */
4774                 l2cap_recv_acldata(conn, skb, flags);
4775                 return;
4776         } else {
4777                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4778                            handle);
4779         }
4780
4781         kfree_skb(skb);
4782 }
4783
4784 /* SCO data packet */
4785 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4786 {
4787         struct hci_sco_hdr *hdr = (void *) skb->data;
4788         struct hci_conn *conn;
4789         __u16 handle, flags;
4790
4791         skb_pull(skb, HCI_SCO_HDR_SIZE);
4792
4793         handle = __le16_to_cpu(hdr->handle);
4794         flags  = hci_flags(handle);
4795         handle = hci_handle(handle);
4796
4797         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4798                handle, flags);
4799
4800         hdev->stat.sco_rx++;
4801
4802         hci_dev_lock(hdev);
4803         conn = hci_conn_hash_lookup_handle(hdev, handle);
4804         hci_dev_unlock(hdev);
4805
4806         if (conn) {
4807                 /* Send to upper protocol */
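                /* For SCO, the flag bits carry the Packet_Status_Flag
                 * defined by the Core spec (0 = correctly received,
                 * 1 = possibly invalid, 2 = no data received,
                 * 3 = partially lost), exported to userspace for
                 * erroneous-data reporting.
                 */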
4808                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4809                 sco_recv_scodata(conn, skb);
4810                 return;
4811         } else {
4812                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4813                            handle);
4814         }
4815
4816         kfree_skb(skb);
4817 }
4818
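/* A request is complete when the command queue is empty or when the next
 * queued command is flagged HCI_REQ_START, i.e. it opens a new request.
 */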
4819 static bool hci_req_is_complete(struct hci_dev *hdev)
4820 {
4821         struct sk_buff *skb;
4822
4823         skb = skb_peek(&hdev->cmd_q);
4824         if (!skb)
4825                 return true;
4826
4827         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4828 }
4829
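/* Put a clone of the last sent command back at the head of the command
 * queue. HCI_Reset is deliberately excluded: it is the command whose
 * spontaneous completion we are working around in the first place.
 */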
4830 static void hci_resend_last(struct hci_dev *hdev)
4831 {
4832         struct hci_command_hdr *sent;
4833         struct sk_buff *skb;
4834         u16 opcode;
4835
4836         if (!hdev->sent_cmd)
4837                 return;
4838
4839         sent = (void *) hdev->sent_cmd->data;
4840         opcode = __le16_to_cpu(sent->opcode);
4841         if (opcode == HCI_OP_RESET)
4842                 return;
4843
4844         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4845         if (!skb)
4846                 return;
4847
4848         skb_queue_head(&hdev->cmd_q, skb);
4849         queue_work(hdev->workqueue, &hdev->cmd_work);
4850 }
4851
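/* Called from event processing when a command completes. If the command
 * finished the request it belonged to, *req_complete or *req_complete_skb
 * is set to that request's completion callback.
 */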
4852 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4853                           hci_req_complete_t *req_complete,
4854                           hci_req_complete_skb_t *req_complete_skb)
4855 {
4856         struct sk_buff *skb;
4857         unsigned long flags;
4858
4859         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4860
4861         /* If the completed command doesn't match the last one that was
4862          * sent, it needs special handling.
4863          */
4864         if (!hci_sent_cmd_data(hdev, opcode)) {
4865                 /* Some CSR-based controllers generate a spontaneous
4866                  * reset-complete event during init, after which any
4867                  * pending command would never complete. In that
4868                  * case we need to resend whatever the last sent
4869                  * command was.
4870                  */
4871                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4872                         hci_resend_last(hdev);
4873
4874                 return;
4875         }
4876
4877         /* If we reach this point, this event matches the last command sent */
4878         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4879
4880         /* If the command succeeded and there are still more commands in
4881          * this request, the request is not yet complete.
4882          */
4883         if (!status && !hci_req_is_complete(hdev))
4884                 return;
4885
4886         /* If this was the last command in a request, the completion
4887          * callback is found in hdev->sent_cmd rather than in the
4888          * command queue (hdev->cmd_q).
4889          */
4890         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4891                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4892                 return;
4893         }
4894
4895         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4896                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4897                 return;
4898         }
4899
4900         /* Remove all pending commands belonging to this request */
4901         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4902         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4903                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4904                         __skb_queue_head(&hdev->cmd_q, skb);
4905                         break;
4906                 }
4907
4908                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4909                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4910                 else
4911                         *req_complete = bt_cb(skb)->hci.req_complete;
4912                 kfree_skb(skb);
4913         }
4914         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4915 }
4916
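/* RX work: drains hdev->rx_q. Every frame is first mirrored to the
 * monitor socket (and to raw sockets in promiscuous mode) before the
 * kernel decides whether to process it itself.
 */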
4917 static void hci_rx_work(struct work_struct *work)
4918 {
4919         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4920         struct sk_buff *skb;
4921
4922         BT_DBG("%s", hdev->name);
4923
4924         while ((skb = skb_dequeue(&hdev->rx_q))) {
4925                 /* Send copy to monitor */
4926                 hci_send_to_monitor(hdev, skb);
4927
4928                 if (atomic_read(&hdev->promisc)) {
4929                         /* Send copy to the sockets */
4930                         hci_send_to_sock(hdev, skb);
4931                 }
4932
4933                 /* If the device has been opened in HCI_USER_CHANNEL,
4934                  * userspace has exclusive access to the device.
4935                  * While the device is in the HCI_INIT state, we still
4936                  * need to process incoming packets so that the driver
4937                  * can complete its setup().
4938                  */
4939                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4940                     !test_bit(HCI_INIT, &hdev->flags)) {
4941                         kfree_skb(skb);
4942                         continue;
4943                 }
4944
4945                 if (test_bit(HCI_INIT, &hdev->flags)) {
4946                         /* Don't process data packets in this state. */
4947                         switch (hci_skb_pkt_type(skb)) {
4948                         case HCI_ACLDATA_PKT:
4949                         case HCI_SCODATA_PKT:
4950                         case HCI_ISODATA_PKT:
4951                                 kfree_skb(skb);
4952                                 continue;
4953                         }
4954                 }
4955
4956                 /* Process frame */
4957                 switch (hci_skb_pkt_type(skb)) {
4958                 case HCI_EVENT_PKT:
4959                         BT_DBG("%s Event packet", hdev->name);
4960                         hci_event_packet(hdev, skb);
4961                         break;
4962
4963                 case HCI_ACLDATA_PKT:
4964                         BT_DBG("%s ACL data packet", hdev->name);
4965                         hci_acldata_packet(hdev, skb);
4966                         break;
4967
4968                 case HCI_SCODATA_PKT:
4969                         BT_DBG("%s SCO data packet", hdev->name);
4970                         hci_scodata_packet(hdev, skb);
4971                         break;
4972
4973                 default:
4974                         kfree_skb(skb);
4975                         break;
4976                 }
4977         }
4978 }
4979
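/* Command work: sends at most one command per invocation, gated by
 * cmd_cnt, which mirrors the Num_HCI_Command_Packets value reported by
 * the controller. A clone of the in-flight command is kept in
 * hdev->sent_cmd so the matching Command Complete/Status event can be
 * paired with it, and cmd_timer acts as a watchdog for lost responses.
 */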
4980 static void hci_cmd_work(struct work_struct *work)
4981 {
4982         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4983         struct sk_buff *skb;
4984
4985         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4986                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4987
4988         /* Send the next queued command, if we have a command credit */
4989         if (atomic_read(&hdev->cmd_cnt)) {
4990                 skb = skb_dequeue(&hdev->cmd_q);
4991                 if (!skb)
4992                         return;
4993
4994                 kfree_skb(hdev->sent_cmd);
4995
4996                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4997                 if (hdev->sent_cmd) {
4998                         if (hci_req_status_pend(hdev))
4999                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5000                         atomic_dec(&hdev->cmd_cnt);
5001                         hci_send_frame(hdev, skb);
5002                         if (test_bit(HCI_RESET, &hdev->flags))
5003                                 cancel_delayed_work(&hdev->cmd_timer);
5004                         else
5005                                 schedule_delayed_work(&hdev->cmd_timer,
5006                                                       HCI_CMD_TIMEOUT);
5007                 } else {
5008                         skb_queue_head(&hdev->cmd_q, skb);
5009                         queue_work(hdev->workqueue, &hdev->cmd_work);
5010                 }
5011         }
5012 }