GNU Linux-libre 5.10.217-gnu1
net/bluetooth/hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/property.h>
33 #include <linux/suspend.h>
34 #include <linux/wait.h>
35 #include <asm/unaligned.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 #include <net/bluetooth/mgmt.h>
41
42 #include "hci_request.h"
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47
48 static void hci_rx_work(struct work_struct *work);
49 static void hci_cmd_work(struct work_struct *work);
50 static void hci_tx_work(struct work_struct *work);
51
52 /* HCI device list */
53 LIST_HEAD(hci_dev_list);
54 DEFINE_RWLOCK(hci_dev_list_lock);
55
56 /* HCI callback list */
57 LIST_HEAD(hci_cb_list);
58 DEFINE_MUTEX(hci_cb_list_lock);
59
60 /* HCI ID Numbering */
61 static DEFINE_IDA(hci_index_ida);
62
63 /* ---- HCI debugfs entries ---- */
64
65 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66                              size_t count, loff_t *ppos)
67 {
68         struct hci_dev *hdev = file->private_data;
69         char buf[3];
70
71         buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
72         buf[1] = '\n';
73         buf[2] = '\0';
74         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
75 }
76
77 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78                               size_t count, loff_t *ppos)
79 {
80         struct hci_dev *hdev = file->private_data;
81         struct sk_buff *skb;
82         bool enable;
83         int err;
84
85         if (!test_bit(HCI_UP, &hdev->flags))
86                 return -ENETDOWN;
87
88         err = kstrtobool_from_user(user_buf, count, &enable);
89         if (err)
90                 return err;
91
92         if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
93                 return -EALREADY;
94
95         hci_req_sync_lock(hdev);
96         if (enable)
97                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
98                                      HCI_CMD_TIMEOUT);
99         else
100                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
101                                      HCI_CMD_TIMEOUT);
102         hci_req_sync_unlock(hdev);
103
104         if (IS_ERR(skb))
105                 return PTR_ERR(skb);
106
107         kfree_skb(skb);
108
109         hci_dev_change_flag(hdev, HCI_DUT_MODE);
110
111         return count;
112 }
113
114 static const struct file_operations dut_mode_fops = {
115         .open           = simple_open,
116         .read           = dut_mode_read,
117         .write          = dut_mode_write,
118         .llseek         = default_llseek,
119 };
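/* Usage sketch for the debugfs entry created below in
 * hci_debugfs_create_basic() (path assumes debugfs is mounted at
 * /sys/kernel/debug and the controller is registered as hci0):
 *
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	Y
 *
 * Writing 'Y' issues HCI_OP_ENABLE_DUT_MODE; writing 'N' resets the
 * controller via HCI_OP_RESET, as implemented in dut_mode_write() above.
 */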
120
121 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
122                                 size_t count, loff_t *ppos)
123 {
124         struct hci_dev *hdev = file->private_data;
125         char buf[3];
126
127         buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
128         buf[1] = '\n';
129         buf[2] = '\0';
130         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
131 }
132
133 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
134                                  size_t count, loff_t *ppos)
135 {
136         struct hci_dev *hdev = file->private_data;
137         bool enable;
138         int err;
139
140         err = kstrtobool_from_user(user_buf, count, &enable);
141         if (err)
142                 return err;
143
144         /* When the diagnostic flags are not persistent and the transport
145          * is not active or in user channel operation, there is no need for
146          * the vendor callback. Instead, just store the desired value; the
147          * setting will be programmed when the controller gets powered on.
148          */
149         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
150             (!test_bit(HCI_RUNNING, &hdev->flags) ||
151              hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
152                 goto done;
153
154         hci_req_sync_lock(hdev);
155         err = hdev->set_diag(hdev, enable);
156         hci_req_sync_unlock(hdev);
157
158         if (err < 0)
159                 return err;
160
161 done:
162         if (enable)
163                 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
164         else
165                 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
166
167         return count;
168 }
169
170 static const struct file_operations vendor_diag_fops = {
171         .open           = simple_open,
172         .read           = vendor_diag_read,
173         .write          = vendor_diag_write,
174         .llseek         = default_llseek,
175 };
176
177 static void hci_debugfs_create_basic(struct hci_dev *hdev)
178 {
179         debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
180                             &dut_mode_fops);
181
182         if (hdev->set_diag)
183                 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
184                                     &vendor_diag_fops);
185 }
186
187 static int hci_reset_req(struct hci_request *req, unsigned long opt)
188 {
189         BT_DBG("%s %ld", req->hdev->name, opt);
190
191         /* Reset device */
192         set_bit(HCI_RESET, &req->hdev->flags);
193         hci_req_add(req, HCI_OP_RESET, 0, NULL);
194         return 0;
195 }
196
197 static void bredr_init(struct hci_request *req)
198 {
199         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
200
201         /* Read Local Supported Features */
202         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
203
204         /* Read Local Version */
205         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
206
207         /* Read BD Address */
208         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
209 }
210
211 static void amp_init1(struct hci_request *req)
212 {
213         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
214
215         /* Read Local Version */
216         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
217
218         /* Read Local Supported Commands */
219         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
220
221         /* Read Local AMP Info */
222         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
223
224         /* Read Data Block Size */
225         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
226
227         /* Read Flow Control Mode */
228         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
229
230         /* Read Location Data */
231         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
232 }
233
234 static int amp_init2(struct hci_request *req)
235 {
236         /* Read Local Supported Features. Not all AMP controllers
237          * support this so it's placed conditionally in the second
238          * stage init.
239          */
240         if (req->hdev->commands[14] & 0x20)
241                 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
242
243         return 0;
244 }
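/* Note on the hdev->commands[] checks used above and throughout this
 * file: the array mirrors the Read Local Supported Commands bitmask
 * from the Core spec, indexed as octet/bit, so a test such as
 * (hdev->commands[14] & 0x20) checks bit 5 of octet 14. A generic
 * helper would look like the following sketch (hci_cmd_supported() is
 * illustrative only, not an existing helper):
 *
 *	static inline bool hci_cmd_supported(struct hci_dev *hdev,
 *					     unsigned int octet,
 *					     unsigned int bit)
 *	{
 *		return hdev->commands[octet] & (1 << bit);
 *	}
 */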
245
246 static int hci_init1_req(struct hci_request *req, unsigned long opt)
247 {
248         struct hci_dev *hdev = req->hdev;
249
250         BT_DBG("%s %ld", hdev->name, opt);
251
252         /* Reset */
253         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
254                 hci_reset_req(req, 0);
255
256         switch (hdev->dev_type) {
257         case HCI_PRIMARY:
258                 bredr_init(req);
259                 break;
260         case HCI_AMP:
261                 amp_init1(req);
262                 break;
263         default:
264                 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
265                 break;
266         }
267
268         return 0;
269 }
270
271 static void bredr_setup(struct hci_request *req)
272 {
273         __le16 param;
274         __u8 flt_type;
275
276         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
277         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
278
279         /* Read Class of Device */
280         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
281
282         /* Read Local Name */
283         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
284
285         /* Read Voice Setting */
286         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
287
288         /* Read Number of Supported IAC */
289         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
290
291         /* Read Current IAC LAP */
292         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
293
294         /* Clear Event Filters */
295         flt_type = HCI_FLT_CLEAR_ALL;
296         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
297
298         /* Connection accept timeout ~20 secs */
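        /* (0x7d00 = 32000 baseband slots * 0.625 ms per slot = 20 s) */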
299         param = cpu_to_le16(0x7d00);
300         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
301 }
302
303 static void le_setup(struct hci_request *req)
304 {
305         struct hci_dev *hdev = req->hdev;
306
307         /* Read LE Buffer Size */
308         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
309
310         /* Read LE Local Supported Features */
311         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
312
313         /* Read LE Supported States */
314         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
315
316         /* LE-only controllers have LE implicitly enabled */
317         if (!lmp_bredr_capable(hdev))
318                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
319 }
320
321 static void hci_setup_event_mask(struct hci_request *req)
322 {
323         struct hci_dev *hdev = req->hdev;
324
325         /* The second byte is 0xff instead of 0x9f (two reserved bits
326          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
327          * command otherwise.
328          */
329         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
330
331         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
332          * an event mask for pre-1.2 devices.
333          */
334         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
335                 return;
336
337         if (lmp_bredr_capable(hdev)) {
338                 events[4] |= 0x01; /* Flow Specification Complete */
339         } else {
340                 /* Use a different default for LE-only devices */
341                 memset(events, 0, sizeof(events));
342                 events[1] |= 0x20; /* Command Complete */
343                 events[1] |= 0x40; /* Command Status */
344                 events[1] |= 0x80; /* Hardware Error */
345
346                 /* If the controller supports the Disconnect command, enable
347                  * the corresponding event. In addition enable packet flow
348                  * control related events.
349                  */
350                 if (hdev->commands[0] & 0x20) {
351                         events[0] |= 0x10; /* Disconnection Complete */
352                         events[2] |= 0x04; /* Number of Completed Packets */
353                         events[3] |= 0x02; /* Data Buffer Overflow */
354                 }
355
356                 /* If the controller supports the Read Remote Version
357                  * Information command, enable the corresponding event.
358                  */
359                 if (hdev->commands[2] & 0x80)
360                         events[1] |= 0x08; /* Read Remote Version Information
361                                             * Complete
362                                             */
363
364                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
365                         events[0] |= 0x80; /* Encryption Change */
366                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
367                 }
368         }
369
370         if (lmp_inq_rssi_capable(hdev) ||
371             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
372                 events[4] |= 0x02; /* Inquiry Result with RSSI */
373
374         if (lmp_ext_feat_capable(hdev))
375                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
376
377         if (lmp_esco_capable(hdev)) {
378                 events[5] |= 0x08; /* Synchronous Connection Complete */
379                 events[5] |= 0x10; /* Synchronous Connection Changed */
380         }
381
382         if (lmp_sniffsubr_capable(hdev))
383                 events[5] |= 0x20; /* Sniff Subrating */
384
385         if (lmp_pause_enc_capable(hdev))
386                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
387
388         if (lmp_ext_inq_capable(hdev))
389                 events[5] |= 0x40; /* Extended Inquiry Result */
390
391         if (lmp_no_flush_capable(hdev))
392                 events[7] |= 0x01; /* Enhanced Flush Complete */
393
394         if (lmp_lsto_capable(hdev))
395                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
396
397         if (lmp_ssp_capable(hdev)) {
398                 events[6] |= 0x01;      /* IO Capability Request */
399                 events[6] |= 0x02;      /* IO Capability Response */
400                 events[6] |= 0x04;      /* User Confirmation Request */
401                 events[6] |= 0x08;      /* User Passkey Request */
402                 events[6] |= 0x10;      /* Remote OOB Data Request */
403                 events[6] |= 0x20;      /* Simple Pairing Complete */
404                 events[7] |= 0x04;      /* User Passkey Notification */
405                 events[7] |= 0x08;      /* Keypress Notification */
406                 events[7] |= 0x10;      /* Remote Host Supported
407                                          * Features Notification
408                                          */
409         }
410
411         if (lmp_le_capable(hdev))
412                 events[7] |= 0x20;      /* LE Meta-Event */
413
414         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
415 }
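/* The event mask built above follows the Core spec layout: event code N
 * is controlled by bit (N - 1) of the little-endian 64-bit mask, i.e.
 * byte (N - 1) / 8, bit (N - 1) % 8. Worked example: Disconnection
 * Complete is event 0x05, so bit 4 of byte 0 (events[0] |= 0x10), and
 * Command Complete is event 0x0e, so bit 13 (events[1] |= 0x20).
 */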
416
417 static int hci_init2_req(struct hci_request *req, unsigned long opt)
418 {
419         struct hci_dev *hdev = req->hdev;
420
421         if (hdev->dev_type == HCI_AMP)
422                 return amp_init2(req);
423
424         if (lmp_bredr_capable(hdev))
425                 bredr_setup(req);
426         else
427                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
428
429         if (lmp_le_capable(hdev))
430                 le_setup(req);
431
432         /* All Bluetooth 1.2 and later controllers should support the
433          * HCI command for reading the local supported commands.
434          *
435          * Unfortunately, some controllers indicate Bluetooth 1.2 support
436          * but do not support this command. If that is the case, the driver
437          * can quirk the behavior and skip reading the local supported
438          * commands.
439          */
440         if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
441             !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
442                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
443
444         if (lmp_ssp_capable(hdev)) {
445                 /* When SSP is available, the host features page should be
446                  * available as well. However, some controllers list
447                  * max_page as 0 as long as SSP has not been enabled. To
448                  * achieve proper debugging output, force max_page to at
449                  * least 1.
450                  */
451                 hdev->max_page = 0x01;
452
453                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
454                         u8 mode = 0x01;
455
456                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
457                                     sizeof(mode), &mode);
458                 } else {
459                         struct hci_cp_write_eir cp;
460
461                         memset(hdev->eir, 0, sizeof(hdev->eir));
462                         memset(&cp, 0, sizeof(cp));
463
464                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
465                 }
466         }
467
468         if (lmp_inq_rssi_capable(hdev) ||
469             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
470                 u8 mode;
471
472                 /* If Extended Inquiry Result events are supported, then
473                  * they are clearly preferred over Inquiry Result with RSSI
474                  * events.
475                  */
476                 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
477
478                 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
479         }
480
481         if (lmp_inq_tx_pwr_capable(hdev))
482                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
483
484         if (lmp_ext_feat_capable(hdev)) {
485                 struct hci_cp_read_local_ext_features cp;
486
487                 cp.page = 0x01;
488                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
489                             sizeof(cp), &cp);
490         }
491
492         if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
493                 u8 enable = 1;
494                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
495                             &enable);
496         }
497
498         return 0;
499 }
500
501 static void hci_setup_link_policy(struct hci_request *req)
502 {
503         struct hci_dev *hdev = req->hdev;
504         struct hci_cp_write_def_link_policy cp;
505         u16 link_policy = 0;
506
507         if (lmp_rswitch_capable(hdev))
508                 link_policy |= HCI_LP_RSWITCH;
509         if (lmp_hold_capable(hdev))
510                 link_policy |= HCI_LP_HOLD;
511         if (lmp_sniff_capable(hdev))
512                 link_policy |= HCI_LP_SNIFF;
513         if (lmp_park_capable(hdev))
514                 link_policy |= HCI_LP_PARK;
515
516         cp.policy = cpu_to_le16(link_policy);
517         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
518 }
519
520 static void hci_set_le_support(struct hci_request *req)
521 {
522         struct hci_dev *hdev = req->hdev;
523         struct hci_cp_write_le_host_supported cp;
524
525         /* LE-only devices do not support explicit enablement */
526         if (!lmp_bredr_capable(hdev))
527                 return;
528
529         memset(&cp, 0, sizeof(cp));
530
531         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
532                 cp.le = 0x01;
533                 cp.simul = 0x00;
534         }
535
536         if (cp.le != lmp_host_le_capable(hdev))
537                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
538                             &cp);
539 }
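/* Note: cp.simul corresponds to the "Simultaneous LE and BR/EDR to Same
 * Device Capable" host bit, which later Core spec versions deprecate;
 * it is therefore always left at 0x00 here.
 */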
540
541 static void hci_set_event_mask_page_2(struct hci_request *req)
542 {
543         struct hci_dev *hdev = req->hdev;
544         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
545         bool changed = false;
546
547         /* If the Connectionless Slave Broadcast master role is supported,
548          * enable all necessary events for it.
549          */
550         if (lmp_csb_master_capable(hdev)) {
551                 events[1] |= 0x40;      /* Triggered Clock Capture */
552                 events[1] |= 0x80;      /* Synchronization Train Complete */
553                 events[2] |= 0x10;      /* Slave Page Response Timeout */
554                 events[2] |= 0x20;      /* CSB Channel Map Change */
555                 changed = true;
556         }
557
558         /* If the Connectionless Slave Broadcast slave role is supported,
559          * enable all necessary events for it.
560          */
561         if (lmp_csb_slave_capable(hdev)) {
562                 events[2] |= 0x01;      /* Synchronization Train Received */
563                 events[2] |= 0x02;      /* CSB Receive */
564                 events[2] |= 0x04;      /* CSB Timeout */
565                 events[2] |= 0x08;      /* Truncated Page Complete */
566                 changed = true;
567         }
568
569         /* Enable Authenticated Payload Timeout Expired event if supported */
570         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
571                 events[2] |= 0x80;
572                 changed = true;
573         }
574
575         /* Some Broadcom based controllers indicate support for the Set
576          * Event Mask Page 2 command, but actually do not support it. Since
577          * the default value is all bits set to zero, the command is only
578          * required if the event mask has to be changed. In case no change
579          * to the event mask is needed, skip this command.
580          */
581         if (changed)
582                 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
583                             sizeof(events), events);
584 }
585
586 static int hci_init3_req(struct hci_request *req, unsigned long opt)
587 {
588         struct hci_dev *hdev = req->hdev;
589         u8 p;
590
591         hci_setup_event_mask(req);
592
593         if (hdev->commands[6] & 0x20 &&
594             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
595                 struct hci_cp_read_stored_link_key cp;
596
597                 bacpy(&cp.bdaddr, BDADDR_ANY);
598                 cp.read_all = 0x01;
599                 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
600         }
601
602         if (hdev->commands[5] & 0x10)
603                 hci_setup_link_policy(req);
604
605         if (hdev->commands[8] & 0x01)
606                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
607
608         if (hdev->commands[18] & 0x04 &&
609             !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
610                 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
611
612         /* Some older Broadcom based Bluetooth 1.2 controllers do not
613          * support the Read Page Scan Type command. Check support for
614          * this command in the bit mask of supported commands.
615          */
616         if (hdev->commands[13] & 0x01)
617                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
618
619         if (lmp_le_capable(hdev)) {
620                 u8 events[8];
621
622                 memset(events, 0, sizeof(events));
623
624                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
625                         events[0] |= 0x10;      /* LE Long Term Key Request */
626
627                 /* If the controller supports the Connection Parameters Request
628                  * Link Layer Procedure, enable the corresponding event.
629                  */
630                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
631                         events[0] |= 0x20;      /* LE Remote Connection
632                                                  * Parameter Request
633                                                  */
634
635                 /* If the controller supports the Data Length Extension
636                  * feature, enable the corresponding event.
637                  */
638                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
639                         events[0] |= 0x40;      /* LE Data Length Change */
640
641                 /* If the controller supports the LL Privacy feature, enable
642                  * the corresponding event.
643                  */
644                 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
645                         events[1] |= 0x02;      /* LE Enhanced Connection
646                                                  * Complete
647                                                  */
648
649                 /* If the controller supports Extended Scanner Filter
650          * Policies, enable the corresponding event.
651                  */
652                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
653                         events[1] |= 0x04;      /* LE Direct Advertising
654                                                  * Report
655                                                  */
656
657                 /* If the controller supports the Channel Selection Algorithm #2
658                  * feature, enable the corresponding event.
659                  */
660                 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
661                         events[2] |= 0x08;      /* LE Channel Selection
662                                                  * Algorithm
663                                                  */
664
665                 /* If the controller supports the LE Set Scan Enable command,
666                  * enable the corresponding advertising report event.
667                  */
668                 if (hdev->commands[26] & 0x08)
669                         events[0] |= 0x02;      /* LE Advertising Report */
670
671                 /* If the controller supports the LE Create Connection
672                  * command, enable the corresponding event.
673                  */
674                 if (hdev->commands[26] & 0x10)
675                         events[0] |= 0x01;      /* LE Connection Complete */
676
677                 /* If the controller supports the LE Connection Update
678                  * command, enable the corresponding event.
679                  */
680                 if (hdev->commands[27] & 0x04)
681                         events[0] |= 0x04;      /* LE Connection Update
682                                                  * Complete
683                                                  */
684
685                 /* If the controller supports the LE Read Remote Used Features
686                  * command, enable the corresponding event.
687                  */
688                 if (hdev->commands[27] & 0x20)
689                         events[0] |= 0x08;      /* LE Read Remote Used
690                                                  * Features Complete
691                                                  */
692
693                 /* If the controller supports the LE Read Local P-256
694                  * Public Key command, enable the corresponding event.
695                  */
696                 if (hdev->commands[34] & 0x02)
697                         events[0] |= 0x80;      /* LE Read Local P-256
698                                                  * Public Key Complete
699                                                  */
700
701                 /* If the controller supports the LE Generate DHKey
702                  * command, enable the corresponding event.
703                  */
704                 if (hdev->commands[34] & 0x04)
705                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
706
707                 /* If the controller supports the LE Set Default PHY or
708                  * LE Set PHY commands, enable the corresponding event.
709                  */
710                 if (hdev->commands[35] & (0x20 | 0x40))
711                         events[1] |= 0x08;        /* LE PHY Update Complete */
712
713                 /* If the controller supports LE Set Extended Scan Parameters
714                  * and LE Set Extended Scan Enable commands, enable the
715                  * corresponding event.
716                  */
717                 if (use_ext_scan(hdev))
718                         events[1] |= 0x10;      /* LE Extended Advertising
719                                                  * Report
720                                                  */
721
722                 /* If the controller supports the LE Extended Advertising
723                  * command, enable the corresponding event.
724                  */
725                 if (ext_adv_capable(hdev))
726                         events[2] |= 0x02;      /* LE Advertising Set
727                                                  * Terminated
728                                                  */
729
730                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
731                             events);
732
733                 /* Read LE Advertising Channel TX Power */
734                 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
735                         /* The HCI spec forbids mixing legacy and extended
736                          * advertising commands, and READ_ADV_TX_POWER is in
737                          * the legacy set. So do not send it if extended adv
738                          * is supported; otherwise the controller will return
739                          * COMMAND_DISALLOWED for the extended commands.
740                          */
741                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
742                 }
743
744                 if (hdev->commands[26] & 0x40) {
745                         /* Read LE Accept List Size */
746                         hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
747                                     0, NULL);
748                 }
749
750                 if (hdev->commands[26] & 0x80) {
751                         /* Clear LE Accept List */
752                         hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
753                 }
754
755                 if (hdev->commands[34] & 0x40) {
756                         /* Read LE Resolving List Size */
757                         hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
758                                     0, NULL);
759                 }
760
761                 if (hdev->commands[34] & 0x20) {
762                         /* Clear LE Resolving List */
763                         hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
764                 }
765
766                 if (hdev->commands[35] & 0x04) {
767                         __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
768
769                         /* Set RPA timeout */
770                         hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
771                                     &rpa_timeout);
772                 }
773
774                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
775                         /* Read LE Maximum Data Length */
776                         hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
777
778                         /* Read LE Suggested Default Data Length */
779                         hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
780                 }
781
782                 if (ext_adv_capable(hdev)) {
783                         /* Read LE Number of Supported Advertising Sets */
784                         hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
785                                     0, NULL);
786                 }
787
788                 hci_set_le_support(req);
789         }
790
791         /* Read features beyond page 1 if available */
792         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
793                 struct hci_cp_read_local_ext_features cp;
794
795                 cp.page = p;
796                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
797                             sizeof(cp), &cp);
798         }
799
800         return 0;
801 }
802
803 static int hci_init4_req(struct hci_request *req, unsigned long opt)
804 {
805         struct hci_dev *hdev = req->hdev;
806
807         /* Some Broadcom based Bluetooth controllers do not support the
808          * Delete Stored Link Key command; they clearly indicate its
809          * absence in the bit mask of supported commands.
810          *
811          * Check the supported commands and send the command only if it is
812          * marked as supported. If it is not, assume that the controller
813          * has no actual support for stored link keys, which makes this
814          * command redundant anyway.
815          *
816          * Some controllers indicate that they support deleting stored
817          * link keys, but they don't. The quirk lets a driver simply
818          * disable this command.
819          */
820         if (hdev->commands[6] & 0x80 &&
821             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
822                 struct hci_cp_delete_stored_link_key cp;
823
824                 bacpy(&cp.bdaddr, BDADDR_ANY);
825                 cp.delete_all = 0x01;
826                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
827                             sizeof(cp), &cp);
828         }
829
830         /* Set event mask page 2 if the HCI command for it is supported */
831         if (hdev->commands[22] & 0x04)
832                 hci_set_event_mask_page_2(req);
833
834         /* Read local codec list if the HCI command is supported */
835         if (hdev->commands[29] & 0x20)
836                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
837
838         /* Read local pairing options if the HCI command is supported */
839         if (hdev->commands[41] & 0x08)
840                 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
841
842         /* Get MWS transport configuration if the HCI command is supported */
843         if (hdev->commands[30] & 0x08)
844                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
845
846         /* Check for Synchronization Train support */
847         if (lmp_sync_train_capable(hdev))
848                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
849
850         /* Enable Secure Connections if supported and configured */
851         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
852             bredr_sc_enabled(hdev)) {
853                 u8 support = 0x01;
854
855                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
856                             sizeof(support), &support);
857         }
858
859         /* If supported, set erroneous data reporting to match the wideband
860          * speech setting value.
861          */
862         if (hdev->commands[18] & 0x08 &&
863             !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
864                 bool enabled = hci_dev_test_flag(hdev,
865                                                  HCI_WIDEBAND_SPEECH_ENABLED);
866
867                 if (enabled !=
868                     (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
869                         struct hci_cp_write_def_err_data_reporting cp;
870
871                         cp.err_data_reporting = enabled ?
872                                                 ERR_DATA_REPORTING_ENABLED :
873                                                 ERR_DATA_REPORTING_DISABLED;
874
875                         hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
876                                     sizeof(cp), &cp);
877                 }
878         }
879
880         /* Set Suggested Default Data Length to maximum if supported */
881         if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
882                 struct hci_cp_le_write_def_data_len cp;
883
884                 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
885                 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
886                 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
887         }
888
889         /* Set Default PHY parameters if command is supported */
890         if (hdev->commands[35] & 0x20) {
891                 struct hci_cp_le_set_default_phy cp;
892
893                 cp.all_phys = 0x00;
894                 cp.tx_phys = hdev->le_tx_def_phys;
895                 cp.rx_phys = hdev->le_rx_def_phys;
896
897                 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
898         }
899
900         return 0;
901 }
902
903 static int __hci_init(struct hci_dev *hdev)
904 {
905         int err;
906
907         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
908         if (err < 0)
909                 return err;
910
911         if (hci_dev_test_flag(hdev, HCI_SETUP))
912                 hci_debugfs_create_basic(hdev);
913
914         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
915         if (err < 0)
916                 return err;
917
918         /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR, and
919          * dual-mode BR/EDR/LE controllers. AMP controllers only need the
920          * first two stages of init.
921          */
922         if (hdev->dev_type != HCI_PRIMARY)
923                 return 0;
924
925         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
926         if (err < 0)
927                 return err;
928
929         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
930         if (err < 0)
931                 return err;
932
933         /* This function is only called when the controller is actually in
934          * the configured state. When the controller is marked as
935          * unconfigured, this initialization procedure is not run.
936          *
937          * This means that a controller may run through its setup phase and
938          * then discover missing settings. In that case this function is not
939          * called right away; it will only be called later, during the
940          * config phase.
941          *
942          * So create the debugfs entries and register the SMP channels only
943          * when in the setup phase or the config phase.
944          */
945         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
946             !hci_dev_test_flag(hdev, HCI_CONFIG))
947                 return 0;
948
949         hci_debugfs_create_common(hdev);
950
951         if (lmp_bredr_capable(hdev))
952                 hci_debugfs_create_bredr(hdev);
953
954         if (lmp_le_capable(hdev))
955                 hci_debugfs_create_le(hdev);
956
957         return 0;
958 }
959
960 static int hci_init0_req(struct hci_request *req, unsigned long opt)
961 {
962         struct hci_dev *hdev = req->hdev;
963
964         BT_DBG("%s %ld", hdev->name, opt);
965
966         /* Reset */
967         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
968                 hci_reset_req(req, 0);
969
970         /* Read Local Version */
971         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
972
973         /* Read BD Address */
974         if (hdev->set_bdaddr)
975                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
976
977         return 0;
978 }
979
980 static int __hci_unconf_init(struct hci_dev *hdev)
981 {
982         int err;
983
984         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
985                 return 0;
986
987         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
988         if (err < 0)
989                 return err;
990
991         if (hci_dev_test_flag(hdev, HCI_SETUP))
992                 hci_debugfs_create_basic(hdev);
993
994         return 0;
995 }
996
997 static int hci_scan_req(struct hci_request *req, unsigned long opt)
998 {
999         __u8 scan = opt;
1000
1001         BT_DBG("%s %x", req->hdev->name, scan);
1002
1003         /* Inquiry and Page scans */
1004         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1005         return 0;
1006 }
1007
1008 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1009 {
1010         __u8 auth = opt;
1011
1012         BT_DBG("%s %x", req->hdev->name, auth);
1013
1014         /* Authentication */
1015         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1016         return 0;
1017 }
1018
1019 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1020 {
1021         __u8 encrypt = opt;
1022
1023         BT_DBG("%s %x", req->hdev->name, encrypt);
1024
1025         /* Encryption */
1026         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1027         return 0;
1028 }
1029
1030 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1031 {
1032         __le16 policy = cpu_to_le16(opt);
1033
1034         BT_DBG("%s %x", req->hdev->name, policy);
1035
1036         /* Default link policy */
1037         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1038         return 0;
1039 }
1040
1041 /* Get HCI device by index.
1042  * Device is held on return. */
1043 struct hci_dev *hci_dev_get(int index)
1044 {
1045         struct hci_dev *hdev = NULL, *d;
1046
1047         BT_DBG("%d", index);
1048
1049         if (index < 0)
1050                 return NULL;
1051
1052         read_lock(&hci_dev_list_lock);
1053         list_for_each_entry(d, &hci_dev_list, list) {
1054                 if (d->id == index) {
1055                         hdev = hci_dev_hold(d);
1056                         break;
1057                 }
1058         }
1059         read_unlock(&hci_dev_list_lock);
1060         return hdev;
1061 }
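/* Caller pattern sketch: the reference taken via hci_dev_hold() above
 * must be dropped with hci_dev_put() once the caller is done:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */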
1062
1063 /* ---- Inquiry support ---- */
1064
1065 bool hci_discovery_active(struct hci_dev *hdev)
1066 {
1067         struct discovery_state *discov = &hdev->discovery;
1068
1069         switch (discov->state) {
1070         case DISCOVERY_FINDING:
1071         case DISCOVERY_RESOLVING:
1072                 return true;
1073
1074         default:
1075                 return false;
1076         }
1077 }
1078
1079 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1080 {
1081         int old_state = hdev->discovery.state;
1082
1083         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1084
1085         if (old_state == state)
1086                 return;
1087
1088         hdev->discovery.state = state;
1089
1090         switch (state) {
1091         case DISCOVERY_STOPPED:
1092                 hci_update_background_scan(hdev);
1093
1094                 if (old_state != DISCOVERY_STARTING)
1095                         mgmt_discovering(hdev, 0);
1096                 break;
1097         case DISCOVERY_STARTING:
1098                 break;
1099         case DISCOVERY_FINDING:
1100                 mgmt_discovering(hdev, 1);
1101                 break;
1102         case DISCOVERY_RESOLVING:
1103                 break;
1104         case DISCOVERY_STOPPING:
1105                 break;
1106         }
1107 }
1108
1109 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1110 {
1111         struct discovery_state *cache = &hdev->discovery;
1112         struct inquiry_entry *p, *n;
1113
1114         list_for_each_entry_safe(p, n, &cache->all, all) {
1115                 list_del(&p->all);
1116                 kfree(p);
1117         }
1118
1119         INIT_LIST_HEAD(&cache->unknown);
1120         INIT_LIST_HEAD(&cache->resolve);
1121 }
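/* Only the ->all list owns the entries, which is why the loop above
 * kfree()s through it; ->unknown and ->resolve merely reference the
 * same entries, so re-initializing those list heads is sufficient.
 */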
1122
1123 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1124                                                bdaddr_t *bdaddr)
1125 {
1126         struct discovery_state *cache = &hdev->discovery;
1127         struct inquiry_entry *e;
1128
1129         BT_DBG("cache %p, %pMR", cache, bdaddr);
1130
1131         list_for_each_entry(e, &cache->all, all) {
1132                 if (!bacmp(&e->data.bdaddr, bdaddr))
1133                         return e;
1134         }
1135
1136         return NULL;
1137 }
1138
1139 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1140                                                        bdaddr_t *bdaddr)
1141 {
1142         struct discovery_state *cache = &hdev->discovery;
1143         struct inquiry_entry *e;
1144
1145         BT_DBG("cache %p, %pMR", cache, bdaddr);
1146
1147         list_for_each_entry(e, &cache->unknown, list) {
1148                 if (!bacmp(&e->data.bdaddr, bdaddr))
1149                         return e;
1150         }
1151
1152         return NULL;
1153 }
1154
1155 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1156                                                        bdaddr_t *bdaddr,
1157                                                        int state)
1158 {
1159         struct discovery_state *cache = &hdev->discovery;
1160         struct inquiry_entry *e;
1161
1162         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1163
1164         list_for_each_entry(e, &cache->resolve, list) {
1165                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1166                         return e;
1167                 if (!bacmp(&e->data.bdaddr, bdaddr))
1168                         return e;
1169         }
1170
1171         return NULL;
1172 }
1173
1174 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1175                                       struct inquiry_entry *ie)
1176 {
1177         struct discovery_state *cache = &hdev->discovery;
1178         struct list_head *pos = &cache->resolve;
1179         struct inquiry_entry *p;
1180
1181         list_del(&ie->list);
1182
1183         list_for_each_entry(p, &cache->resolve, list) {
1184                 if (p->name_state != NAME_PENDING &&
1185                     abs(p->data.rssi) >= abs(ie->data.rssi))
1186                         break;
1187                 pos = &p->list;
1188         }
1189
1190         list_add(&ie->list, pos);
1191 }
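/* The loop above keeps the resolve list ordered by signal strength:
 * a smaller |rssi| means a stronger signal, so stronger entries are
 * resolved first, while entries whose name resolution is already in
 * progress (NAME_PENDING) are skipped past and stay at the front.
 */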
1192
1193 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1194                              bool name_known)
1195 {
1196         struct discovery_state *cache = &hdev->discovery;
1197         struct inquiry_entry *ie;
1198         u32 flags = 0;
1199
1200         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1201
1202         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1203
1204         if (!data->ssp_mode)
1205                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1206
1207         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1208         if (ie) {
1209                 if (!ie->data.ssp_mode)
1210                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1211
1212                 if (ie->name_state == NAME_NEEDED &&
1213                     data->rssi != ie->data.rssi) {
1214                         ie->data.rssi = data->rssi;
1215                         hci_inquiry_cache_update_resolve(hdev, ie);
1216                 }
1217
1218                 goto update;
1219         }
1220
1221         /* Entry not in the cache. Add new one. */
1222         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1223         if (!ie) {
1224                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1225                 goto done;
1226         }
1227
1228         list_add(&ie->all, &cache->all);
1229
1230         if (name_known) {
1231                 ie->name_state = NAME_KNOWN;
1232         } else {
1233                 ie->name_state = NAME_NOT_KNOWN;
1234                 list_add(&ie->list, &cache->unknown);
1235         }
1236
1237 update:
1238         if (name_known && ie->name_state != NAME_KNOWN &&
1239             ie->name_state != NAME_PENDING) {
1240                 ie->name_state = NAME_KNOWN;
1241                 list_del(&ie->list);
1242         }
1243
1244         memcpy(&ie->data, data, sizeof(*data));
1245         ie->timestamp = jiffies;
1246         cache->timestamp = jiffies;
1247
1248         if (ie->name_state == NAME_NOT_KNOWN)
1249                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1250
1251 done:
1252         return flags;
1253 }
1254
1255 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1256 {
1257         struct discovery_state *cache = &hdev->discovery;
1258         struct inquiry_info *info = (struct inquiry_info *) buf;
1259         struct inquiry_entry *e;
1260         int copied = 0;
1261
1262         list_for_each_entry(e, &cache->all, all) {
1263                 struct inquiry_data *data = &e->data;
1264
1265                 if (copied >= num)
1266                         break;
1267
1268                 bacpy(&info->bdaddr, &data->bdaddr);
1269                 info->pscan_rep_mode    = data->pscan_rep_mode;
1270                 info->pscan_period_mode = data->pscan_period_mode;
1271                 info->pscan_mode        = data->pscan_mode;
1272                 memcpy(info->dev_class, data->dev_class, 3);
1273                 info->clock_offset      = data->clock_offset;
1274
1275                 info++;
1276                 copied++;
1277         }
1278
1279         BT_DBG("cache %p, copied %d", cache, copied);
1280         return copied;
1281 }
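/* inquiry_cache_dump() walks hdev->discovery without taking any locks,
 * so callers must hold hdev->lock; see the hci_dev_lock()/hci_dev_unlock()
 * pair around the call in hci_inquiry() below.
 */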
1282
1283 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1284 {
1285         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1286         struct hci_dev *hdev = req->hdev;
1287         struct hci_cp_inquiry cp;
1288
1289         BT_DBG("%s", hdev->name);
1290
1291         if (test_bit(HCI_INQUIRY, &hdev->flags))
1292                 return 0;
1293
1294         /* Start Inquiry */
1295         memcpy(&cp.lap, &ir->lap, 3);
1296         cp.length  = ir->length;
1297         cp.num_rsp = ir->num_rsp;
1298         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1299
1300         return 0;
1301 }
1302
1303 int hci_inquiry(void __user *arg)
1304 {
1305         __u8 __user *ptr = arg;
1306         struct hci_inquiry_req ir;
1307         struct hci_dev *hdev;
1308         int err = 0, do_inquiry = 0, max_rsp;
1309         long timeo;
1310         __u8 *buf;
1311
1312         if (copy_from_user(&ir, ptr, sizeof(ir)))
1313                 return -EFAULT;
1314
1315         hdev = hci_dev_get(ir.dev_id);
1316         if (!hdev)
1317                 return -ENODEV;
1318
1319         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1320                 err = -EBUSY;
1321                 goto done;
1322         }
1323
1324         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1325                 err = -EOPNOTSUPP;
1326                 goto done;
1327         }
1328
1329         if (hdev->dev_type != HCI_PRIMARY) {
1330                 err = -EOPNOTSUPP;
1331                 goto done;
1332         }
1333
1334         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1335                 err = -EOPNOTSUPP;
1336                 goto done;
1337         }
1338
1339         /* Restrict maximum inquiry length to 60 seconds */
1340         if (ir.length > 60) {
1341                 err = -EINVAL;
1342                 goto done;
1343         }
1344
1345         hci_dev_lock(hdev);
1346         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1347             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1348                 hci_inquiry_cache_flush(hdev);
1349                 do_inquiry = 1;
1350         }
1351         hci_dev_unlock(hdev);
1352
1353         timeo = ir.length * msecs_to_jiffies(2000);
1354
1355         if (do_inquiry) {
1356                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1357                                    timeo, NULL);
1358                 if (err < 0)
1359                         goto done;
1360
1361                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1362                  * cleared). If it is interrupted by a signal, return -EINTR.
1363                  */
1364                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1365                                 TASK_INTERRUPTIBLE)) {
1366                         err = -EINTR;
1367                         goto done;
1368                 }
1369         }
1370
1371         /* For an unlimited number of responses (num_rsp == 0), use a
1372          * buffer with 255 entries.
1373          */
1374         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1375
1376         /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
1377          * here and copy it to user space afterwards.
1378          */
1379         buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1380         if (!buf) {
1381                 err = -ENOMEM;
1382                 goto done;
1383         }
1384
1385         hci_dev_lock(hdev);
1386         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1387         hci_dev_unlock(hdev);
1388
1389         BT_DBG("num_rsp %d", ir.num_rsp);
1390
1391         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1392                 ptr += sizeof(ir);
1393                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1394                                  ir.num_rsp))
1395                         err = -EFAULT;
1396         } else
1397                 err = -EFAULT;
1398
1399         kfree(buf);
1400
1401 done:
1402         hci_dev_put(hdev);
1403         return err;
1404 }
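/* hci_inquiry() backs the HCIINQUIRY ioctl on a raw HCI socket, i.e.
 * socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI). A minimal userspace
 * sketch using the exported uapi definitions (error handling omitted;
 * the GIAC LAP 0x9e8b33 is stored least-significant byte first and
 * ir.length is in units of 1.28 seconds):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} req = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },
 *			.length  = 8,
 *			.num_rsp = 255,
 *		},
 *	};
 *
 *	ioctl(sock, HCIINQUIRY, &req);
 */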
1405
1406 /**
1407  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1408  *                                     (BD_ADDR) for a HCI device from
1409  *                                     a firmware node property.
1410  * @hdev:       The HCI device
1411  *
1412  * Search the firmware node for 'local-bd-address'.
1413  *
1414  * All-zero BD addresses are rejected, because those could be properties
1415  * that exist in the firmware tables but were never updated by the firmware.
1416  * For example, the DTS could define 'local-bd-address' with an all-zero value.
1417  */
1418 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1419 {
1420         struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1421         bdaddr_t ba;
1422         int ret;
1423
1424         ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1425                                             (u8 *)&ba, sizeof(ba));
1426         if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1427                 return;
1428
1429         bacpy(&hdev->public_addr, &ba);
1430 }
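/* Example firmware-node source for the property read above: a devicetree
 * fragment such as the following sketch (node name illustrative; per the
 * binding the bytes are little-endian, least significant byte first):
 *
 *	bluetooth {
 *		local-bd-address = [ 55 44 33 22 11 00 ];
 *	};
 *
 * would yield the public address 00:11:22:33:44:55.
 */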
1431
1432 static int hci_dev_do_open(struct hci_dev *hdev)
1433 {
1434         int ret = 0;
1435
1436         BT_DBG("%s %p", hdev->name, hdev);
1437
1438         hci_req_sync_lock(hdev);
1439
1440         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1441                 ret = -ENODEV;
1442                 goto done;
1443         }
1444
1445         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1446             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1447                 /* Check for rfkill but allow the HCI setup stage to
1448                  * proceed (which in itself doesn't cause any RF activity).
1449                  */
1450                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1451                         ret = -ERFKILL;
1452                         goto done;
1453                 }
1454
1455                 /* Check for valid public address or a configured static
1456                  * random address, but let the HCI setup proceed to
1457                  * be able to determine if there is a public address
1458                  * or not.
1459                  *
1460                  * In case of user channel usage, it is not important
1461                  * if a public address or static random address is
1462                  * available.
1463                  *
1464                  * This check is only valid for BR/EDR controllers
1465                  * since AMP controllers do not have an address.
1466                  */
1467                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1468                     hdev->dev_type == HCI_PRIMARY &&
1469                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1470                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1471                         ret = -EADDRNOTAVAIL;
1472                         goto done;
1473                 }
1474         }
1475
1476         if (test_bit(HCI_UP, &hdev->flags)) {
1477                 ret = -EALREADY;
1478                 goto done;
1479         }
1480
1481         if (hdev->open(hdev)) {
1482                 ret = -EIO;
1483                 goto done;
1484         }
1485
1486         set_bit(HCI_RUNNING, &hdev->flags);
1487         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1488
1489         atomic_set(&hdev->cmd_cnt, 1);
1490         set_bit(HCI_INIT, &hdev->flags);
1491
1492         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1493             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1494                 bool invalid_bdaddr;
1495
1496                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1497
1498                 if (hdev->setup)
1499                         ret = hdev->setup(hdev);
1500
1501                 /* The transport driver can set the quirk to mark the
1502                  * BD_ADDR invalid before creating the HCI device or in
1503                  * its setup callback.
1504                  */
1505                 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1506                                           &hdev->quirks);
1507
1508                 if (ret)
1509                         goto setup_failed;
1510
1511                 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1512                         if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1513                                 hci_dev_get_bd_addr_from_property(hdev);
1514
1515                         if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1516                             hdev->set_bdaddr) {
1517                                 ret = hdev->set_bdaddr(hdev,
1518                                                        &hdev->public_addr);
1519
1520                                 /* If setting of the BD_ADDR from the device
1521                                  * property succeeds, then treat the address
1522                                  * as valid even if the invalid BD_ADDR
1523                                  * quirk indicates otherwise.
1524                                  */
1525                                 if (!ret)
1526                                         invalid_bdaddr = false;
1527                         }
1528                 }
1529
1530 setup_failed:
1531                 /* The transport driver can set these quirks before
1532                  * creating the HCI device or in its setup callback.
1533                  *
1534                  * For the invalid BD_ADDR quirk, the address may
1535                  * become valid if the bootloader provides it
1536                  * (see above).
1537                  *
1538                  * In case any of them is set, the controller has to
1539                  * start up as unconfigured.
1540                  */
1541                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1542                     invalid_bdaddr)
1543                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1544
1545                 /* For an unconfigured controller it is required to
1546                  * read at least the version information provided by
1547                  * the Read Local Version Information command.
1548                  *
1549                  * If the set_bdaddr driver callback is provided, then
1550                  * also the original Bluetooth public device address
1551                  * will be read using the Read BD Address command.
1552                  */
1553                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1554                         ret = __hci_unconf_init(hdev);
1555         }
1556
1557         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1558                 /* If public address change is configured, ensure that
1559                  * the address gets programmed. If the driver does not
1560                  * support changing the public address, fail the power
1561                  * on procedure.
1562                  */
1563                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1564                     hdev->set_bdaddr)
1565                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1566                 else
1567                         ret = -EADDRNOTAVAIL;
1568         }
1569
1570         if (!ret) {
1571                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1572                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1573                         ret = __hci_init(hdev);
1574                         if (!ret && hdev->post_init)
1575                                 ret = hdev->post_init(hdev);
1576                 }
1577         }
1578
1579         /* If the HCI Reset command is clearing all diagnostic settings,
1580          * then they need to be reprogrammed after the init procedure
1581          * completed.
1582          */
1583         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1584             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1585             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1586                 ret = hdev->set_diag(hdev, true);
1587
1588         msft_do_open(hdev);
1589
1590         clear_bit(HCI_INIT, &hdev->flags);
1591
1592         if (!ret) {
1593                 hci_dev_hold(hdev);
1594                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1595                 hci_adv_instances_set_rpa_expired(hdev, true);
1596                 set_bit(HCI_UP, &hdev->flags);
1597                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1598                 hci_leds_update_powered(hdev, true);
1599                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1600                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1601                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1602                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1603                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1604                     hdev->dev_type == HCI_PRIMARY) {
1605                         ret = __hci_req_hci_power_on(hdev);
1606                         mgmt_power_on(hdev, ret);
1607                 }
1608         } else {
1609                 /* Init failed, cleanup */
1610                 flush_work(&hdev->tx_work);
1611
1612                 /* Since hci_rx_work() can queue new cmd_work, it
1613                  * should be flushed first to avoid an unexpected
1614                  * call of hci_cmd_work().
1615                  */
1616                 flush_work(&hdev->rx_work);
1617                 flush_work(&hdev->cmd_work);
1618
1619                 skb_queue_purge(&hdev->cmd_q);
1620                 skb_queue_purge(&hdev->rx_q);
1621
1622                 if (hdev->flush)
1623                         hdev->flush(hdev);
1624
1625                 if (hdev->sent_cmd) {
1626                         cancel_delayed_work_sync(&hdev->cmd_timer);
1627                         kfree_skb(hdev->sent_cmd);
1628                         hdev->sent_cmd = NULL;
1629                 }
1630
1631                 clear_bit(HCI_RUNNING, &hdev->flags);
1632                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1633
1634                 hdev->close(hdev);
1635                 hdev->flags &= BIT(HCI_RAW);
1636         }
1637
1638 done:
1639         hci_req_sync_unlock(hdev);
1640         return ret;
1641 }
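
/* Illustrative only: a hypothetical transport driver would hook into the
 * quirk handling in hci_dev_do_open() roughly like this before device
 * registration (the foo_* names are placeholders, not a real driver):
 */
#if 0
static int foo_bt_probe(struct platform_device *pdev)
{
        struct hci_dev *hdev;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        /* The controller boots without a usable BD_ADDR; ask the core
         * to try the 'local-bd-address' firmware property in setup.
         */
        set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
        set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
        hdev->set_bdaddr = foo_bt_set_bdaddr;   /* hypothetical vendor op */

        return hci_register_dev(hdev);
}
#endif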
1642
1643 /* ---- HCI ioctl helpers ---- */
1644
1645 int hci_dev_open(__u16 dev)
1646 {
1647         struct hci_dev *hdev;
1648         int err;
1649
1650         hdev = hci_dev_get(dev);
1651         if (!hdev)
1652                 return -ENODEV;
1653
1654         /* Devices that are marked as unconfigured can only be powered
1655          * up as user channel. Trying to bring them up as normal devices
1656          * will result in a failure. Only user channel operation is
1657          * possible.
1658          *
1659          * When this function is called for a user channel, the flag
1660          * HCI_USER_CHANNEL will be set first before attempting to
1661          * open the device.
1662          */
1663         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1664             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1665                 err = -EOPNOTSUPP;
1666                 goto done;
1667         }
1668
1669         /* We need to ensure that no other power on/off work is pending
1670          * before proceeding to call hci_dev_do_open. This is
1671          * particularly important if the setup procedure has not yet
1672          * completed.
1673          */
1674         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1675                 cancel_delayed_work(&hdev->power_off);
1676
1677         /* After this call it is guaranteed that the setup procedure
1678          * has finished. This means that error conditions like RFKILL
1679          * or no valid public or static random address apply.
1680          */
1681         flush_workqueue(hdev->req_workqueue);
1682
1683         /* For controllers not using the management interface and that
1684          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1685          * so that pairing works for them. Once the management interface
1686          * is in use this bit will be cleared again and userspace has
1687          * to explicitly enable it.
1688          */
1689         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1690             !hci_dev_test_flag(hdev, HCI_MGMT))
1691                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1692
1693         err = hci_dev_do_open(hdev);
1694
1695 done:
1696         hci_dev_put(hdev);
1697         return err;
1698 }
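
/* Userspace reaches hci_dev_open() through the HCIDEVUP ioctl on a raw
 * HCI socket; a minimal sketch of the caller side (error handling
 * elided):
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(sk, HCIDEVUP, 0);		// bring up hci0
 */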
1699
1700 /* This function requires the caller holds hdev->lock */
1701 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1702 {
1703         struct hci_conn_params *p;
1704
1705         list_for_each_entry(p, &hdev->le_conn_params, list) {
1706                 if (p->conn) {
1707                         hci_conn_drop(p->conn);
1708                         hci_conn_put(p->conn);
1709                         p->conn = NULL;
1710                 }
1711                 list_del_init(&p->action);
1712         }
1713
1714         BT_DBG("All LE pending actions cleared");
1715 }
1716
1717 int hci_dev_do_close(struct hci_dev *hdev)
1718 {
1719         bool auto_off;
1720
1721         BT_DBG("%s %p", hdev->name, hdev);
1722
1723         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1724             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1725             test_bit(HCI_UP, &hdev->flags)) {
1726                 /* Execute vendor specific shutdown routine */
1727                 if (hdev->shutdown)
1728                         hdev->shutdown(hdev);
1729         }
1730
1731         cancel_delayed_work(&hdev->power_off);
1732
1733         hci_request_cancel_all(hdev);
1734         hci_req_sync_lock(hdev);
1735
1736         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1737                 cancel_delayed_work_sync(&hdev->cmd_timer);
1738                 hci_req_sync_unlock(hdev);
1739                 return 0;
1740         }
1741
1742         hci_leds_update_powered(hdev, false);
1743
1744         /* Flush RX and TX works */
1745         flush_work(&hdev->tx_work);
1746         flush_work(&hdev->rx_work);
1747
1748         if (hdev->discov_timeout > 0) {
1749                 hdev->discov_timeout = 0;
1750                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1751                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1752         }
1753
1754         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1755                 cancel_delayed_work(&hdev->service_cache);
1756
1757         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1758                 struct adv_info *adv_instance;
1759
1760                 cancel_delayed_work_sync(&hdev->rpa_expired);
1761
1762                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1763                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1764         }
1765
1766         /* Avoid potential lockdep warnings from the *_flush() calls by
1767          * ensuring the workqueue is empty up front.
1768          */
1769         drain_workqueue(hdev->workqueue);
1770
1771         hci_dev_lock(hdev);
1772
1773         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1774
1775         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1776
1777         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1778             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1779             hci_dev_test_flag(hdev, HCI_MGMT))
1780                 __mgmt_power_off(hdev);
1781
1782         hci_inquiry_cache_flush(hdev);
1783         hci_pend_le_actions_clear(hdev);
1784         hci_conn_hash_flush(hdev);
1785         hci_dev_unlock(hdev);
1786
1787         smp_unregister(hdev);
1788
1789         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1790
1791         msft_do_close(hdev);
1792
1793         if (hdev->flush)
1794                 hdev->flush(hdev);
1795
1796         /* Reset device */
1797         skb_queue_purge(&hdev->cmd_q);
1798         atomic_set(&hdev->cmd_cnt, 1);
1799         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1800             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1801                 set_bit(HCI_INIT, &hdev->flags);
1802                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1803                 clear_bit(HCI_INIT, &hdev->flags);
1804         }
1805
1806         /* flush cmd  work */
1807         flush_work(&hdev->cmd_work);
1808
1809         /* Drop queues */
1810         skb_queue_purge(&hdev->rx_q);
1811         skb_queue_purge(&hdev->cmd_q);
1812         skb_queue_purge(&hdev->raw_q);
1813
1814         /* Drop last sent command */
1815         if (hdev->sent_cmd) {
1816                 cancel_delayed_work_sync(&hdev->cmd_timer);
1817                 kfree_skb(hdev->sent_cmd);
1818                 hdev->sent_cmd = NULL;
1819         }
1820
1821         clear_bit(HCI_RUNNING, &hdev->flags);
1822         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1823
1824         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1825                 wake_up(&hdev->suspend_wait_q);
1826
1827         /* After this point our queues are empty
1828          * and no tasks are scheduled. */
1829         hdev->close(hdev);
1830
1831         /* Clear flags */
1832         hdev->flags &= BIT(HCI_RAW);
1833         hci_dev_clear_volatile_flags(hdev);
1834
1835         /* Controller radio is available but is currently powered down */
1836         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1837
1838         memset(hdev->eir, 0, sizeof(hdev->eir));
1839         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1840         bacpy(&hdev->random_addr, BDADDR_ANY);
1841
1842         hci_req_sync_unlock(hdev);
1843
1844         hci_dev_put(hdev);
1845         return 0;
1846 }
1847
1848 int hci_dev_close(__u16 dev)
1849 {
1850         struct hci_dev *hdev;
1851         int err;
1852
1853         hdev = hci_dev_get(dev);
1854         if (!hdev)
1855                 return -ENODEV;
1856
1857         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1858                 err = -EBUSY;
1859                 goto done;
1860         }
1861
1862         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1863                 cancel_delayed_work(&hdev->power_off);
1864
1865         err = hci_dev_do_close(hdev);
1866
1867 done:
1868         hci_dev_put(hdev);
1869         return err;
1870 }
1871
1872 static int hci_dev_do_reset(struct hci_dev *hdev)
1873 {
1874         int ret;
1875
1876         BT_DBG("%s %p", hdev->name, hdev);
1877
1878         hci_req_sync_lock(hdev);
1879
1880         /* Drop queues */
1881         skb_queue_purge(&hdev->rx_q);
1882         skb_queue_purge(&hdev->cmd_q);
1883
1884         /* Avoid potential lockdep warnings from the *_flush() calls by
1885          * ensuring the workqueue is empty up front.
1886          */
1887         drain_workqueue(hdev->workqueue);
1888
1889         hci_dev_lock(hdev);
1890         hci_inquiry_cache_flush(hdev);
1891         hci_conn_hash_flush(hdev);
1892         hci_dev_unlock(hdev);
1893
1894         if (hdev->flush)
1895                 hdev->flush(hdev);
1896
1897         atomic_set(&hdev->cmd_cnt, 1);
1898         hdev->acl_cnt = 0;
1899         hdev->sco_cnt = 0;
1900         hdev->le_cnt = 0;
1899
1900         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1901
1902         hci_req_sync_unlock(hdev);
1903         return ret;
1904 }
1905
1906 int hci_dev_reset(__u16 dev)
1907 {
1908         struct hci_dev *hdev;
1909         int err;
1910
1911         hdev = hci_dev_get(dev);
1912         if (!hdev)
1913                 return -ENODEV;
1914
1915         if (!test_bit(HCI_UP, &hdev->flags)) {
1916                 err = -ENETDOWN;
1917                 goto done;
1918         }
1919
1920         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1921                 err = -EBUSY;
1922                 goto done;
1923         }
1924
1925         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1926                 err = -EOPNOTSUPP;
1927                 goto done;
1928         }
1929
1930         err = hci_dev_do_reset(hdev);
1931
1932 done:
1933         hci_dev_put(hdev);
1934         return err;
1935 }
1936
1937 int hci_dev_reset_stat(__u16 dev)
1938 {
1939         struct hci_dev *hdev;
1940         int ret = 0;
1941
1942         hdev = hci_dev_get(dev);
1943         if (!hdev)
1944                 return -ENODEV;
1945
1946         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1947                 ret = -EBUSY;
1948                 goto done;
1949         }
1950
1951         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1952                 ret = -EOPNOTSUPP;
1953                 goto done;
1954         }
1955
1956         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1957
1958 done:
1959         hci_dev_put(hdev);
1960         return ret;
1961 }
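
/* The remaining legacy entry points pair up with userspace the same way;
 * a sketch of the caller side for the helpers above (raw HCI socket as
 * in the earlier sketch, error handling elided):
 *
 *	ioctl(sk, HCIDEVDOWN, 0);	// hci_dev_close(0)
 *	ioctl(sk, HCIDEVRESET, 0);	// hci_dev_reset(0)
 *	ioctl(sk, HCIDEVRESTAT, 0);	// hci_dev_reset_stat(0)
 */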
1962
1963 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1964 {
1965         bool conn_changed, discov_changed;
1966
1967         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1968
1969         if ((scan & SCAN_PAGE))
1970                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1971                                                           HCI_CONNECTABLE);
1972         else
1973                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1974                                                            HCI_CONNECTABLE);
1975
1976         if ((scan & SCAN_INQUIRY)) {
1977                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1978                                                             HCI_DISCOVERABLE);
1979         } else {
1980                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1981                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1982                                                              HCI_DISCOVERABLE);
1983         }
1984
1985         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1986                 return;
1987
1988         if (conn_changed || discov_changed) {
1989                 /* In case this was disabled through mgmt */
1990                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1991
1992                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1993                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1994
1995                 mgmt_new_settings(hdev);
1996         }
1997 }
1998
1999 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2000 {
2001         struct hci_dev *hdev;
2002         struct hci_dev_req dr;
2003         int err = 0;
2004
2005         if (copy_from_user(&dr, arg, sizeof(dr)))
2006                 return -EFAULT;
2007
2008         hdev = hci_dev_get(dr.dev_id);
2009         if (!hdev)
2010                 return -ENODEV;
2011
2012         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2013                 err = -EBUSY;
2014                 goto done;
2015         }
2016
2017         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2018                 err = -EOPNOTSUPP;
2019                 goto done;
2020         }
2021
2022         if (hdev->dev_type != HCI_PRIMARY) {
2023                 err = -EOPNOTSUPP;
2024                 goto done;
2025         }
2026
2027         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2028                 err = -EOPNOTSUPP;
2029                 goto done;
2030         }
2031
2032         switch (cmd) {
2033         case HCISETAUTH:
2034                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2035                                    HCI_INIT_TIMEOUT, NULL);
2036                 break;
2037
2038         case HCISETENCRYPT:
2039                 if (!lmp_encrypt_capable(hdev)) {
2040                         err = -EOPNOTSUPP;
2041                         break;
2042                 }
2043
2044                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2045                         /* Auth must be enabled first */
2046                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2047                                            HCI_INIT_TIMEOUT, NULL);
2048                         if (err)
2049                                 break;
2050                 }
2051
2052                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2053                                    HCI_INIT_TIMEOUT, NULL);
2054                 break;
2055
2056         case HCISETSCAN:
2057                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2058                                    HCI_INIT_TIMEOUT, NULL);
2059
2060                 /* Ensure that the connectable and discoverable states
2061                  * get correctly modified as this was a non-mgmt change.
2062                  */
2063                 if (!err)
2064                         hci_update_scan_state(hdev, dr.dev_opt);
2065                 break;
2066
2067         case HCISETLINKPOL:
2068                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2069                                    HCI_INIT_TIMEOUT, NULL);
2070                 break;
2071
2072         case HCISETLINKMODE:
2073                 hdev->link_mode = ((__u16) dr.dev_opt) &
2074                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2075                 break;
2076
2077         case HCISETPTYPE:
2078                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2079                         break;
2080
2081                 hdev->pkt_type = (__u16) dr.dev_opt;
2082                 mgmt_phy_configuration_changed(hdev, NULL);
2083                 break;
2084
2085         case HCISETACLMTU:
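                /* dev_opt packs two 16-bit values: the first (in
                 * memory order) is the packet count, the second the
                 * MTU.
                 */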
2086                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2087                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2088                 break;
2089
2090         case HCISETSCOMTU:
2091                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2092                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2093                 break;
2094
2095         default:
2096                 err = -EINVAL;
2097                 break;
2098         }
2099
2100 done:
2101         hci_dev_put(hdev);
2102         return err;
2103 }
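
/* For example, the HCISETSCAN case above is what hciconfig-style tools
 * use to toggle page and inquiry scan; a sketch of the userspace side
 * (sk as in the earlier sketches):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,	// "piscan"
 *	};
 *
 *	ioctl(sk, HCISETSCAN, (unsigned long)&dr);
 */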
2104
2105 int hci_get_dev_list(void __user *arg)
2106 {
2107         struct hci_dev *hdev;
2108         struct hci_dev_list_req *dl;
2109         struct hci_dev_req *dr;
2110         int n = 0, size, err;
2111         __u16 dev_num;
2112
2113         if (get_user(dev_num, (__u16 __user *) arg))
2114                 return -EFAULT;
2115
2116         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2117                 return -EINVAL;
2118
2119         size = sizeof(*dl) + dev_num * sizeof(*dr);
2120
2121         dl = kzalloc(size, GFP_KERNEL);
2122         if (!dl)
2123                 return -ENOMEM;
2124
2125         dr = dl->dev_req;
2126
2127         read_lock(&hci_dev_list_lock);
2128         list_for_each_entry(hdev, &hci_dev_list, list) {
2129                 unsigned long flags = hdev->flags;
2130
2131                 /* When auto-off is configured the transport is
2132                  * running, but the device should still be reported
2133                  * as down.
2134                  */
2135                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2136                         flags &= ~BIT(HCI_UP);
2137
2138                 (dr + n)->dev_id  = hdev->id;
2139                 (dr + n)->dev_opt = flags;
2140
2141                 if (++n >= dev_num)
2142                         break;
2143         }
2144         read_unlock(&hci_dev_list_lock);
2145
2146         dl->dev_num = n;
2147         size = sizeof(*dl) + n * sizeof(*dr);
2148
2149         err = copy_to_user(arg, dl, size);
2150         kfree(dl);
2151
2152         return err ? -EFAULT : 0;
2153 }
2154
2155 int hci_get_dev_info(void __user *arg)
2156 {
2157         struct hci_dev *hdev;
2158         struct hci_dev_info di;
2159         unsigned long flags;
2160         int err = 0;
2161
2162         if (copy_from_user(&di, arg, sizeof(di)))
2163                 return -EFAULT;
2164
2165         hdev = hci_dev_get(di.dev_id);
2166         if (!hdev)
2167                 return -ENODEV;
2168
2169         /* When auto-off is configured the transport is running,
2170          * but the device should still be reported as down.
2171          */
2173         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2174                 flags = hdev->flags & ~BIT(HCI_UP);
2175         else
2176                 flags = hdev->flags;
2177
2178         strscpy(di.name, hdev->name, sizeof(di.name));
2179         di.bdaddr   = hdev->bdaddr;
2180         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2181         di.flags    = flags;
2182         di.pkt_type = hdev->pkt_type;
2183         if (lmp_bredr_capable(hdev)) {
2184                 di.acl_mtu  = hdev->acl_mtu;
2185                 di.acl_pkts = hdev->acl_pkts;
2186                 di.sco_mtu  = hdev->sco_mtu;
2187                 di.sco_pkts = hdev->sco_pkts;
2188         } else {
2189                 di.acl_mtu  = hdev->le_mtu;
2190                 di.acl_pkts = hdev->le_pkts;
2191                 di.sco_mtu  = 0;
2192                 di.sco_pkts = 0;
2193         }
2194         di.link_policy = hdev->link_policy;
2195         di.link_mode   = hdev->link_mode;
2196
2197         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2198         memcpy(&di.features, &hdev->features, sizeof(di.features));
2199
2200         if (copy_to_user(arg, &di, sizeof(di)))
2201                 err = -EFAULT;
2202
2203         hci_dev_put(hdev);
2204
2205         return err;
2206 }
2207
2208 /* ---- Interface to HCI drivers ---- */
2209
2210 static int hci_rfkill_set_block(void *data, bool blocked)
2211 {
2212         struct hci_dev *hdev = data;
2213
2214         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2215
2216         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2217                 return -EBUSY;
2218
2219         if (blocked) {
2220                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2221                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2222                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2223                         hci_dev_do_close(hdev);
2224         } else {
2225                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2226         }
2227
2228         return 0;
2229 }
2230
2231 static const struct rfkill_ops hci_rfkill_ops = {
2232         .set_block = hci_rfkill_set_block,
2233 };
2234
2235 static void hci_power_on(struct work_struct *work)
2236 {
2237         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2238         int err;
2239
2240         BT_DBG("%s", hdev->name);
2241
2242         if (test_bit(HCI_UP, &hdev->flags) &&
2243             hci_dev_test_flag(hdev, HCI_MGMT) &&
2244             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2245                 cancel_delayed_work(&hdev->power_off);
2246                 hci_req_sync_lock(hdev);
2247                 err = __hci_req_hci_power_on(hdev);
2248                 hci_req_sync_unlock(hdev);
2249                 mgmt_power_on(hdev, err);
2250                 return;
2251         }
2252
2253         err = hci_dev_do_open(hdev);
2254         if (err < 0) {
2255                 hci_dev_lock(hdev);
2256                 mgmt_set_powered_failed(hdev, err);
2257                 hci_dev_unlock(hdev);
2258                 return;
2259         }
2260
2261         /* During the HCI setup phase, a few error conditions are
2262          * ignored and they need to be checked now. If they are still
2263          * valid, it is important to turn the device back off.
2264          */
2265         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2266             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2267             (hdev->dev_type == HCI_PRIMARY &&
2268              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2269              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2270                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2271                 hci_dev_do_close(hdev);
2272         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2273                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2274                                    HCI_AUTO_OFF_TIMEOUT);
2275         }
2276
2277         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2278                 /* For unconfigured devices, set the HCI_RAW flag
2279                  * so that userspace can easily identify them.
2280                  */
2281                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2282                         set_bit(HCI_RAW, &hdev->flags);
2283
2284                 /* For fully configured devices, this will send
2285                  * the Index Added event. For unconfigured devices,
2286                  * it will send the Unconfigured Index Added event.
2287                  *
2288                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2289                  * and no event will be sent.
2290                  */
2291                 mgmt_index_added(hdev);
2292         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2293                 /* When the controller is now configured, then it
2294                  * is important to clear the HCI_RAW flag.
2295                  */
2296                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2297                         clear_bit(HCI_RAW, &hdev->flags);
2298
2299                 /* Powering on the controller with HCI_CONFIG set only
2300                  * happens with the transition from unconfigured to
2301                  * configured. This will send the Index Added event.
2302                  */
2303                 mgmt_index_added(hdev);
2304         }
2305 }
2306
2307 static void hci_power_off(struct work_struct *work)
2308 {
2309         struct hci_dev *hdev = container_of(work, struct hci_dev,
2310                                             power_off.work);
2311
2312         BT_DBG("%s", hdev->name);
2313
2314         hci_dev_do_close(hdev);
2315 }
2316
2317 static void hci_error_reset(struct work_struct *work)
2318 {
2319         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2320
2321         hci_dev_hold(hdev);
2322         BT_DBG("%s", hdev->name);
2323
2324         if (hdev->hw_error)
2325                 hdev->hw_error(hdev, hdev->hw_error_code);
2326         else
2327                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2328
2329         if (!hci_dev_do_close(hdev))
2330                 hci_dev_do_open(hdev);
2331
2332         hci_dev_put(hdev);
2333 }
2334
2335 void hci_uuids_clear(struct hci_dev *hdev)
2336 {
2337         struct bt_uuid *uuid, *tmp;
2338
2339         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2340                 list_del(&uuid->list);
2341                 kfree(uuid);
2342         }
2343 }
2344
2345 void hci_link_keys_clear(struct hci_dev *hdev)
2346 {
2347         struct link_key *key, *tmp;
2348
2349         list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
2350                 list_del_rcu(&key->list);
2351                 kfree_rcu(key, rcu);
2352         }
2353 }
2354
2355 void hci_smp_ltks_clear(struct hci_dev *hdev)
2356 {
2357         struct smp_ltk *k, *tmp;
2358
2359         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2360                 list_del_rcu(&k->list);
2361                 kfree_rcu(k, rcu);
2362         }
2363 }
2364
2365 void hci_smp_irks_clear(struct hci_dev *hdev)
2366 {
2367         struct smp_irk *k, *tmp;
2368
2369         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2370                 list_del_rcu(&k->list);
2371                 kfree_rcu(k, rcu);
2372         }
2373 }
2374
2375 void hci_blocked_keys_clear(struct hci_dev *hdev)
2376 {
2377         struct blocked_key *b, *tmp;
2378
2379         list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
2380                 list_del_rcu(&b->list);
2381                 kfree_rcu(b, rcu);
2382         }
2383 }
2384
2385 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2386 {
2387         bool blocked = false;
2388         struct blocked_key *b;
2389
2390         rcu_read_lock();
2391         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2392                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2393                         blocked = true;
2394                         break;
2395                 }
2396         }
2397
2398         rcu_read_unlock();
2399         return blocked;
2400 }
2401
2402 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2403 {
2404         struct link_key *k;
2405
2406         rcu_read_lock();
2407         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2408                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2409                         rcu_read_unlock();
2410
2411                         if (hci_is_blocked_key(hdev,
2412                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2413                                                k->val)) {
2414                                 bt_dev_warn_ratelimited(hdev,
2415                                                         "Link key blocked for %pMR",
2416                                                         &k->bdaddr);
2417                                 return NULL;
2418                         }
2419
2420                         return k;
2421                 }
2422         }
2423         rcu_read_unlock();
2424
2425         return NULL;
2426 }
2427
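/* Decide whether a newly created BR/EDR link key should be stored
 * persistently: legacy keys and keys derived over an SC-capable LE link
 * are always kept, debug keys never are, and for the remaining types it
 * depends on the bonding requirements of both sides.
 */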
2428 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2429                                u8 key_type, u8 old_key_type)
2430 {
2431         /* Legacy key */
2432         if (key_type < 0x03)
2433                 return true;
2434
2435         /* Debug keys are insecure so don't store them persistently */
2436         if (key_type == HCI_LK_DEBUG_COMBINATION)
2437                 return false;
2438
2439         /* Changed combination key and there's no previous one */
2440         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2441                 return false;
2442
2443         /* Security mode 3 case */
2444         if (!conn)
2445                 return true;
2446
2447         /* BR/EDR key derived using SC from an LE link */
2448         if (conn->type == LE_LINK)
2449                 return true;
2450
2451         /* Neither local nor remote side had no-bonding as a requirement */
2452         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2453                 return true;
2454
2455         /* Local side had dedicated bonding as a requirement */
2456         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2457                 return true;
2458
2459         /* Remote side had dedicated bonding as a requirement */
2460         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2461                 return true;
2462
2463         /* If none of the above criteria match, then don't store the key
2464          * persistently */
2465         return false;
2466 }
2467
2468 static u8 ltk_role(u8 type)
2469 {
2470         if (type == SMP_LTK)
2471                 return HCI_ROLE_MASTER;
2472
2473         return HCI_ROLE_SLAVE;
2474 }
2475
2476 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2477                              u8 addr_type, u8 role)
2478 {
2479         struct smp_ltk *k;
2480
2481         rcu_read_lock();
2482         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2483                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2484                         continue;
2485
2486                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2487                         rcu_read_unlock();
2488
2489                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2490                                                k->val)) {
2491                                 bt_dev_warn_ratelimited(hdev,
2492                                                         "LTK blocked for %pMR",
2493                                                         &k->bdaddr);
2494                                 return NULL;
2495                         }
2496
2497                         return k;
2498                 }
2499         }
2500         rcu_read_unlock();
2501
2502         return NULL;
2503 }
2504
2505 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2506 {
2507         struct smp_irk *irk_to_return = NULL;
2508         struct smp_irk *irk;
2509
2510         rcu_read_lock();
2511         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2512                 if (!bacmp(&irk->rpa, rpa)) {
2513                         irk_to_return = irk;
2514                         goto done;
2515                 }
2516         }
2517
2518         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2519                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2520                         bacpy(&irk->rpa, rpa);
2521                         irk_to_return = irk;
2522                         goto done;
2523                 }
2524         }
2525
2526 done:
2527         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2528                                                 irk_to_return->val)) {
2529                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2530                                         &irk_to_return->bdaddr);
2531                 irk_to_return = NULL;
2532         }
2533
2534         rcu_read_unlock();
2535
2536         return irk_to_return;
2537 }
2538
2539 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2540                                      u8 addr_type)
2541 {
2542         struct smp_irk *irk_to_return = NULL;
2543         struct smp_irk *irk;
2544
2545         /* Identity Address must be public or static random */
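        /* bdaddr_t is stored little-endian, so b[5] holds the most
         * significant byte; a static random address has the two top
         * bits of that byte set (0b11xxxxxx).
         */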
2546         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2547                 return NULL;
2548
2549         rcu_read_lock();
2550         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2551                 if (addr_type == irk->addr_type &&
2552                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2553                         irk_to_return = irk;
2554                         goto done;
2555                 }
2556         }
2557
2558 done:
2560         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2561                                                 irk_to_return->val)) {
2562                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2563                                         &irk_to_return->bdaddr);
2564                 irk_to_return = NULL;
2565         }
2566
2567         rcu_read_unlock();
2568
2569         return irk_to_return;
2570 }
2571
2572 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2573                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2574                                   u8 pin_len, bool *persistent)
2575 {
2576         struct link_key *key, *old_key;
2577         u8 old_key_type;
2578
2579         old_key = hci_find_link_key(hdev, bdaddr);
2580         if (old_key) {
2581                 old_key_type = old_key->type;
2582                 key = old_key;
2583         } else {
2584                 old_key_type = conn ? conn->key_type : 0xff;
2585                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2586                 if (!key)
2587                         return NULL;
2588                 list_add_rcu(&key->list, &hdev->link_keys);
2589         }
2590
2591         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2592
2593         /* Some buggy controller combinations generate a changed
2594          * combination key for legacy pairing even when there's no
2595          * previous key */
2596         if (type == HCI_LK_CHANGED_COMBINATION &&
2597             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2598                 type = HCI_LK_COMBINATION;
2599                 if (conn)
2600                         conn->key_type = type;
2601         }
2602
2603         bacpy(&key->bdaddr, bdaddr);
2604         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2605         key->pin_len = pin_len;
2606
2607         if (type == HCI_LK_CHANGED_COMBINATION)
2608                 key->type = old_key_type;
2609         else
2610                 key->type = type;
2611
2612         if (persistent)
2613                 *persistent = hci_persistent_key(hdev, conn, type,
2614                                                  old_key_type);
2615
2616         return key;
2617 }
2618
2619 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2620                             u8 addr_type, u8 type, u8 authenticated,
2621                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2622 {
2623         struct smp_ltk *key, *old_key;
2624         u8 role = ltk_role(type);
2625
2626         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2627         if (old_key)
2628                 key = old_key;
2629         else {
2630                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2631                 if (!key)
2632                         return NULL;
2633                 list_add_rcu(&key->list, &hdev->long_term_keys);
2634         }
2635
2636         bacpy(&key->bdaddr, bdaddr);
2637         key->bdaddr_type = addr_type;
2638         memcpy(key->val, tk, sizeof(key->val));
2639         key->authenticated = authenticated;
2640         key->ediv = ediv;
2641         key->rand = rand;
2642         key->enc_size = enc_size;
2643         key->type = type;
2644
2645         return key;
2646 }
2647
2648 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2649                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2650 {
2651         struct smp_irk *irk;
2652
2653         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2654         if (!irk) {
2655                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2656                 if (!irk)
2657                         return NULL;
2658
2659                 bacpy(&irk->bdaddr, bdaddr);
2660                 irk->addr_type = addr_type;
2661
2662                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2663         }
2664
2665         memcpy(irk->val, val, 16);
2666         bacpy(&irk->rpa, rpa);
2667
2668         return irk;
2669 }
2670
2671 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2672 {
2673         struct link_key *key;
2674
2675         key = hci_find_link_key(hdev, bdaddr);
2676         if (!key)
2677                 return -ENOENT;
2678
2679         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2680
2681         list_del_rcu(&key->list);
2682         kfree_rcu(key, rcu);
2683
2684         return 0;
2685 }
2686
2687 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2688 {
2689         struct smp_ltk *k, *tmp;
2690         int removed = 0;
2691
2692         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2693                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2694                         continue;
2695
2696                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2697
2698                 list_del_rcu(&k->list);
2699                 kfree_rcu(k, rcu);
2700                 removed++;
2701         }
2702
2703         return removed ? 0 : -ENOENT;
2704 }
2705
2706 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2707 {
2708         struct smp_irk *k, *tmp;
2709
2710         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2711                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2712                         continue;
2713
2714                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2715
2716                 list_del_rcu(&k->list);
2717                 kfree_rcu(k, rcu);
2718         }
2719 }
2720
2721 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2722 {
2723         struct smp_ltk *k;
2724         struct smp_irk *irk;
2725         u8 addr_type;
2726
2727         if (type == BDADDR_BREDR) {
2728                 if (hci_find_link_key(hdev, bdaddr))
2729                         return true;
2730                 return false;
2731         }
2732
2733         /* Convert to HCI addr type which struct smp_ltk uses */
2734         if (type == BDADDR_LE_PUBLIC)
2735                 addr_type = ADDR_LE_DEV_PUBLIC;
2736         else
2737                 addr_type = ADDR_LE_DEV_RANDOM;
2738
2739         irk = hci_get_irk(hdev, bdaddr, addr_type);
2740         if (irk) {
2741                 bdaddr = &irk->bdaddr;
2742                 addr_type = irk->addr_type;
2743         }
2744
2745         rcu_read_lock();
2746         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2747                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2748                         rcu_read_unlock();
2749                         return true;
2750                 }
2751         }
2752         rcu_read_unlock();
2753
2754         return false;
2755 }
2756
2757 /* HCI command timer function */
2758 static void hci_cmd_timeout(struct work_struct *work)
2759 {
2760         struct hci_dev *hdev = container_of(work, struct hci_dev,
2761                                             cmd_timer.work);
2762
2763         if (hdev->sent_cmd) {
2764                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2765                 u16 opcode = __le16_to_cpu(sent->opcode);
2766
2767                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2768         } else {
2769                 bt_dev_err(hdev, "command tx timeout");
2770         }
2771
2772         if (hdev->cmd_timeout)
2773                 hdev->cmd_timeout(hdev);
2774
2775         atomic_set(&hdev->cmd_cnt, 1);
2776         queue_work(hdev->workqueue, &hdev->cmd_work);
2777 }
2778
2779 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2780                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2781 {
2782         struct oob_data *data;
2783
2784         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2785                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2786                         continue;
2787                 if (data->bdaddr_type != bdaddr_type)
2788                         continue;
2789                 return data;
2790         }
2791
2792         return NULL;
2793 }
2794
2795 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2796                                u8 bdaddr_type)
2797 {
2798         struct oob_data *data;
2799
2800         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2801         if (!data)
2802                 return -ENOENT;
2803
2804         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2805
2806         list_del(&data->list);
2807         kfree(data);
2808
2809         return 0;
2810 }
2811
2812 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2813 {
2814         struct oob_data *data, *n;
2815
2816         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2817                 list_del(&data->list);
2818                 kfree(data);
2819         }
2820 }
2821
2822 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2823                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2824                             u8 *hash256, u8 *rand256)
2825 {
2826         struct oob_data *data;
2827
2828         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2829         if (!data) {
2830                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2831                 if (!data)
2832                         return -ENOMEM;
2833
2834                 bacpy(&data->bdaddr, bdaddr);
2835                 data->bdaddr_type = bdaddr_type;
2836                 list_add(&data->list, &hdev->remote_oob_data);
2837         }
2838
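        /* data->present is a bitmask: bit 0 is set when valid P-192
         * data is stored, bit 1 when valid P-256 data is stored.
         */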
2839         if (hash192 && rand192) {
2840                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2841                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2842                 if (hash256 && rand256)
2843                         data->present = 0x03;
2844         } else {
2845                 memset(data->hash192, 0, sizeof(data->hash192));
2846                 memset(data->rand192, 0, sizeof(data->rand192));
2847                 if (hash256 && rand256)
2848                         data->present = 0x02;
2849                 else
2850                         data->present = 0x00;
2851         }
2852
2853         if (hash256 && rand256) {
2854                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2855                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2856         } else {
2857                 memset(data->hash256, 0, sizeof(data->hash256));
2858                 memset(data->rand256, 0, sizeof(data->rand256));
2859                 if (hash192 && rand192)
2860                         data->present = 0x01;
2861         }
2862
2863         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2864
2865         return 0;
2866 }
2867
2868 /* This function requires the caller holds hdev->lock */
2869 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2870 {
2871         struct adv_info *adv_instance;
2872
2873         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2874                 if (adv_instance->instance == instance)
2875                         return adv_instance;
2876         }
2877
2878         return NULL;
2879 }
2880
2881 /* This function requires the caller holds hdev->lock */
2882 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2883 {
2884         struct adv_info *cur_instance;
2885
2886         cur_instance = hci_find_adv_instance(hdev, instance);
2887         if (!cur_instance)
2888                 return NULL;
2889
2890         if (cur_instance == list_last_entry(&hdev->adv_instances,
2891                                             struct adv_info, list))
2892                 return list_first_entry(&hdev->adv_instances,
2893                                                  struct adv_info, list);
2894         else
2895                 return list_next_entry(cur_instance, list);
2896 }
2897
2898 /* This function requires the caller holds hdev->lock */
2899 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2900 {
2901         struct adv_info *adv_instance;
2902
2903         adv_instance = hci_find_adv_instance(hdev, instance);
2904         if (!adv_instance)
2905                 return -ENOENT;
2906
2907         BT_DBG("%s removing %d", hdev->name, instance);
2908
2909         if (hdev->cur_adv_instance == instance) {
2910                 if (hdev->adv_instance_timeout) {
2911                         cancel_delayed_work(&hdev->adv_instance_expire);
2912                         hdev->adv_instance_timeout = 0;
2913                 }
2914                 hdev->cur_adv_instance = 0x00;
2915         }
2916
2917         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2918
2919         list_del(&adv_instance->list);
2920         kfree(adv_instance);
2921
2922         hdev->adv_instance_cnt--;
2923
2924         return 0;
2925 }
2926
2927 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2928 {
2929         struct adv_info *adv_instance, *n;
2930
2931         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2932                 adv_instance->rpa_expired = rpa_expired;
2933 }
2934
2935 /* This function requires the caller holds hdev->lock */
2936 void hci_adv_instances_clear(struct hci_dev *hdev)
2937 {
2938         struct adv_info *adv_instance, *n;
2939
2940         if (hdev->adv_instance_timeout) {
2941                 cancel_delayed_work(&hdev->adv_instance_expire);
2942                 hdev->adv_instance_timeout = 0;
2943         }
2944
2945         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2946                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2947                 list_del(&adv_instance->list);
2948                 kfree(adv_instance);
2949         }
2950
2951         hdev->adv_instance_cnt = 0;
2952         hdev->cur_adv_instance = 0x00;
2953 }
2954
2955 static void adv_instance_rpa_expired(struct work_struct *work)
2956 {
2957         struct adv_info *adv_instance = container_of(work, struct adv_info,
2958                                                      rpa_expired_cb.work);
2959
2960         BT_DBG("");
2961
2962         adv_instance->rpa_expired = true;
2963 }
2964
2965 /* This function requires the caller holds hdev->lock */
2966 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2967                          u16 adv_data_len, u8 *adv_data,
2968                          u16 scan_rsp_len, u8 *scan_rsp_data,
2969                          u16 timeout, u16 duration)
2970 {
2971         struct adv_info *adv_instance;
2972
2973         adv_instance = hci_find_adv_instance(hdev, instance);
2974         if (adv_instance) {
2975                 memset(adv_instance->adv_data, 0,
2976                        sizeof(adv_instance->adv_data));
2977                 memset(adv_instance->scan_rsp_data, 0,
2978                        sizeof(adv_instance->scan_rsp_data));
2979         } else {
2980                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2981                     instance < 1 || instance > hdev->le_num_of_adv_sets)
2982                         return -EOVERFLOW;
2983
2984                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2985                 if (!adv_instance)
2986                         return -ENOMEM;
2987
2988                 adv_instance->pending = true;
2989                 adv_instance->instance = instance;
2990                 list_add(&adv_instance->list, &hdev->adv_instances);
2991                 hdev->adv_instance_cnt++;
2992         }
2993
2994         adv_instance->flags = flags;
2995         adv_instance->adv_data_len = adv_data_len;
2996         adv_instance->scan_rsp_len = scan_rsp_len;
2997
2998         if (adv_data_len)
2999                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3000
3001         if (scan_rsp_len)
3002                 memcpy(adv_instance->scan_rsp_data,
3003                        scan_rsp_data, scan_rsp_len);
3004
3005         adv_instance->timeout = timeout;
3006         adv_instance->remaining_time = timeout;
3007
3008         if (duration == 0)
3009                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3010         else
3011                 adv_instance->duration = duration;
3012
3013         adv_instance->tx_power = HCI_TX_POWER_INVALID;
3014
3015         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3016                           adv_instance_rpa_expired);
3017
3018         BT_DBG("%s for %d", hdev->name, instance);
3019
3020         return 0;
3021 }
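
/* Illustrative only: with hdev->lock held, registering advertising
 * instance 1 with a single AD Flags field and the default rotation
 * duration could look like this (the data bytes are an assumption, not
 * taken from this file):
 *
 *	static const u8 adv[] = { 0x02, 0x01, 0x06 };
 *
 *	hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv), (u8 *)adv,
 *			     0, NULL, 0, 0);
 */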
3022
3023 /* This function requires the caller holds hdev->lock */
3024 void hci_adv_monitors_clear(struct hci_dev *hdev)
3025 {
3026         struct adv_monitor *monitor;
3027         int handle;
3028
3029         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3030                 hci_free_adv_monitor(monitor);
3031
3032         idr_destroy(&hdev->adv_monitors_idr);
3033 }
3034
3035 void hci_free_adv_monitor(struct adv_monitor *monitor)
3036 {
3037         struct adv_pattern *pattern;
3038         struct adv_pattern *tmp;
3039
3040         if (!monitor)
3041                 return;
3042
3043         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list)
3044                 kfree(pattern);
3045
3046         kfree(monitor);
3047 }
3048
3049 /* This function requires the caller holds hdev->lock */
3050 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3051 {
3052         int min, max, handle;
3053
3054         if (!monitor)
3055                 return -EINVAL;
3056
3057         min = HCI_MIN_ADV_MONITOR_HANDLE;
3058         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3059         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3060                            GFP_KERNEL);
3061         if (handle < 0)
3062                 return handle;
3063
3064         hdev->adv_monitors_cnt++;
3065         monitor->handle = handle;
3066
3067         hci_update_background_scan(hdev);
3068
3069         return 0;
3070 }
3071
3072 static int free_adv_monitor(int id, void *ptr, void *data)
3073 {
3074         struct hci_dev *hdev = data;
3075         struct adv_monitor *monitor = ptr;
3076
3077         idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3078         hci_free_adv_monitor(monitor);
3079         hdev->adv_monitors_cnt--;
3080
3081         return 0;
3082 }
3083
3084 /* This function requires the caller holds hdev->lock */
3085 int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle)
3086 {
3087         struct adv_monitor *monitor;
3088
3089         if (handle) {
3090                 monitor = idr_find(&hdev->adv_monitors_idr, handle);
3091                 if (!monitor)
3092                         return -ENOENT;
3093
3094                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3095                 hci_free_adv_monitor(monitor);
3096                 hdev->adv_monitors_cnt--;
3097         } else {
3098                 /* Remove all monitors if handle is 0. */
3099                 idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
3100         }
3101
3102         hci_update_background_scan(hdev);
3103
3104         return 0;
3105 }
3106
3107 /* This function requires the caller holds hdev->lock */
3108 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3109 {
3110         return !idr_is_empty(&hdev->adv_monitors_idr);
3111 }
3112
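/* Illustrative sketch (not part of the original file): building an
 * advertisement monitor that matches a Shortened Local Name beginning
 * with "hx" and handing it to hci_add_adv_monitor().  The helper name
 * is hypothetical; hdev->lock must be held as documented above.
 */
static int __maybe_unused example_add_name_monitor(struct hci_dev *hdev)
{
        struct adv_monitor *monitor;
        struct adv_pattern *pattern;
        int err;

        monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
        if (!monitor)
                return -ENOMEM;
        INIT_LIST_HEAD(&monitor->patterns);

        pattern = kzalloc(sizeof(*pattern), GFP_KERNEL);
        if (!pattern) {
                kfree(monitor);
                return -ENOMEM;
        }
        pattern->ad_type = 0x08;        /* Shortened Local Name */
        pattern->offset = 0;
        pattern->length = 2;
        memcpy(pattern->value, "hx", 2);
        list_add(&pattern->list, &monitor->patterns);

        err = hci_add_adv_monitor(hdev, monitor);
        if (err)        /* Not inserted into the IDR; free it ourselves */
                hci_free_adv_monitor(monitor);
        return err;
}
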
3113 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3114                                          bdaddr_t *bdaddr, u8 type)
3115 {
3116         struct bdaddr_list *b;
3117
3118         list_for_each_entry(b, bdaddr_list, list) {
3119                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3120                         return b;
3121         }
3122
3123         return NULL;
3124 }
3125
3126 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3127                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3128                                 u8 type)
3129 {
3130         struct bdaddr_list_with_irk *b;
3131
3132         list_for_each_entry(b, bdaddr_list, list) {
3133                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3134                         return b;
3135         }
3136
3137         return NULL;
3138 }
3139
3140 struct bdaddr_list_with_flags *
3141 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3142                                   bdaddr_t *bdaddr, u8 type)
3143 {
3144         struct bdaddr_list_with_flags *b;
3145
3146         list_for_each_entry(b, bdaddr_list, list) {
3147                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3148                         return b;
3149         }
3150
3151         return NULL;
3152 }
3153
3154 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3155 {
3156         struct bdaddr_list *b, *n;
3157
3158         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3159                 list_del(&b->list);
3160                 kfree(b);
3161         }
3162 }
3163
3164 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3165 {
3166         struct bdaddr_list *entry;
3167
3168         if (!bacmp(bdaddr, BDADDR_ANY))
3169                 return -EBADF;
3170
3171         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3172                 return -EEXIST;
3173
3174         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3175         if (!entry)
3176                 return -ENOMEM;
3177
3178         bacpy(&entry->bdaddr, bdaddr);
3179         entry->bdaddr_type = type;
3180
3181         list_add(&entry->list, list);
3182
3183         return 0;
3184 }
3185
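/* Illustrative sketch (not part of the original file): typical use of
 * the bdaddr list helpers, here adding a BR/EDR peer to the reject
 * list.  The helper name is hypothetical; callers run under hdev->lock.
 */
static int __maybe_unused example_reject_peer(struct hci_dev *hdev,
                                              bdaddr_t *peer)
{
        int err;

        err = hci_bdaddr_list_add(&hdev->reject_list, peer, BDADDR_BREDR);
        if (err == -EEXIST)     /* Already rejected; treat as success */
                err = 0;
        return err;
}
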
3186 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3187                                         u8 type, u8 *peer_irk, u8 *local_irk)
3188 {
3189         struct bdaddr_list_with_irk *entry;
3190
3191         if (!bacmp(bdaddr, BDADDR_ANY))
3192                 return -EBADF;
3193
3194         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3195                 return -EEXIST;
3196
3197         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3198         if (!entry)
3199                 return -ENOMEM;
3200
3201         bacpy(&entry->bdaddr, bdaddr);
3202         entry->bdaddr_type = type;
3203
3204         if (peer_irk)
3205                 memcpy(entry->peer_irk, peer_irk, 16);
3206
3207         if (local_irk)
3208                 memcpy(entry->local_irk, local_irk, 16);
3209
3210         list_add(&entry->list, list);
3211
3212         return 0;
3213 }
3214
3215 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3216                                    u8 type, u32 flags)
3217 {
3218         struct bdaddr_list_with_flags *entry;
3219
3220         if (!bacmp(bdaddr, BDADDR_ANY))
3221                 return -EBADF;
3222
3223         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3224                 return -EEXIST;
3225
3226         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3227         if (!entry)
3228                 return -ENOMEM;
3229
3230         bacpy(&entry->bdaddr, bdaddr);
3231         entry->bdaddr_type = type;
3232         entry->current_flags = flags;
3233
3234         list_add(&entry->list, list);
3235
3236         return 0;
3237 }
3238
3239 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3240 {
3241         struct bdaddr_list *entry;
3242
3243         if (!bacmp(bdaddr, BDADDR_ANY)) {
3244                 hci_bdaddr_list_clear(list);
3245                 return 0;
3246         }
3247
3248         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3249         if (!entry)
3250                 return -ENOENT;
3251
3252         list_del(&entry->list);
3253         kfree(entry);
3254
3255         return 0;
3256 }
3257
3258 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3259                                                         u8 type)
3260 {
3261         struct bdaddr_list_with_irk *entry;
3262
3263         if (!bacmp(bdaddr, BDADDR_ANY)) {
3264                 hci_bdaddr_list_clear(list);
3265                 return 0;
3266         }
3267
3268         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3269         if (!entry)
3270                 return -ENOENT;
3271
3272         list_del(&entry->list);
3273         kfree(entry);
3274
3275         return 0;
3276 }
3277
3278 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3279                                    u8 type)
3280 {
3281         struct bdaddr_list_with_flags *entry;
3282
3283         if (!bacmp(bdaddr, BDADDR_ANY)) {
3284                 hci_bdaddr_list_clear(list);
3285                 return 0;
3286         }
3287
3288         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3289         if (!entry)
3290                 return -ENOENT;
3291
3292         list_del(&entry->list);
3293         kfree(entry);
3294
3295         return 0;
3296 }
3297
3298 /* This function requires the caller holds hdev->lock */
3299 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3300                                                bdaddr_t *addr, u8 addr_type)
3301 {
3302         struct hci_conn_params *params;
3303
3304         list_for_each_entry(params, &hdev->le_conn_params, list) {
3305                 if (bacmp(&params->addr, addr) == 0 &&
3306                     params->addr_type == addr_type) {
3307                         return params;
3308                 }
3309         }
3310
3311         return NULL;
3312 }
3313
3314 /* This function requires the caller holds hdev->lock */
3315 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3316                                                   bdaddr_t *addr, u8 addr_type)
3317 {
3318         struct hci_conn_params *param;
3319
3320         switch (addr_type) {
3321         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3322                 addr_type = ADDR_LE_DEV_PUBLIC;
3323                 break;
3324         case ADDR_LE_DEV_RANDOM_RESOLVED:
3325                 addr_type = ADDR_LE_DEV_RANDOM;
3326                 break;
3327         }
3328
3329         list_for_each_entry(param, list, action) {
3330                 if (bacmp(&param->addr, addr) == 0 &&
3331                     param->addr_type == addr_type)
3332                         return param;
3333         }
3334
3335         return NULL;
3336 }
3337
3338 /* This function requires the caller holds hdev->lock */
3339 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3340                                             bdaddr_t *addr, u8 addr_type)
3341 {
3342         struct hci_conn_params *params;
3343
3344         params = hci_conn_params_lookup(hdev, addr, addr_type);
3345         if (params)
3346                 return params;
3347
3348         params = kzalloc(sizeof(*params), GFP_KERNEL);
3349         if (!params) {
3350                 bt_dev_err(hdev, "out of memory");
3351                 return NULL;
3352         }
3353
3354         bacpy(&params->addr, addr);
3355         params->addr_type = addr_type;
3356
3357         list_add(&params->list, &hdev->le_conn_params);
3358         INIT_LIST_HEAD(&params->action);
3359
3360         params->conn_min_interval = hdev->le_conn_min_interval;
3361         params->conn_max_interval = hdev->le_conn_max_interval;
3362         params->conn_latency = hdev->le_conn_latency;
3363         params->supervision_timeout = hdev->le_supv_timeout;
3364         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3365
3366         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3367
3368         return params;
3369 }
3370
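/* Illustrative sketch (not part of the original file): enabling
 * background auto-connection for an LE peer via the helper above, the
 * way the mgmt layer drives it.  The function name is hypothetical;
 * hdev->lock must be held.
 */
static int __maybe_unused example_autoconnect_peer(struct hci_dev *hdev,
                                                   bdaddr_t *addr)
{
        struct hci_conn_params *params;

        params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
        if (!params)
                return -ENOMEM;

        /* Queue on pend_le_conns so the background scan picks it up */
        params->auto_connect = HCI_AUTO_CONN_ALWAYS;
        list_add(&params->action, &hdev->pend_le_conns);
        hci_update_background_scan(hdev);
        return 0;
}
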
3371 static void hci_conn_params_free(struct hci_conn_params *params)
3372 {
3373         if (params->conn) {
3374                 hci_conn_drop(params->conn);
3375                 hci_conn_put(params->conn);
3376         }
3377
3378         list_del(&params->action);
3379         list_del(&params->list);
3380         kfree(params);
3381 }
3382
3383 /* This function requires the caller holds hdev->lock */
3384 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3385 {
3386         struct hci_conn_params *params;
3387
3388         params = hci_conn_params_lookup(hdev, addr, addr_type);
3389         if (!params)
3390                 return;
3391
3392         hci_conn_params_free(params);
3393
3394         hci_update_background_scan(hdev);
3395
3396         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3397 }
3398
3399 /* This function requires the caller holds hdev->lock */
3400 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3401 {
3402         struct hci_conn_params *params, *tmp;
3403
3404         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3405                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3406                         continue;
3407
3408                 /* If trying to establish a one-time connection to a disabled
3409                  * device, leave the params but mark them as one-shot.
3410                  */
3411                 if (params->explicit_connect) {
3412                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3413                         continue;
3414                 }
3415
3416                 list_del(&params->list);
3417                 kfree(params);
3418         }
3419
3420         BT_DBG("All LE disabled connection parameters were removed");
3421 }
3422
3423 /* This function requires the caller holds hdev->lock */
3424 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3425 {
3426         struct hci_conn_params *params, *tmp;
3427
3428         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3429                 hci_conn_params_free(params);
3430
3431         BT_DBG("All LE connection parameters were removed");
3432 }
3433
3434 /* Copy the Identity Address of the controller.
3435  *
3436  * If the controller has a public BD_ADDR, then by default use that one.
3437  * If this is an LE-only controller without a public address, default to
3438  * the static random address.
3439  *
3440  * For debugging purposes it is possible to force controllers with a
3441  * public address to use the static random address instead.
3442  *
3443  * In case BR/EDR has been disabled on a dual-mode controller and
3444  * userspace has configured a static address, then that address
3445  * becomes the identity address instead of the public BR/EDR address.
3446  */
3447 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3448                                u8 *bdaddr_type)
3449 {
3450         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3451             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3452             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3453              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3454                 bacpy(bdaddr, &hdev->static_addr);
3455                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3456         } else {
3457                 bacpy(bdaddr, &hdev->bdaddr);
3458                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3459         }
3460 }
3461
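/* For example, a dual-mode controller with BR/EDR disabled and a
 * configured static address yields the static random address above,
 * while the same controller with BR/EDR enabled reports its public
 * BD_ADDR.
 */
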
3462 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3463 {
3464         int i;
3465
3466         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3467                 clear_bit(i, hdev->suspend_tasks);
3468
3469         wake_up(&hdev->suspend_wait_q);
3470 }
3471
3472 static int hci_suspend_wait_event(struct hci_dev *hdev)
3473 {
3474 #define WAKE_COND                                                              \
3475         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3476          __SUSPEND_NUM_TASKS)
3477
3478         int i;
3479         int ret = wait_event_timeout(hdev->suspend_wait_q,
3480                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3481
3482         if (ret == 0) {
3483                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3484                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3485                         if (test_bit(i, hdev->suspend_tasks))
3486                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3487                         clear_bit(i, hdev->suspend_tasks);
3488                 }
3489
3490                 ret = -ETIMEDOUT;
3491         } else {
3492                 ret = 0;
3493         }
3494
3495         return ret;
3496 }
3497
3498 static void hci_prepare_suspend(struct work_struct *work)
3499 {
3500         struct hci_dev *hdev =
3501                 container_of(work, struct hci_dev, suspend_prepare);
3502
3503         hci_dev_lock(hdev);
3504         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3505         hci_dev_unlock(hdev);
3506 }
3507
3508 static int hci_change_suspend_state(struct hci_dev *hdev,
3509                                     enum suspended_state next)
3510 {
3511         hdev->suspend_state_next = next;
3512         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3513         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3514         return hci_suspend_wait_event(hdev);
3515 }
3516
3517 static void hci_clear_wake_reason(struct hci_dev *hdev)
3518 {
3519         hci_dev_lock(hdev);
3520
3521         hdev->wake_reason = 0;
3522         bacpy(&hdev->wake_addr, BDADDR_ANY);
3523         hdev->wake_addr_type = 0;
3524
3525         hci_dev_unlock(hdev);
3526 }
3527
3528 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3529                                 void *data)
3530 {
3531         struct hci_dev *hdev =
3532                 container_of(nb, struct hci_dev, suspend_notifier);
3533         int ret = 0;
3534         u8 state = BT_RUNNING;
3535
3536         /* If powering down, wait for completion. */
3537         if (mgmt_powering_down(hdev)) {
3538                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3539                 ret = hci_suspend_wait_event(hdev);
3540                 if (ret)
3541                         goto done;
3542         }
3543
3544         /* Suspend notifier should only act on events when powered. */
3545         if (!hdev_is_powered(hdev) ||
3546             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3547                 goto done;
3548
3549         if (action == PM_SUSPEND_PREPARE) {
3550                 /* Suspend consists of two actions:
3551                  *  - First, disconnect everything and make the controller not
3552                  *    connectable (disabling scanning)
3553                  *  - Second, program event filter/accept list and enable scan
3554                  */
3555                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3556                 if (!ret)
3557                         state = BT_SUSPEND_DISCONNECT;
3558
3559                 /* Only configure accept list if disconnect succeeded and wake
3560                  * isn't being prevented.
3561                  */
3562                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3563                         ret = hci_change_suspend_state(hdev,
3564                                                 BT_SUSPEND_CONFIGURE_WAKE);
3565                         if (!ret)
3566                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3567                 }
3568
3569                 hci_clear_wake_reason(hdev);
3570                 mgmt_suspending(hdev, state);
3571
3572         } else if (action == PM_POST_SUSPEND) {
3573                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3574
3575                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3576                               hdev->wake_addr_type);
3577         }
3578
3579 done:
3580         /* Always allow suspend even if suspend preparation failed, and
3581          * attempt to recover on resume.
3582          */
3583         if (ret)
3584                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3585                            action, ret);
3586
3587         return NOTIFY_DONE;
3588 }
3589
3590 /* Alloc HCI device */
3591 struct hci_dev *hci_alloc_dev(void)
3592 {
3593         struct hci_dev *hdev;
3594
3595         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3596         if (!hdev)
3597                 return NULL;
3598
3599         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3600         hdev->esco_type = (ESCO_HV1);
3601         hdev->link_mode = (HCI_LM_ACCEPT);
3602         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3603         hdev->io_capability = 0x03;     /* No Input No Output */
3604         hdev->manufacturer = 0xffff;    /* Default to internal use */
3605         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3606         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3607         hdev->adv_instance_cnt = 0;
3608         hdev->cur_adv_instance = 0x00;
3609         hdev->adv_instance_timeout = 0;
3610
3611         hdev->advmon_allowlist_duration = 300;
3612         hdev->advmon_no_filter_duration = 500;
3613
3614         hdev->sniff_max_interval = 800;
3615         hdev->sniff_min_interval = 80;
3616
3617         hdev->le_adv_channel_map = 0x07;
3618         hdev->le_adv_min_interval = 0x0800;
3619         hdev->le_adv_max_interval = 0x0800;
3620         hdev->le_scan_interval = 0x0060;
3621         hdev->le_scan_window = 0x0030;
3622         hdev->le_scan_int_suspend = 0x0400;
3623         hdev->le_scan_window_suspend = 0x0012;
3624         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3625         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3626         hdev->le_scan_int_connect = 0x0060;
3627         hdev->le_scan_window_connect = 0x0060;
3628         hdev->le_conn_min_interval = 0x0018;
3629         hdev->le_conn_max_interval = 0x0028;
3630         hdev->le_conn_latency = 0x0000;
3631         hdev->le_supv_timeout = 0x002a;
3632         hdev->le_def_tx_len = 0x001b;
3633         hdev->le_def_tx_time = 0x0148;
3634         hdev->le_max_tx_len = 0x001b;
3635         hdev->le_max_tx_time = 0x0148;
3636         hdev->le_max_rx_len = 0x001b;
3637         hdev->le_max_rx_time = 0x0148;
3638         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3639         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3640         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3641         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3642         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3643         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3644         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3645
3646         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3647         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3648         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3649         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3650         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3651         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3652
3653         /* default 1.28 sec page scan */
3654         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3655         hdev->def_page_scan_int = 0x0800;
3656         hdev->def_page_scan_window = 0x0012;
3657
3658         mutex_init(&hdev->lock);
3659         mutex_init(&hdev->req_lock);
3660
3661         INIT_LIST_HEAD(&hdev->mgmt_pending);
3662         INIT_LIST_HEAD(&hdev->reject_list);
3663         INIT_LIST_HEAD(&hdev->accept_list);
3664         INIT_LIST_HEAD(&hdev->uuids);
3665         INIT_LIST_HEAD(&hdev->link_keys);
3666         INIT_LIST_HEAD(&hdev->long_term_keys);
3667         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3668         INIT_LIST_HEAD(&hdev->remote_oob_data);
3669         INIT_LIST_HEAD(&hdev->le_accept_list);
3670         INIT_LIST_HEAD(&hdev->le_resolv_list);
3671         INIT_LIST_HEAD(&hdev->le_conn_params);
3672         INIT_LIST_HEAD(&hdev->pend_le_conns);
3673         INIT_LIST_HEAD(&hdev->pend_le_reports);
3674         INIT_LIST_HEAD(&hdev->conn_hash.list);
3675         INIT_LIST_HEAD(&hdev->adv_instances);
3676         INIT_LIST_HEAD(&hdev->blocked_keys);
3677
3678         INIT_WORK(&hdev->rx_work, hci_rx_work);
3679         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3680         INIT_WORK(&hdev->tx_work, hci_tx_work);
3681         INIT_WORK(&hdev->power_on, hci_power_on);
3682         INIT_WORK(&hdev->error_reset, hci_error_reset);
3683         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3684
3685         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3686
3687         skb_queue_head_init(&hdev->rx_q);
3688         skb_queue_head_init(&hdev->cmd_q);
3689         skb_queue_head_init(&hdev->raw_q);
3690
3691         init_waitqueue_head(&hdev->req_wait_q);
3692         init_waitqueue_head(&hdev->suspend_wait_q);
3693
3694         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3695
3696         hci_request_setup(hdev);
3697
3698         hci_init_sysfs(hdev);
3699         discovery_init(hdev);
3700
3701         return hdev;
3702 }
3703 EXPORT_SYMBOL(hci_alloc_dev);
3704
3705 /* Free HCI device */
3706 void hci_free_dev(struct hci_dev *hdev)
3707 {
3708         /* Will be freed via the device release callback */
3709         put_device(&hdev->dev);
3710 }
3711 EXPORT_SYMBOL(hci_free_dev);
3712
3713 /* Register HCI device */
3714 int hci_register_dev(struct hci_dev *hdev)
3715 {
3716         int id, error;
3717
3718         if (!hdev->open || !hdev->close || !hdev->send)
3719                 return -EINVAL;
3720
3721         /* Do not allow HCI_AMP devices to register at index 0,
3722          * so the index can be used as the AMP controller ID.
3723          */
3724         switch (hdev->dev_type) {
3725         case HCI_PRIMARY:
3726                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3727                 break;
3728         case HCI_AMP:
3729                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3730                 break;
3731         default:
3732                 return -EINVAL;
3733         }
3734
3735         if (id < 0)
3736                 return id;
3737
3738         error = dev_set_name(&hdev->dev, "hci%u", id);
3739         if (error)
3740                 return error;
3741
3742         hdev->name = dev_name(&hdev->dev);
3743         hdev->id = id;
3744
3745         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3746
3747         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3748         if (!hdev->workqueue) {
3749                 error = -ENOMEM;
3750                 goto err;
3751         }
3752
3753         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3754                                                       hdev->name);
3755         if (!hdev->req_workqueue) {
3756                 destroy_workqueue(hdev->workqueue);
3757                 error = -ENOMEM;
3758                 goto err;
3759         }
3760
3761         if (!IS_ERR_OR_NULL(bt_debugfs))
3762                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3763
3764         error = device_add(&hdev->dev);
3765         if (error < 0)
3766                 goto err_wqueue;
3767
3768         hci_leds_init(hdev);
3769
3770         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3771                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3772                                     hdev);
3773         if (hdev->rfkill) {
3774                 if (rfkill_register(hdev->rfkill) < 0) {
3775                         rfkill_destroy(hdev->rfkill);
3776                         hdev->rfkill = NULL;
3777                 }
3778         }
3779
3780         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3781                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3782
3783         hci_dev_set_flag(hdev, HCI_SETUP);
3784         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3785
3786         if (hdev->dev_type == HCI_PRIMARY) {
3787                 /* Assume BR/EDR support until proven otherwise (such as
3788                  * through reading the supported features during init).
3789                  */
3790                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3791         }
3792
3793         write_lock(&hci_dev_list_lock);
3794         list_add(&hdev->list, &hci_dev_list);
3795         write_unlock(&hci_dev_list_lock);
3796
3797         /* Devices that are marked for raw-only usage are unconfigured
3798          * and should not be included in normal operation.
3799          */
3800         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3801                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3802
3803         hci_sock_dev_event(hdev, HCI_DEV_REG);
3804         hci_dev_hold(hdev);
3805
3806         if (!hdev->suspend_notifier.notifier_call &&
3807             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3808                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3809                 error = register_pm_notifier(&hdev->suspend_notifier);
3810                 if (error)
3811                         goto err_wqueue;
3812         }
3813
3814         queue_work(hdev->req_workqueue, &hdev->power_on);
3815
3816         idr_init(&hdev->adv_monitors_idr);
3817
3818         return id;
3819
3820 err_wqueue:
3821         debugfs_remove_recursive(hdev->debugfs);
3822         destroy_workqueue(hdev->workqueue);
3823         destroy_workqueue(hdev->req_workqueue);
3824 err:
3825         ida_simple_remove(&hci_index_ida, hdev->id);
3826
3827         return error;
3828 }
3829 EXPORT_SYMBOL(hci_register_dev);
3830
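/* Illustrative sketch (not part of the original file): the minimal
 * driver side of hci_register_dev().  The example_* callbacks are
 * hypothetical placeholders for a real transport.
 */
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* A real driver would hand the skb to its transport here */
        kfree_skb(skb);
        return 0;
}

static int __maybe_unused example_register(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->dev_type = HCI_PRIMARY;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }
        return 0;
}
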
3831 /* Unregister HCI device */
3832 void hci_unregister_dev(struct hci_dev *hdev)
3833 {
3834         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3835
3836         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3837
3838         write_lock(&hci_dev_list_lock);
3839         list_del(&hdev->list);
3840         write_unlock(&hci_dev_list_lock);
3841
3842         cancel_work_sync(&hdev->power_on);
3843
3844         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3845                 hci_suspend_clear_tasks(hdev);
3846                 unregister_pm_notifier(&hdev->suspend_notifier);
3847                 cancel_work_sync(&hdev->suspend_prepare);
3848         }
3849
3850         hci_dev_do_close(hdev);
3851
3852         if (!test_bit(HCI_INIT, &hdev->flags) &&
3853             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3854             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3855                 hci_dev_lock(hdev);
3856                 mgmt_index_removed(hdev);
3857                 hci_dev_unlock(hdev);
3858         }
3859
3860         /* mgmt_index_removed should take care of emptying the
3861          * pending list. */
3862         BUG_ON(!list_empty(&hdev->mgmt_pending));
3863
3864         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3865
3866         if (hdev->rfkill) {
3867                 rfkill_unregister(hdev->rfkill);
3868                 rfkill_destroy(hdev->rfkill);
3869         }
3870
3871         device_del(&hdev->dev);
3872         /* Actual cleanup is deferred until hci_cleanup_dev(). */
3873         hci_dev_put(hdev);
3874 }
3875 EXPORT_SYMBOL(hci_unregister_dev);
3876
3877 /* Cleanup HCI device */
3878 void hci_cleanup_dev(struct hci_dev *hdev)
3879 {
3880         debugfs_remove_recursive(hdev->debugfs);
3881         kfree_const(hdev->hw_info);
3882         kfree_const(hdev->fw_info);
3883
3884         destroy_workqueue(hdev->workqueue);
3885         destroy_workqueue(hdev->req_workqueue);
3886
3887         hci_dev_lock(hdev);
3888         hci_bdaddr_list_clear(&hdev->reject_list);
3889         hci_bdaddr_list_clear(&hdev->accept_list);
3890         hci_uuids_clear(hdev);
3891         hci_link_keys_clear(hdev);
3892         hci_smp_ltks_clear(hdev);
3893         hci_smp_irks_clear(hdev);
3894         hci_remote_oob_data_clear(hdev);
3895         hci_adv_instances_clear(hdev);
3896         hci_adv_monitors_clear(hdev);
3897         hci_bdaddr_list_clear(&hdev->le_accept_list);
3898         hci_bdaddr_list_clear(&hdev->le_resolv_list);
3899         hci_conn_params_clear_all(hdev);
3900         hci_discovery_filter_clear(hdev);
3901         hci_blocked_keys_clear(hdev);
3902         hci_dev_unlock(hdev);
3903
3904         ida_simple_remove(&hci_index_ida, hdev->id);
3905 }
3906
3907 /* Suspend HCI device */
3908 int hci_suspend_dev(struct hci_dev *hdev)
3909 {
3910         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3911         return 0;
3912 }
3913 EXPORT_SYMBOL(hci_suspend_dev);
3914
3915 /* Resume HCI device */
3916 int hci_resume_dev(struct hci_dev *hdev)
3917 {
3918         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3919         return 0;
3920 }
3921 EXPORT_SYMBOL(hci_resume_dev);
3922
3923 /* Reset HCI device */
3924 int hci_reset_dev(struct hci_dev *hdev)
3925 {
3926         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3927         struct sk_buff *skb;
3928
3929         skb = bt_skb_alloc(3, GFP_ATOMIC);
3930         if (!skb)
3931                 return -ENOMEM;
3932
3933         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3934         skb_put_data(skb, hw_err, 3);
3935
3936         /* Send Hardware Error to upper stack */
3937         return hci_recv_frame(hdev, skb);
3938 }
3939 EXPORT_SYMBOL(hci_reset_dev);
3940
3941 /* Receive frame from HCI drivers */
3942 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3943 {
3944         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3945                       && !test_bit(HCI_INIT, &hdev->flags))) {
3946                 kfree_skb(skb);
3947                 return -ENXIO;
3948         }
3949
3950         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3951             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3952             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3953             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
3954                 kfree_skb(skb);
3955                 return -EINVAL;
3956         }
3957
3958         /* Incoming skb */
3959         bt_cb(skb)->incoming = 1;
3960
3961         /* Time stamp */
3962         __net_timestamp(skb);
3963
3964         skb_queue_tail(&hdev->rx_q, skb);
3965         queue_work(hdev->workqueue, &hdev->rx_work);
3966
3967         return 0;
3968 }
3969 EXPORT_SYMBOL(hci_recv_frame);
3970
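/* Illustrative sketch (not part of the original file): how a driver's
 * RX path might feed a received HCI event into the core.  The helper
 * name is hypothetical.
 */
static int __maybe_unused example_deliver_event(struct hci_dev *hdev,
                                                const void *data, size_t len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, data, len);

        /* hci_recv_frame() consumes the skb on both success and error */
        return hci_recv_frame(hdev, skb);
}
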
3971 /* Receive diagnostic message from HCI drivers */
3972 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3973 {
3974         /* Mark as diagnostic packet */
3975         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3976
3977         /* Time stamp */
3978         __net_timestamp(skb);
3979
3980         skb_queue_tail(&hdev->rx_q, skb);
3981         queue_work(hdev->workqueue, &hdev->rx_work);
3982
3983         return 0;
3984 }
3985 EXPORT_SYMBOL(hci_recv_diag);
3986
3987 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3988 {
3989         va_list vargs;
3990
3991         va_start(vargs, fmt);
3992         kfree_const(hdev->hw_info);
3993         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3994         va_end(vargs);
3995 }
3996 EXPORT_SYMBOL(hci_set_hw_info);
3997
3998 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3999 {
4000         va_list vargs;
4001
4002         va_start(vargs, fmt);
4003         kfree_const(hdev->fw_info);
4004         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4005         va_end(vargs);
4006 }
4007 EXPORT_SYMBOL(hci_set_fw_info);
4008
4009 /* ---- Interface to upper protocols ---- */
4010
4011 int hci_register_cb(struct hci_cb *cb)
4012 {
4013         BT_DBG("%p name %s", cb, cb->name);
4014
4015         mutex_lock(&hci_cb_list_lock);
4016         list_add_tail(&cb->list, &hci_cb_list);
4017         mutex_unlock(&hci_cb_list_lock);
4018
4019         return 0;
4020 }
4021 EXPORT_SYMBOL(hci_register_cb);
4022
4023 int hci_unregister_cb(struct hci_cb *cb)
4024 {
4025         BT_DBG("%p name %s", cb, cb->name);
4026
4027         mutex_lock(&hci_cb_list_lock);
4028         list_del(&cb->list);
4029         mutex_unlock(&hci_cb_list_lock);
4030
4031         return 0;
4032 }
4033 EXPORT_SYMBOL(hci_unregister_cb);
4034
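/* Illustrative sketch (not part of the original file): an upper
 * protocol registering for connection notifications.  Only .name and
 * .connect_cfm are shown; the remaining struct hci_cb hooks stay NULL.
 * Pair hci_register_cb(&example_cb) with hci_unregister_cb(&example_cb)
 * on teardown.
 */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
        BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb __maybe_unused = {
        .name           = "example",
        .connect_cfm    = example_connect_cfm,
};
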
4035 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4036 {
4037         int err;
4038
4039         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4040                skb->len);
4041
4042         /* Time stamp */
4043         __net_timestamp(skb);
4044
4045         /* Send copy to monitor */
4046         hci_send_to_monitor(hdev, skb);
4047
4048         if (atomic_read(&hdev->promisc)) {
4049                 /* Send copy to the sockets */
4050                 hci_send_to_sock(hdev, skb);
4051         }
4052
4053         /* Get rid of skb owner, prior to sending to the driver. */
4054         skb_orphan(skb);
4055
4056         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4057                 kfree_skb(skb);
4058                 return;
4059         }
4060
4061         err = hdev->send(hdev, skb);
4062         if (err < 0) {
4063                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4064                 kfree_skb(skb);
4065         }
4066 }
4067
4068 /* Send HCI command */
4069 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4070                  const void *param)
4071 {
4072         struct sk_buff *skb;
4073
4074         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4075
4076         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4077         if (!skb) {
4078                 bt_dev_err(hdev, "no memory for command");
4079                 return -ENOMEM;
4080         }
4081
4082         /* Stand-alone HCI commands must be flagged as
4083          * single-command requests.
4084          */
4085         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4086
4087         skb_queue_tail(&hdev->cmd_q, skb);
4088         queue_work(hdev->workqueue, &hdev->cmd_work);
4089
4090         return 0;
4091 }
4092
4093 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4094                    const void *param)
4095 {
4096         struct sk_buff *skb;
4097
4098         if (hci_opcode_ogf(opcode) != 0x3f) {
4099                 /* A controller receiving a command shall respond with either
4100                  * a Command Status Event or a Command Complete Event.
4101                  * Therefore, all standard HCI commands must be sent via the
4102                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4103                  * Some vendors do not comply with this rule for vendor-specific
4104                  * commands and do not return any event. We want to support
4105                  * unresponded commands for such cases only.
4106                  */
4107                 bt_dev_err(hdev, "unresponded command not supported");
4108                 return -EINVAL;
4109         }
4110
4111         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4112         if (!skb) {
4113                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4114                            opcode);
4115                 return -ENOMEM;
4116         }
4117
4118         hci_send_frame(hdev, skb);
4119
4120         return 0;
4121 }
4122 EXPORT_SYMBOL(__hci_cmd_send);
4123
4124 /* Get data from the previously sent command */
4125 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4126 {
4127         struct hci_command_hdr *hdr;
4128
4129         if (!hdev->sent_cmd)
4130                 return NULL;
4131
4132         hdr = (void *) hdev->sent_cmd->data;
4133
4134         if (hdr->opcode != cpu_to_le16(opcode))
4135                 return NULL;
4136
4137         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4138
4139         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4140 }
4141
4142 /* Send HCI command and wait for Command Complete event */
4143 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4144                              const void *param, u32 timeout)
4145 {
4146         struct sk_buff *skb;
4147
4148         if (!test_bit(HCI_UP, &hdev->flags))
4149                 return ERR_PTR(-ENETDOWN);
4150
4151         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4152
4153         hci_req_sync_lock(hdev);
4154         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4155         hci_req_sync_unlock(hdev);
4156
4157         return skb;
4158 }
4159 EXPORT_SYMBOL(hci_cmd_sync);
4160
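/* Illustrative sketch (not part of the original file): issuing a
 * synchronous Read Local Version command and parsing the reply.  The
 * helper name is hypothetical; the opcode and response layout are the
 * standard definitions from hci.h.
 */
static int __maybe_unused example_read_version(struct hci_dev *hdev)
{
        struct hci_rp_read_local_version *rp;
        struct sk_buff *skb;

        skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                           HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        if (skb->len < sizeof(*rp)) {
                kfree_skb(skb);
                return -EPROTO;
        }

        rp = (struct hci_rp_read_local_version *)skb->data;
        BT_DBG("%s hci_ver %u manufacturer %u", hdev->name, rp->hci_ver,
               __le16_to_cpu(rp->manufacturer));

        kfree_skb(skb);
        return 0;
}
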
4161 /* Send ACL data */
4162 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4163 {
4164         struct hci_acl_hdr *hdr;
4165         int len = skb->len;
4166
4167         skb_push(skb, HCI_ACL_HDR_SIZE);
4168         skb_reset_transport_header(skb);
4169         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4170         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4171         hdr->dlen   = cpu_to_le16(len);
4172 }
4173
4174 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4175                           struct sk_buff *skb, __u16 flags)
4176 {
4177         struct hci_conn *conn = chan->conn;
4178         struct hci_dev *hdev = conn->hdev;
4179         struct sk_buff *list;
4180
4181         skb->len = skb_headlen(skb);
4182         skb->data_len = 0;
4183
4184         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4185
4186         switch (hdev->dev_type) {
4187         case HCI_PRIMARY:
4188                 hci_add_acl_hdr(skb, conn->handle, flags);
4189                 break;
4190         case HCI_AMP:
4191                 hci_add_acl_hdr(skb, chan->handle, flags);
4192                 break;
4193         default:
4194                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4195                 return;
4196         }
4197
4198         list = skb_shinfo(skb)->frag_list;
4199         if (!list) {
4200                 /* Non-fragmented */
4201                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4202
4203                 skb_queue_tail(queue, skb);
4204         } else {
4205                 /* Fragmented */
4206                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4207
4208                 skb_shinfo(skb)->frag_list = NULL;
4209
4210                 /* Queue all fragments atomically. We need to use spin_lock_bh
4211                  * here because with 6LoWPAN links this function can be called
4212                  * from softirq context, where taking a normal spin lock could
4213                  * cause deadlocks.
4214                  */
4215                 spin_lock_bh(&queue->lock);
4216
4217                 __skb_queue_tail(queue, skb);
4218
4219                 flags &= ~ACL_START;
4220                 flags |= ACL_CONT;
4221                 do {
4222                         skb = list; list = list->next;
4223
4224                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4225                         hci_add_acl_hdr(skb, conn->handle, flags);
4226
4227                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4228
4229                         __skb_queue_tail(queue, skb);
4230                 } while (list);
4231
4232                 spin_unlock_bh(&queue->lock);
4233         }
4234 }
4235
4236 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4237 {
4238         struct hci_dev *hdev = chan->conn->hdev;
4239
4240         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4241
4242         hci_queue_acl(chan, &chan->data_q, skb, flags);
4243
4244         queue_work(hdev->workqueue, &hdev->tx_work);
4245 }
4246
4247 /* Send SCO data */
4248 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4249 {
4250         struct hci_dev *hdev = conn->hdev;
4251         struct hci_sco_hdr hdr;
4252
4253         BT_DBG("%s len %d", hdev->name, skb->len);
4254
4255         hdr.handle = cpu_to_le16(conn->handle);
4256         hdr.dlen   = skb->len;
4257
4258         skb_push(skb, HCI_SCO_HDR_SIZE);
4259         skb_reset_transport_header(skb);
4260         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4261
4262         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4263
4264         skb_queue_tail(&conn->data_q, skb);
4265         queue_work(hdev->workqueue, &hdev->tx_work);
4266 }
4267
4268 /* ---- HCI TX task (outgoing data) ---- */
4269
4270 /* HCI Connection scheduler */
4271 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4272                                      int *quote)
4273 {
4274         struct hci_conn_hash *h = &hdev->conn_hash;
4275         struct hci_conn *conn = NULL, *c;
4276         unsigned int num = 0, min = ~0;
4277
4278         /* We don't have to lock the device here. Connections are
4279          * always added and removed with the TX task disabled. */
4280
4281         rcu_read_lock();
4282
4283         list_for_each_entry_rcu(c, &h->list, list) {
4284                 if (c->type != type || skb_queue_empty(&c->data_q))
4285                         continue;
4286
4287                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4288                         continue;
4289
4290                 num++;
4291
4292                 if (c->sent < min) {
4293                         min  = c->sent;
4294                         conn = c;
4295                 }
4296
4297                 if (hci_conn_num(hdev, type) == num)
4298                         break;
4299         }
4300
4301         rcu_read_unlock();
4302
4303         if (conn) {
4304                 int cnt, q;
4305
4306                 switch (conn->type) {
4307                 case ACL_LINK:
4308                         cnt = hdev->acl_cnt;
4309                         break;
4310                 case SCO_LINK:
4311                 case ESCO_LINK:
4312                         cnt = hdev->sco_cnt;
4313                         break;
4314                 case LE_LINK:
4315                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4316                         break;
4317                 default:
4318                         cnt = 0;
4319                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4320                 }
4321
4322                 q = cnt / num;
4323                 *quote = q ? q : 1;
4324         } else
4325                 *quote = 0;
4326
4327         BT_DBG("conn %p quote %d", conn, *quote);
4328         return conn;
4329 }
4330
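/* Worked example of the quota above: with 9 free ACL buffers (cnt == 9)
 * shared by 3 busy ACL connections (num == 3), the least recently served
 * connection gets a quote of 3 packets per round; once cnt drops below
 * num, the quote bottoms out at 1 so no connection starves.
 */
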
4331 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4332 {
4333         struct hci_conn_hash *h = &hdev->conn_hash;
4334         struct hci_conn *c;
4335
4336         bt_dev_err(hdev, "link tx timeout");
4337
4338         rcu_read_lock();
4339
4340         /* Kill stalled connections */
4341         list_for_each_entry_rcu(c, &h->list, list) {
4342                 if (c->type == type && c->sent) {
4343                         bt_dev_err(hdev, "killing stalled connection %pMR",
4344                                    &c->dst);
4345                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4346                 }
4347         }
4348
4349         rcu_read_unlock();
4350 }
4351
4352 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4353                                       int *quote)
4354 {
4355         struct hci_conn_hash *h = &hdev->conn_hash;
4356         struct hci_chan *chan = NULL;
4357         unsigned int num = 0, min = ~0, cur_prio = 0;
4358         struct hci_conn *conn;
4359         int cnt, q, conn_num = 0;
4360
4361         BT_DBG("%s", hdev->name);
4362
4363         rcu_read_lock();
4364
4365         list_for_each_entry_rcu(conn, &h->list, list) {
4366                 struct hci_chan *tmp;
4367
4368                 if (conn->type != type)
4369                         continue;
4370
4371                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4372                         continue;
4373
4374                 conn_num++;
4375
4376                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4377                         struct sk_buff *skb;
4378
4379                         if (skb_queue_empty(&tmp->data_q))
4380                                 continue;
4381
4382                         skb = skb_peek(&tmp->data_q);
4383                         if (skb->priority < cur_prio)
4384                                 continue;
4385
4386                         if (skb->priority > cur_prio) {
4387                                 num = 0;
4388                                 min = ~0;
4389                                 cur_prio = skb->priority;
4390                         }
4391
4392                         num++;
4393
4394                         if (conn->sent < min) {
4395                                 min  = conn->sent;
4396                                 chan = tmp;
4397                         }
4398                 }
4399
4400                 if (hci_conn_num(hdev, type) == conn_num)
4401                         break;
4402         }
4403
4404         rcu_read_unlock();
4405
4406         if (!chan)
4407                 return NULL;
4408
4409         switch (chan->conn->type) {
4410         case ACL_LINK:
4411                 cnt = hdev->acl_cnt;
4412                 break;
4413         case AMP_LINK:
4414                 cnt = hdev->block_cnt;
4415                 break;
4416         case SCO_LINK:
4417         case ESCO_LINK:
4418                 cnt = hdev->sco_cnt;
4419                 break;
4420         case LE_LINK:
4421                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4422                 break;
4423         default:
4424                 cnt = 0;
4425                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4426         }
4427
4428         q = cnt / num;
4429         *quote = q ? q : 1;
4430         BT_DBG("chan %p quote %d", chan, *quote);
4431         return chan;
4432 }
4433
4434 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4435 {
4436         struct hci_conn_hash *h = &hdev->conn_hash;
4437         struct hci_conn *conn;
4438         int num = 0;
4439
4440         BT_DBG("%s", hdev->name);
4441
4442         rcu_read_lock();
4443
4444         list_for_each_entry_rcu(conn, &h->list, list) {
4445                 struct hci_chan *chan;
4446
4447                 if (conn->type != type)
4448                         continue;
4449
4450                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4451                         continue;
4452
4453                 num++;
4454
4455                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4456                         struct sk_buff *skb;
4457
4458                         if (chan->sent) {
4459                                 chan->sent = 0;
4460                                 continue;
4461                         }
4462
4463                         if (skb_queue_empty(&chan->data_q))
4464                                 continue;
4465
4466                         skb = skb_peek(&chan->data_q);
4467                         if (skb->priority >= HCI_PRIO_MAX - 1)
4468                                 continue;
4469
4470                         skb->priority = HCI_PRIO_MAX - 1;
4471
4472                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4473                                skb->priority);
4474                 }
4475
4476                 if (hci_conn_num(hdev, type) == num)
4477                         break;
4478         }
4479
4480         rcu_read_unlock();
4481
4482 }
4483
4484 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4485 {
4486         /* Calculate count of blocks used by this packet */
4487         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4488 }
4489
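/* Worked example: with hdev->block_len == 64, a 339 byte ACL frame
 * (335 bytes of payload after the 4 byte ACL header) occupies
 * DIV_ROUND_UP(335, 64) == 6 controller data blocks.
 */
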
4490 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
4491 {
4492         unsigned long last_tx;
4493
4494         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4495                 return;
4496
4497         switch (type) {
4498         case LE_LINK:
4499                 last_tx = hdev->le_last_tx;
4500                 break;
4501         default:
4502                 last_tx = hdev->acl_last_tx;
4503                 break;
4504         }
4505
4506         /* tx timeout must be longer than maximum link supervision timeout
4507          * (40.9 seconds)
4508          */
4509         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
4510                 hci_link_tx_to(hdev, type);
4511 }
4512
4513 /* Schedule SCO */
4514 static void hci_sched_sco(struct hci_dev *hdev)
4515 {
4516         struct hci_conn *conn;
4517         struct sk_buff *skb;
4518         int quote;
4519
4520         BT_DBG("%s", hdev->name);
4521
4522         if (!hci_conn_num(hdev, SCO_LINK))
4523                 return;
4524
4525         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4526                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4527                         BT_DBG("skb %p len %d", skb, skb->len);
4528                         hci_send_frame(hdev, skb);
4529
4530                         conn->sent++;
4531                         if (conn->sent == ~0)
4532                                 conn->sent = 0;
4533                 }
4534         }
4535 }
4536
4537 static void hci_sched_esco(struct hci_dev *hdev)
4538 {
4539         struct hci_conn *conn;
4540         struct sk_buff *skb;
4541         int quote;
4542
4543         BT_DBG("%s", hdev->name);
4544
4545         if (!hci_conn_num(hdev, ESCO_LINK))
4546                 return;
4547
4548         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4549                                                      &quote))) {
4550                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4551                         BT_DBG("skb %p len %d", skb, skb->len);
4552                         hci_send_frame(hdev, skb);
4553
4554                         conn->sent++;
4555                         if (conn->sent == ~0)
4556                                 conn->sent = 0;
4557                 }
4558         }
4559 }
4560
4561 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4562 {
4563         unsigned int cnt = hdev->acl_cnt;
4564         struct hci_chan *chan;
4565         struct sk_buff *skb;
4566         int quote;
4567
4568         __check_timeout(hdev, cnt, ACL_LINK);
4569
4570         while (hdev->acl_cnt &&
4571                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4572                 u32 priority = (skb_peek(&chan->data_q))->priority;
4573                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4574                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4575                                skb->len, skb->priority);
4576
4577                         /* Stop if priority has changed */
4578                         if (skb->priority < priority)
4579                                 break;
4580
4581                         skb = skb_dequeue(&chan->data_q);
4582
4583                         hci_conn_enter_active_mode(chan->conn,
4584                                                    bt_cb(skb)->force_active);
4585
4586                         hci_send_frame(hdev, skb);
4587                         hdev->acl_last_tx = jiffies;
4588
4589                         hdev->acl_cnt--;
4590                         chan->sent++;
4591                         chan->conn->sent++;
4592
4593                         /* Send pending SCO packets right away */
4594                         hci_sched_sco(hdev);
4595                         hci_sched_esco(hdev);
4596                 }
4597         }
4598
4599         if (cnt != hdev->acl_cnt)
4600                 hci_prio_recalculate(hdev, ACL_LINK);
4601 }
4602
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        __check_timeout(hdev, cnt, type);

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

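/* Top-level ACL scheduler: bail out early when this controller type has
 * no links to serve, then dispatch to the packet-based or block-based
 * scheduler according to the negotiated flow control mode.
 */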
static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL links on a primary (BR/EDR) controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
                return;

        /* No AMP links on an AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

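/* LE scheduling: mirrors the packet-based ACL scheduler.  Controllers
 * that report no dedicated LE buffers (le_pkts == 0) share the ACL
 * buffer pool, so credits are drawn from acl_cnt instead of le_cnt and
 * written back to the matching counter once the queues are drained.
 */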
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

        __check_timeout(hdev, cnt, LE_LINK);

        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;

                        /* Send pending SCO packets right away */
                        hci_sched_sco(hdev);
                        hci_sched_esco(hdev);
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

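/* TX work item, typically queued when new packets or fresh controller
 * credits arrive.  SCO/eSCO links are scheduled first, then ACL and LE;
 * all of this is skipped while the device is bound to a user channel,
 * since userspace then owns the queues.  Raw (unknown type) packets are
 * flushed last, regardless of mode.
 */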
static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_acl(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
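/* The 16-bit handle field of an ACL header packs a 12-bit connection
 * handle with 2-bit packet-boundary and broadcast flags; hci_handle()
 * and hci_flags() split them apart before the payload is handed to
 * L2CAP for reassembly.
 */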
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
                           handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
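/* For SCO the two low flag bits carry the Packet_Status_Flag used by
 * erroneous-data reporting; they are stashed in the skb control block so
 * the SCO socket layer can expose the status to userspace.
 */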
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                bt_cb(skb)->sco.pkt_status = flags & 0x03;
                sco_recv_scodata(conn, skb);
                return;
        } else {
                bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
                           handle);
        }

        kfree_skb(skb);
}

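/* A request is considered complete once every command belonging to it has
 * left the command queue: either the queue is empty, or the next queued
 * command is marked HCI_REQ_START and therefore opens a new request.
 */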
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

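/* Re-queue a clone of the last sent command at the head of the command
 * queue.  HCI_OP_RESET is deliberately never resent: if the last command
 * was a reset, the spontaneous reset-complete event that triggers this
 * path already answers it.
 */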
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

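/* Called from event processing to find the completion callbacks for the
 * request that @opcode belongs to.  Nothing is reported while the request
 * is still in flight; once it finishes (or one of its commands fails),
 * the callback is taken from hdev->sent_cmd, and any commands remaining
 * in the queue from an aborted request are flushed.
 */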
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
                          hci_req_complete_t *req_complete,
                          hci_req_complete_skb_t *req_complete_skb)
{
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent, we need to handle it specially.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR-based controllers generate a spontaneous
                 * reset-complete event during init, so any pending
                 * command will never be completed. In that case we
                 * need to resend whatever the last sent command was.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If we reach this point, this event matches the last command sent */
        hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the completion
         * callback is found in hdev->sent_cmd instead of the command
         * queue (hdev->cmd_q).
         */
        if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
                *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
                return;
        }

        if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
                *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
                return;
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
                        *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
                else
                        *req_complete = bt_cb(skb)->hci.req_complete;
                dev_kfree_skb_irq(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

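/* RX work item: drains hdev->rx_q.  Every packet is mirrored to the
 * monitor socket (and, in promiscuous mode, to raw HCI sockets) before
 * being dispatched to the handler for its packet type.
 */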
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                /* If the device has been opened in HCI_USER_CHANNEL,
                 * userspace has exclusive access to the device.
                 * While the device is in HCI_INIT, we still need to
                 * process the packets in the kernel so that the driver
                 * can complete its setup().
                 */
                if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    !test_bit(HCI_INIT, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (hci_skb_pkt_type(skb)) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                        case HCI_ISODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (hci_skb_pkt_type(skb)) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

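/* CMD work item: sends queued HCI commands one credit at a time.
 * cmd_cnt holds the controller's outstanding-command allowance and is
 * replenished by command-complete/status events.  A clone of the sent
 * skb is kept in hdev->sent_cmd for completion matching, and cmd_timer
 * acts as a watchdog in case the controller never answers.
 */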
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        if (hci_req_status_pend(hdev))
                                hci_dev_set_flag(hdev, HCI_CMD_PENDING);
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}