GNU Linux-libre 6.1.90-gnu: net/bluetooth/mgmt.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43
44 #define MGMT_VERSION    1
45 #define MGMT_REVISION   22
46
47 static const u16 mgmt_commands[] = {
48         MGMT_OP_READ_INDEX_LIST,
49         MGMT_OP_READ_INFO,
50         MGMT_OP_SET_POWERED,
51         MGMT_OP_SET_DISCOVERABLE,
52         MGMT_OP_SET_CONNECTABLE,
53         MGMT_OP_SET_FAST_CONNECTABLE,
54         MGMT_OP_SET_BONDABLE,
55         MGMT_OP_SET_LINK_SECURITY,
56         MGMT_OP_SET_SSP,
57         MGMT_OP_SET_HS,
58         MGMT_OP_SET_LE,
59         MGMT_OP_SET_DEV_CLASS,
60         MGMT_OP_SET_LOCAL_NAME,
61         MGMT_OP_ADD_UUID,
62         MGMT_OP_REMOVE_UUID,
63         MGMT_OP_LOAD_LINK_KEYS,
64         MGMT_OP_LOAD_LONG_TERM_KEYS,
65         MGMT_OP_DISCONNECT,
66         MGMT_OP_GET_CONNECTIONS,
67         MGMT_OP_PIN_CODE_REPLY,
68         MGMT_OP_PIN_CODE_NEG_REPLY,
69         MGMT_OP_SET_IO_CAPABILITY,
70         MGMT_OP_PAIR_DEVICE,
71         MGMT_OP_CANCEL_PAIR_DEVICE,
72         MGMT_OP_UNPAIR_DEVICE,
73         MGMT_OP_USER_CONFIRM_REPLY,
74         MGMT_OP_USER_CONFIRM_NEG_REPLY,
75         MGMT_OP_USER_PASSKEY_REPLY,
76         MGMT_OP_USER_PASSKEY_NEG_REPLY,
77         MGMT_OP_READ_LOCAL_OOB_DATA,
78         MGMT_OP_ADD_REMOTE_OOB_DATA,
79         MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80         MGMT_OP_START_DISCOVERY,
81         MGMT_OP_STOP_DISCOVERY,
82         MGMT_OP_CONFIRM_NAME,
83         MGMT_OP_BLOCK_DEVICE,
84         MGMT_OP_UNBLOCK_DEVICE,
85         MGMT_OP_SET_DEVICE_ID,
86         MGMT_OP_SET_ADVERTISING,
87         MGMT_OP_SET_BREDR,
88         MGMT_OP_SET_STATIC_ADDRESS,
89         MGMT_OP_SET_SCAN_PARAMS,
90         MGMT_OP_SET_SECURE_CONN,
91         MGMT_OP_SET_DEBUG_KEYS,
92         MGMT_OP_SET_PRIVACY,
93         MGMT_OP_LOAD_IRKS,
94         MGMT_OP_GET_CONN_INFO,
95         MGMT_OP_GET_CLOCK_INFO,
96         MGMT_OP_ADD_DEVICE,
97         MGMT_OP_REMOVE_DEVICE,
98         MGMT_OP_LOAD_CONN_PARAM,
99         MGMT_OP_READ_UNCONF_INDEX_LIST,
100         MGMT_OP_READ_CONFIG_INFO,
101         MGMT_OP_SET_EXTERNAL_CONFIG,
102         MGMT_OP_SET_PUBLIC_ADDRESS,
103         MGMT_OP_START_SERVICE_DISCOVERY,
104         MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105         MGMT_OP_READ_EXT_INDEX_LIST,
106         MGMT_OP_READ_ADV_FEATURES,
107         MGMT_OP_ADD_ADVERTISING,
108         MGMT_OP_REMOVE_ADVERTISING,
109         MGMT_OP_GET_ADV_SIZE_INFO,
110         MGMT_OP_START_LIMITED_DISCOVERY,
111         MGMT_OP_READ_EXT_INFO,
112         MGMT_OP_SET_APPEARANCE,
113         MGMT_OP_GET_PHY_CONFIGURATION,
114         MGMT_OP_SET_PHY_CONFIGURATION,
115         MGMT_OP_SET_BLOCKED_KEYS,
116         MGMT_OP_SET_WIDEBAND_SPEECH,
117         MGMT_OP_READ_CONTROLLER_CAP,
118         MGMT_OP_READ_EXP_FEATURES_INFO,
119         MGMT_OP_SET_EXP_FEATURE,
120         MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121         MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122         MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123         MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124         MGMT_OP_GET_DEVICE_FLAGS,
125         MGMT_OP_SET_DEVICE_FLAGS,
126         MGMT_OP_READ_ADV_MONITOR_FEATURES,
127         MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128         MGMT_OP_REMOVE_ADV_MONITOR,
129         MGMT_OP_ADD_EXT_ADV_PARAMS,
130         MGMT_OP_ADD_EXT_ADV_DATA,
131         MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132         MGMT_OP_SET_MESH_RECEIVER,
133         MGMT_OP_MESH_READ_FEATURES,
134         MGMT_OP_MESH_SEND,
135         MGMT_OP_MESH_SEND_CANCEL,
136 };
137
138 static const u16 mgmt_events[] = {
139         MGMT_EV_CONTROLLER_ERROR,
140         MGMT_EV_INDEX_ADDED,
141         MGMT_EV_INDEX_REMOVED,
142         MGMT_EV_NEW_SETTINGS,
143         MGMT_EV_CLASS_OF_DEV_CHANGED,
144         MGMT_EV_LOCAL_NAME_CHANGED,
145         MGMT_EV_NEW_LINK_KEY,
146         MGMT_EV_NEW_LONG_TERM_KEY,
147         MGMT_EV_DEVICE_CONNECTED,
148         MGMT_EV_DEVICE_DISCONNECTED,
149         MGMT_EV_CONNECT_FAILED,
150         MGMT_EV_PIN_CODE_REQUEST,
151         MGMT_EV_USER_CONFIRM_REQUEST,
152         MGMT_EV_USER_PASSKEY_REQUEST,
153         MGMT_EV_AUTH_FAILED,
154         MGMT_EV_DEVICE_FOUND,
155         MGMT_EV_DISCOVERING,
156         MGMT_EV_DEVICE_BLOCKED,
157         MGMT_EV_DEVICE_UNBLOCKED,
158         MGMT_EV_DEVICE_UNPAIRED,
159         MGMT_EV_PASSKEY_NOTIFY,
160         MGMT_EV_NEW_IRK,
161         MGMT_EV_NEW_CSRK,
162         MGMT_EV_DEVICE_ADDED,
163         MGMT_EV_DEVICE_REMOVED,
164         MGMT_EV_NEW_CONN_PARAM,
165         MGMT_EV_UNCONF_INDEX_ADDED,
166         MGMT_EV_UNCONF_INDEX_REMOVED,
167         MGMT_EV_NEW_CONFIG_OPTIONS,
168         MGMT_EV_EXT_INDEX_ADDED,
169         MGMT_EV_EXT_INDEX_REMOVED,
170         MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171         MGMT_EV_ADVERTISING_ADDED,
172         MGMT_EV_ADVERTISING_REMOVED,
173         MGMT_EV_EXT_INFO_CHANGED,
174         MGMT_EV_PHY_CONFIGURATION_CHANGED,
175         MGMT_EV_EXP_FEATURE_CHANGED,
176         MGMT_EV_DEVICE_FLAGS_CHANGED,
177         MGMT_EV_ADV_MONITOR_ADDED,
178         MGMT_EV_ADV_MONITOR_REMOVED,
179         MGMT_EV_CONTROLLER_SUSPEND,
180         MGMT_EV_CONTROLLER_RESUME,
181         MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182         MGMT_EV_ADV_MONITOR_DEVICE_LOST,
183 };
184
185 static const u16 mgmt_untrusted_commands[] = {
186         MGMT_OP_READ_INDEX_LIST,
187         MGMT_OP_READ_INFO,
188         MGMT_OP_READ_UNCONF_INDEX_LIST,
189         MGMT_OP_READ_CONFIG_INFO,
190         MGMT_OP_READ_EXT_INDEX_LIST,
191         MGMT_OP_READ_EXT_INFO,
192         MGMT_OP_READ_CONTROLLER_CAP,
193         MGMT_OP_READ_EXP_FEATURES_INFO,
194         MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195         MGMT_OP_READ_DEF_RUNTIME_CONFIG,
196 };
197
198 static const u16 mgmt_untrusted_events[] = {
199         MGMT_EV_INDEX_ADDED,
200         MGMT_EV_INDEX_REMOVED,
201         MGMT_EV_NEW_SETTINGS,
202         MGMT_EV_CLASS_OF_DEV_CHANGED,
203         MGMT_EV_LOCAL_NAME_CHANGED,
204         MGMT_EV_UNCONF_INDEX_ADDED,
205         MGMT_EV_UNCONF_INDEX_REMOVED,
206         MGMT_EV_NEW_CONFIG_OPTIONS,
207         MGMT_EV_EXT_INDEX_ADDED,
208         MGMT_EV_EXT_INDEX_REMOVED,
209         MGMT_EV_EXT_INFO_CHANGED,
210         MGMT_EV_EXP_FEATURE_CHANGED,
211 };
212
213 #define CACHE_TIMEOUT   msecs_to_jiffies(2 * 1000)
214
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216                  "\x00\x00\x00\x00\x00\x00\x00\x00"
217
218 /* HCI to MGMT error code conversion table */
219 static const u8 mgmt_status_table[] = {
220         MGMT_STATUS_SUCCESS,
221         MGMT_STATUS_UNKNOWN_COMMAND,    /* Unknown Command */
222         MGMT_STATUS_NOT_CONNECTED,      /* No Connection */
223         MGMT_STATUS_FAILED,             /* Hardware Failure */
224         MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
225         MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
226         MGMT_STATUS_AUTH_FAILED,        /* PIN or Key Missing */
227         MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
228         MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
229         MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
230         MGMT_STATUS_NO_RESOURCES,       /* Max Number of SCO Connections */
231         MGMT_STATUS_ALREADY_CONNECTED,  /* ACL Connection Exists */
232         MGMT_STATUS_BUSY,               /* Command Disallowed */
233         MGMT_STATUS_NO_RESOURCES,       /* Rejected Limited Resources */
234         MGMT_STATUS_REJECTED,           /* Rejected Security */
235         MGMT_STATUS_REJECTED,           /* Rejected Personal */
236         MGMT_STATUS_TIMEOUT,            /* Host Timeout */
237         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Feature */
238         MGMT_STATUS_INVALID_PARAMS,     /* Invalid Parameters */
239         MGMT_STATUS_DISCONNECTED,       /* OE User Ended Connection */
240         MGMT_STATUS_NO_RESOURCES,       /* OE Low Resources */
241         MGMT_STATUS_DISCONNECTED,       /* OE Power Off */
242         MGMT_STATUS_DISCONNECTED,       /* Connection Terminated */
243         MGMT_STATUS_BUSY,               /* Repeated Attempts */
244         MGMT_STATUS_REJECTED,           /* Pairing Not Allowed */
245         MGMT_STATUS_FAILED,             /* Unknown LMP PDU */
246         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Remote Feature */
247         MGMT_STATUS_REJECTED,           /* SCO Offset Rejected */
248         MGMT_STATUS_REJECTED,           /* SCO Interval Rejected */
249         MGMT_STATUS_REJECTED,           /* Air Mode Rejected */
250         MGMT_STATUS_INVALID_PARAMS,     /* Invalid LMP Parameters */
251         MGMT_STATUS_FAILED,             /* Unspecified Error */
252         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported LMP Parameter Value */
253         MGMT_STATUS_FAILED,             /* Role Change Not Allowed */
254         MGMT_STATUS_TIMEOUT,            /* LMP Response Timeout */
255         MGMT_STATUS_FAILED,             /* LMP Error Transaction Collision */
256         MGMT_STATUS_FAILED,             /* LMP PDU Not Allowed */
257         MGMT_STATUS_REJECTED,           /* Encryption Mode Not Accepted */
258         MGMT_STATUS_FAILED,             /* Unit Link Key Used */
259         MGMT_STATUS_NOT_SUPPORTED,      /* QoS Not Supported */
260         MGMT_STATUS_TIMEOUT,            /* Instant Passed */
261         MGMT_STATUS_NOT_SUPPORTED,      /* Pairing Not Supported */
262         MGMT_STATUS_FAILED,             /* Transaction Collision */
263         MGMT_STATUS_FAILED,             /* Reserved for future use */
264         MGMT_STATUS_INVALID_PARAMS,     /* Unacceptable Parameter */
265         MGMT_STATUS_REJECTED,           /* QoS Rejected */
266         MGMT_STATUS_NOT_SUPPORTED,      /* Classification Not Supported */
267         MGMT_STATUS_REJECTED,           /* Insufficient Security */
268         MGMT_STATUS_INVALID_PARAMS,     /* Parameter Out Of Range */
269         MGMT_STATUS_FAILED,             /* Reserved for future use */
270         MGMT_STATUS_BUSY,               /* Role Switch Pending */
271         MGMT_STATUS_FAILED,             /* Reserved for future use */
272         MGMT_STATUS_FAILED,             /* Slot Violation */
273         MGMT_STATUS_FAILED,             /* Role Switch Failed */
274         MGMT_STATUS_INVALID_PARAMS,     /* EIR Too Large */
275         MGMT_STATUS_NOT_SUPPORTED,      /* Simple Pairing Not Supported */
276         MGMT_STATUS_BUSY,               /* Host Busy Pairing */
277         MGMT_STATUS_REJECTED,           /* Rejected, No Suitable Channel */
278         MGMT_STATUS_BUSY,               /* Controller Busy */
279         MGMT_STATUS_INVALID_PARAMS,     /* Unsuitable Connection Interval */
280         MGMT_STATUS_TIMEOUT,            /* Directed Advertising Timeout */
281         MGMT_STATUS_AUTH_FAILED,        /* Terminated Due to MIC Failure */
282         MGMT_STATUS_CONNECT_FAILED,     /* Connection Establishment Failed */
283         MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
284 };
285
286 static u8 mgmt_errno_status(int err)
287 {
288         switch (err) {
289         case 0:
290                 return MGMT_STATUS_SUCCESS;
291         case -EPERM:
292                 return MGMT_STATUS_REJECTED;
293         case -EINVAL:
294                 return MGMT_STATUS_INVALID_PARAMS;
295         case -EOPNOTSUPP:
296                 return MGMT_STATUS_NOT_SUPPORTED;
297         case -EBUSY:
298                 return MGMT_STATUS_BUSY;
299         case -ETIMEDOUT:
300                 return MGMT_STATUS_AUTH_FAILED;
301         case -ENOMEM:
302                 return MGMT_STATUS_NO_RESOURCES;
303         case -EISCONN:
304                 return MGMT_STATUS_ALREADY_CONNECTED;
305         case -ENOTCONN:
306                 return MGMT_STATUS_DISCONNECTED;
307         }
308
309         return MGMT_STATUS_FAILED;
310 }
311
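/* Convert a negative errno or a non-negative HCI status code into the
 * corresponding MGMT status. HCI codes index mgmt_status_table directly;
 * anything past the end of the table maps to MGMT_STATUS_FAILED.
 */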
312 static u8 mgmt_status(int err)
313 {
314         if (err < 0)
315                 return mgmt_errno_status(err);
316
317         if (err < ARRAY_SIZE(mgmt_status_table))
318                 return mgmt_status_table[err];
319
320         return MGMT_STATUS_FAILED;
321 }
322
323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
324                             u16 len, int flag)
325 {
326         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
327                                flag, NULL);
328 }
329
330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331                               u16 len, int flag, struct sock *skip_sk)
332 {
333         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334                                flag, skip_sk);
335 }
336
337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338                       struct sock *skip_sk)
339 {
340         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341                                HCI_SOCK_TRUSTED, skip_sk);
342 }
343
344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
345 {
346         return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
347                                    skip_sk);
348 }
349
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352         if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353                 return ADDR_LE_DEV_PUBLIC;
354         else
355                 return ADDR_LE_DEV_RANDOM;
356 }
357
358 void mgmt_fill_version_info(void *ver)
359 {
360         struct mgmt_rp_read_version *rp = ver;
361
362         rp->version = MGMT_VERSION;
363         rp->revision = cpu_to_le16(MGMT_REVISION);
364 }
365
366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
367                         u16 data_len)
368 {
369         struct mgmt_rp_read_version rp;
370
371         bt_dev_dbg(hdev, "sock %p", sk);
372
373         mgmt_fill_version_info(&rp);
374
375         return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
376                                  &rp, sizeof(rp));
377 }
378
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380                          u16 data_len)
381 {
382         struct mgmt_rp_read_commands *rp;
383         u16 num_commands, num_events;
384         size_t rp_size;
385         int i, err;
386
387         bt_dev_dbg(hdev, "sock %p", sk);
388
389         if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390                 num_commands = ARRAY_SIZE(mgmt_commands);
391                 num_events = ARRAY_SIZE(mgmt_events);
392         } else {
393                 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394                 num_events = ARRAY_SIZE(mgmt_untrusted_events);
395         }
396
397         rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398
399         rp = kmalloc(rp_size, GFP_KERNEL);
400         if (!rp)
401                 return -ENOMEM;
402
403         rp->num_commands = cpu_to_le16(num_commands);
404         rp->num_events = cpu_to_le16(num_events);
405
406         if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407                 __le16 *opcode = rp->opcodes;
408
409                 for (i = 0; i < num_commands; i++, opcode++)
410                         put_unaligned_le16(mgmt_commands[i], opcode);
411
412                 for (i = 0; i < num_events; i++, opcode++)
413                         put_unaligned_le16(mgmt_events[i], opcode);
414         } else {
415                 __le16 *opcode = rp->opcodes;
416
417                 for (i = 0; i < num_commands; i++, opcode++)
418                         put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419
420                 for (i = 0; i < num_events; i++, opcode++)
421                         put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422         }
423
424         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425                                 rp, rp_size);
426         kfree(rp);
427
428         return err;
429 }
430
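/* The index list replies are built in two passes while holding
 * hci_dev_list_lock: first count the matching controllers to size the
 * reply, then fill in their indexes, skipping controllers that are still
 * in setup or config, bound to a user channel, or raw-only.
 */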
431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
432                            u16 data_len)
433 {
434         struct mgmt_rp_read_index_list *rp;
435         struct hci_dev *d;
436         size_t rp_len;
437         u16 count;
438         int err;
439
440         bt_dev_dbg(hdev, "sock %p", sk);
441
442         read_lock(&hci_dev_list_lock);
443
444         count = 0;
445         list_for_each_entry(d, &hci_dev_list, list) {
446                 if (d->dev_type == HCI_PRIMARY &&
447                     !hci_dev_test_flag(d, HCI_UNCONFIGURED))
448                         count++;
449         }
450
451         rp_len = sizeof(*rp) + (2 * count);
452         rp = kmalloc(rp_len, GFP_ATOMIC);
453         if (!rp) {
454                 read_unlock(&hci_dev_list_lock);
455                 return -ENOMEM;
456         }
457
458         count = 0;
459         list_for_each_entry(d, &hci_dev_list, list) {
460                 if (hci_dev_test_flag(d, HCI_SETUP) ||
461                     hci_dev_test_flag(d, HCI_CONFIG) ||
462                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
463                         continue;
464
465                 /* Devices marked as raw-only are neither configured
466                  * nor unconfigured controllers.
467                  */
468                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
469                         continue;
470
471                 if (d->dev_type == HCI_PRIMARY &&
472                     !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
473                         rp->index[count++] = cpu_to_le16(d->id);
474                         bt_dev_dbg(hdev, "Added hci%u", d->id);
475                 }
476         }
477
478         rp->num_controllers = cpu_to_le16(count);
479         rp_len = sizeof(*rp) + (2 * count);
480
481         read_unlock(&hci_dev_list_lock);
482
483         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
484                                 0, rp, rp_len);
485
486         kfree(rp);
487
488         return err;
489 }
490
491 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
492                                   void *data, u16 data_len)
493 {
494         struct mgmt_rp_read_unconf_index_list *rp;
495         struct hci_dev *d;
496         size_t rp_len;
497         u16 count;
498         int err;
499
500         bt_dev_dbg(hdev, "sock %p", sk);
501
502         read_lock(&hci_dev_list_lock);
503
504         count = 0;
505         list_for_each_entry(d, &hci_dev_list, list) {
506                 if (d->dev_type == HCI_PRIMARY &&
507                     hci_dev_test_flag(d, HCI_UNCONFIGURED))
508                         count++;
509         }
510
511         rp_len = sizeof(*rp) + (2 * count);
512         rp = kmalloc(rp_len, GFP_ATOMIC);
513         if (!rp) {
514                 read_unlock(&hci_dev_list_lock);
515                 return -ENOMEM;
516         }
517
518         count = 0;
519         list_for_each_entry(d, &hci_dev_list, list) {
520                 if (hci_dev_test_flag(d, HCI_SETUP) ||
521                     hci_dev_test_flag(d, HCI_CONFIG) ||
522                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
523                         continue;
524
525                 /* Devices marked as raw-only are neither configured
526                  * nor unconfigured controllers.
527                  */
528                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
529                         continue;
530
531                 if (d->dev_type == HCI_PRIMARY &&
532                     hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
533                         rp->index[count++] = cpu_to_le16(d->id);
534                         bt_dev_dbg(hdev, "Added hci%u", d->id);
535                 }
536         }
537
538         rp->num_controllers = cpu_to_le16(count);
539         rp_len = sizeof(*rp) + (2 * count);
540
541         read_unlock(&hci_dev_list_lock);
542
543         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
544                                 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
545
546         kfree(rp);
547
548         return err;
549 }
550
551 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
552                                void *data, u16 data_len)
553 {
554         struct mgmt_rp_read_ext_index_list *rp;
555         struct hci_dev *d;
556         u16 count;
557         int err;
558
559         bt_dev_dbg(hdev, "sock %p", sk);
560
561         read_lock(&hci_dev_list_lock);
562
563         count = 0;
564         list_for_each_entry(d, &hci_dev_list, list) {
565                 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
566                         count++;
567         }
568
569         rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
570         if (!rp) {
571                 read_unlock(&hci_dev_list_lock);
572                 return -ENOMEM;
573         }
574
575         count = 0;
576         list_for_each_entry(d, &hci_dev_list, list) {
577                 if (hci_dev_test_flag(d, HCI_SETUP) ||
578                     hci_dev_test_flag(d, HCI_CONFIG) ||
579                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
580                         continue;
581
582                 /* Devices marked as raw-only are neither configured
583                  * nor unconfigured controllers.
584                  */
585                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
586                         continue;
587
588                 if (d->dev_type == HCI_PRIMARY) {
589                         if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
590                                 rp->entry[count].type = 0x01;
591                         else
592                                 rp->entry[count].type = 0x00;
593                 } else if (d->dev_type == HCI_AMP) {
594                         rp->entry[count].type = 0x02;
595                 } else {
596                         continue;
597                 }
598
599                 rp->entry[count].bus = d->bus;
600                 rp->entry[count++].index = cpu_to_le16(d->id);
601                 bt_dev_dbg(hdev, "Added hci%u", d->id);
602         }
603
604         rp->num_controllers = cpu_to_le16(count);
605
606         read_unlock(&hci_dev_list_lock);
607
608         /* If this command is called at least once, then all the
609          * default index and unconfigured index events are disabled
610          * and from now on only extended index events are used.
611          */
612         hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
613         hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
614         hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
615
616         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
617                                 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
618                                 struct_size(rp, entry, count));
619
620         kfree(rp);
621
622         return err;
623 }
624
625 static bool is_configured(struct hci_dev *hdev)
626 {
627         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628             !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
629                 return false;
630
631         if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632              test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633             !bacmp(&hdev->public_addr, BDADDR_ANY))
634                 return false;
635
636         return true;
637 }
638
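/* Options that still need to be provided before the controller is
 * considered configured; the same conditions as is_configured().
 */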
639 static __le32 get_missing_options(struct hci_dev *hdev)
640 {
641         u32 options = 0;
642
643         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644             !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645                 options |= MGMT_OPTION_EXTERNAL_CONFIG;
646
647         if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648              test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649             !bacmp(&hdev->public_addr, BDADDR_ANY))
650                 options |= MGMT_OPTION_PUBLIC_ADDRESS;
651
652         return cpu_to_le32(options);
653 }
654
655 static int new_options(struct hci_dev *hdev, struct sock *skip)
656 {
657         __le32 options = get_missing_options(hdev);
658
659         return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
660                                   sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
661 }
662
663 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
664 {
665         __le32 options = get_missing_options(hdev);
666
667         return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
668                                  sizeof(options));
669 }
670
671 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
672                             void *data, u16 data_len)
673 {
674         struct mgmt_rp_read_config_info rp;
675         u32 options = 0;
676
677         bt_dev_dbg(hdev, "sock %p", sk);
678
679         hci_dev_lock(hdev);
680
681         memset(&rp, 0, sizeof(rp));
682         rp.manufacturer = cpu_to_le16(hdev->manufacturer);
683
684         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
685                 options |= MGMT_OPTION_EXTERNAL_CONFIG;
686
687         if (hdev->set_bdaddr)
688                 options |= MGMT_OPTION_PUBLIC_ADDRESS;
689
690         rp.supported_options = cpu_to_le32(options);
691         rp.missing_options = get_missing_options(hdev);
692
693         hci_dev_unlock(hdev);
694
695         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
696                                  &rp, sizeof(rp));
697 }
698
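/* PHYs the controller can support, derived from its LMP and LE features. */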
699 static u32 get_supported_phys(struct hci_dev *hdev)
700 {
701         u32 supported_phys = 0;
702
703         if (lmp_bredr_capable(hdev)) {
704                 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
705
706                 if (hdev->features[0][0] & LMP_3SLOT)
707                         supported_phys |= MGMT_PHY_BR_1M_3SLOT;
708
709                 if (hdev->features[0][0] & LMP_5SLOT)
710                         supported_phys |= MGMT_PHY_BR_1M_5SLOT;
711
712                 if (lmp_edr_2m_capable(hdev)) {
713                         supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
714
715                         if (lmp_edr_3slot_capable(hdev))
716                                 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
717
718                         if (lmp_edr_5slot_capable(hdev))
719                                 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
720
721                         if (lmp_edr_3m_capable(hdev)) {
722                                 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
723
724                                 if (lmp_edr_3slot_capable(hdev))
725                                         supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
726
727                                 if (lmp_edr_5slot_capable(hdev))
728                                         supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
729                         }
730                 }
731         }
732
733         if (lmp_le_capable(hdev)) {
734                 supported_phys |= MGMT_PHY_LE_1M_TX;
735                 supported_phys |= MGMT_PHY_LE_1M_RX;
736
737                 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
738                         supported_phys |= MGMT_PHY_LE_2M_TX;
739                         supported_phys |= MGMT_PHY_LE_2M_RX;
740                 }
741
742                 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
743                         supported_phys |= MGMT_PHY_LE_CODED_TX;
744                         supported_phys |= MGMT_PHY_LE_CODED_RX;
745                 }
746         }
747
748         return supported_phys;
749 }
750
751 static u32 get_selected_phys(struct hci_dev *hdev)
752 {
753         u32 selected_phys = 0;
754
755         if (lmp_bredr_capable(hdev)) {
756                 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
757
758                 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
759                         selected_phys |= MGMT_PHY_BR_1M_3SLOT;
760
761                 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
762                         selected_phys |= MGMT_PHY_BR_1M_5SLOT;
763
764                 if (lmp_edr_2m_capable(hdev)) {
765                         if (!(hdev->pkt_type & HCI_2DH1))
766                                 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
767
768                         if (lmp_edr_3slot_capable(hdev) &&
769                             !(hdev->pkt_type & HCI_2DH3))
770                                 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
771
772                         if (lmp_edr_5slot_capable(hdev) &&
773                             !(hdev->pkt_type & HCI_2DH5))
774                                 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
775
776                         if (lmp_edr_3m_capable(hdev)) {
777                                 if (!(hdev->pkt_type & HCI_3DH1))
778                                         selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
779
780                                 if (lmp_edr_3slot_capable(hdev) &&
781                                     !(hdev->pkt_type & HCI_3DH3))
782                                         selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
783
784                                 if (lmp_edr_5slot_capable(hdev) &&
785                                     !(hdev->pkt_type & HCI_3DH5))
786                                         selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
787                         }
788                 }
789         }
790
791         if (lmp_le_capable(hdev)) {
792                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
793                         selected_phys |= MGMT_PHY_LE_1M_TX;
794
795                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
796                         selected_phys |= MGMT_PHY_LE_1M_RX;
797
798                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
799                         selected_phys |= MGMT_PHY_LE_2M_TX;
800
801                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
802                         selected_phys |= MGMT_PHY_LE_2M_RX;
803
804                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
805                         selected_phys |= MGMT_PHY_LE_CODED_TX;
806
807                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
808                         selected_phys |= MGMT_PHY_LE_CODED_RX;
809         }
810
811         return selected_phys;
812 }
813
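/* All supported PHYs except the mandatory BR 1M 1-slot and LE 1M TX/RX,
 * which are not configurable.
 */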
814 static u32 get_configurable_phys(struct hci_dev *hdev)
815 {
816         return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817                 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
818 }
819
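/* Settings the controller hardware is capable of supporting, derived from
 * its LMP/LE features and quirks, regardless of what is currently enabled.
 */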
820 static u32 get_supported_settings(struct hci_dev *hdev)
821 {
822         u32 settings = 0;
823
824         settings |= MGMT_SETTING_POWERED;
825         settings |= MGMT_SETTING_BONDABLE;
826         settings |= MGMT_SETTING_DEBUG_KEYS;
827         settings |= MGMT_SETTING_CONNECTABLE;
828         settings |= MGMT_SETTING_DISCOVERABLE;
829
830         if (lmp_bredr_capable(hdev)) {
831                 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832                         settings |= MGMT_SETTING_FAST_CONNECTABLE;
833                 settings |= MGMT_SETTING_BREDR;
834                 settings |= MGMT_SETTING_LINK_SECURITY;
835
836                 if (lmp_ssp_capable(hdev)) {
837                         settings |= MGMT_SETTING_SSP;
838                         if (IS_ENABLED(CONFIG_BT_HS))
839                                 settings |= MGMT_SETTING_HS;
840                 }
841
842                 if (lmp_sc_capable(hdev))
843                         settings |= MGMT_SETTING_SECURE_CONN;
844
845                 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
846                              &hdev->quirks))
847                         settings |= MGMT_SETTING_WIDEBAND_SPEECH;
848         }
849
850         if (lmp_le_capable(hdev)) {
851                 settings |= MGMT_SETTING_LE;
852                 settings |= MGMT_SETTING_SECURE_CONN;
853                 settings |= MGMT_SETTING_PRIVACY;
854                 settings |= MGMT_SETTING_STATIC_ADDRESS;
855                 settings |= MGMT_SETTING_ADVERTISING;
856         }
857
858         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
859             hdev->set_bdaddr)
860                 settings |= MGMT_SETTING_CONFIGURATION;
861
862         if (cis_central_capable(hdev))
863                 settings |= MGMT_SETTING_CIS_CENTRAL;
864
865         if (cis_peripheral_capable(hdev))
866                 settings |= MGMT_SETTING_CIS_PERIPHERAL;
867
868         settings |= MGMT_SETTING_PHY_CONFIGURATION;
869
870         return settings;
871 }
872
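/* Settings that are currently in effect, derived from the hdev flags. */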
873 static u32 get_current_settings(struct hci_dev *hdev)
874 {
875         u32 settings = 0;
876
877         if (hdev_is_powered(hdev))
878                 settings |= MGMT_SETTING_POWERED;
879
880         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
881                 settings |= MGMT_SETTING_CONNECTABLE;
882
883         if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
884                 settings |= MGMT_SETTING_FAST_CONNECTABLE;
885
886         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
887                 settings |= MGMT_SETTING_DISCOVERABLE;
888
889         if (hci_dev_test_flag(hdev, HCI_BONDABLE))
890                 settings |= MGMT_SETTING_BONDABLE;
891
892         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
893                 settings |= MGMT_SETTING_BREDR;
894
895         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
896                 settings |= MGMT_SETTING_LE;
897
898         if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
899                 settings |= MGMT_SETTING_LINK_SECURITY;
900
901         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
902                 settings |= MGMT_SETTING_SSP;
903
904         if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
905                 settings |= MGMT_SETTING_HS;
906
907         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
908                 settings |= MGMT_SETTING_ADVERTISING;
909
910         if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
911                 settings |= MGMT_SETTING_SECURE_CONN;
912
913         if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
914                 settings |= MGMT_SETTING_DEBUG_KEYS;
915
916         if (hci_dev_test_flag(hdev, HCI_PRIVACY))
917                 settings |= MGMT_SETTING_PRIVACY;
918
919         /* The current setting for static address has two purposes. The
920          * first is to indicate if the static address will be used and
921          * the second is to indicate if it is actually set.
922          *
923          * This means that if the static address is not configured, this flag
924          * will never be set. If the address is configured, then whether the
925          * address is actually used determines if the flag is set or not.
926          *
927          * For single mode LE only controllers and dual-mode controllers
928          * with BR/EDR disabled, the existence of the static address will
929          * be evaluated.
930          */
931         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
932             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
933             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
934                 if (bacmp(&hdev->static_addr, BDADDR_ANY))
935                         settings |= MGMT_SETTING_STATIC_ADDRESS;
936         }
937
938         if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
939                 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
940
941         if (cis_central_capable(hdev))
942                 settings |= MGMT_SETTING_CIS_CENTRAL;
943
944         if (cis_peripheral_capable(hdev))
945                 settings |= MGMT_SETTING_CIS_PERIPHERAL;
946
947         return settings;
948 }
949
950 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
951 {
952         return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
953 }
954
955 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
956 {
957         struct mgmt_pending_cmd *cmd;
958
959         /* If there's a pending mgmt command the flags will not yet have
960          * their final values, so check for this first.
961          */
962         cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
963         if (cmd) {
964                 struct mgmt_mode *cp = cmd->param;
965                 if (cp->val == 0x01)
966                         return LE_AD_GENERAL;
967                 else if (cp->val == 0x02)
968                         return LE_AD_LIMITED;
969         } else {
970                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
971                         return LE_AD_LIMITED;
972                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
973                         return LE_AD_GENERAL;
974         }
975
976         return 0;
977 }
978
979 bool mgmt_get_connectable(struct hci_dev *hdev)
980 {
981         struct mgmt_pending_cmd *cmd;
982
983         /* If there's a pending mgmt command the flag will not yet have
984          * its final value, so check for this first.
985          */
986         cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
987         if (cmd) {
988                 struct mgmt_mode *cp = cmd->param;
989
990                 return cp->val;
991         }
992
993         return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
994 }
995
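/* Once the service cache period expires, flush the accumulated EIR and
 * class of device updates to the controller.
 */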
996 static int service_cache_sync(struct hci_dev *hdev, void *data)
997 {
998         hci_update_eir_sync(hdev);
999         hci_update_class_sync(hdev);
1000
1001         return 0;
1002 }
1003
1004 static void service_cache_off(struct work_struct *work)
1005 {
1006         struct hci_dev *hdev = container_of(work, struct hci_dev,
1007                                             service_cache.work);
1008
1009         if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1010                 return;
1011
1012         hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1013 }
1014
1015 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1016 {
1017         /* The generation of a new RPA and programming it into the
1018          * controller happens when advertising is (re)enabled below via
1019          * hci_start_ext_adv_sync() or hci_enable_advertising_sync().
1020          */
1021         if (ext_adv_capable(hdev))
1022                 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1023         else
1024                 return hci_enable_advertising_sync(hdev);
1025 }
1026
1027 static void rpa_expired(struct work_struct *work)
1028 {
1029         struct hci_dev *hdev = container_of(work, struct hci_dev,
1030                                             rpa_expired.work);
1031
1032         bt_dev_dbg(hdev, "");
1033
1034         hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1035
1036         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1037                 return;
1038
1039         hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1040 }
1041
1042 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1043
1044 static void discov_off(struct work_struct *work)
1045 {
1046         struct hci_dev *hdev = container_of(work, struct hci_dev,
1047                                             discov_off.work);
1048
1049         bt_dev_dbg(hdev, "");
1050
1051         hci_dev_lock(hdev);
1052
1053         /* When the discoverable timeout triggers, just make sure the
1054          * limited discoverable flag is cleared. Even in the case of a
1055          * timeout triggered from general discoverable, it is safe to
1056          * unconditionally clear the flag.
1057          */
1058         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1059         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1060         hdev->discov_timeout = 0;
1061
1062         hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
1063
1064         mgmt_new_settings(hdev);
1065
1066         hci_dev_unlock(hdev);
1067 }
1068
1069 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1070
1071 static void mesh_send_complete(struct hci_dev *hdev,
1072                                struct mgmt_mesh_tx *mesh_tx, bool silent)
1073 {
1074         u8 handle = mesh_tx->handle;
1075
1076         if (!silent)
1077                 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1078                            sizeof(handle), NULL);
1079
1080         mgmt_mesh_remove(mesh_tx);
1081 }
1082
1083 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1084 {
1085         struct mgmt_mesh_tx *mesh_tx;
1086
1087         hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1088         hci_disable_advertising_sync(hdev);
1089         mesh_tx = mgmt_mesh_next(hdev, NULL);
1090
1091         if (mesh_tx)
1092                 mesh_send_complete(hdev, mesh_tx, false);
1093
1094         return 0;
1095 }
1096
1097 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1098 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
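/* Completion callback for mesh_send_done_sync: queue transmission of the
 * next pending mesh packet, if any, and mark mesh sending as in progress.
 */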
1099 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1100 {
1101         struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1102
1103         if (!mesh_tx)
1104                 return;
1105
1106         err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1107                                  mesh_send_start_complete);
1108
1109         if (err < 0)
1110                 mesh_send_complete(hdev, mesh_tx, false);
1111         else
1112                 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1113 }
1114
1115 static void mesh_send_done(struct work_struct *work)
1116 {
1117         struct hci_dev *hdev = container_of(work, struct hci_dev,
1118                                             mesh_send_done.work);
1119
1120         if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1121                 return;
1122
1123         hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1124 }
1125
1126 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1127 {
1128         if (hci_dev_test_flag(hdev, HCI_MGMT))
1129                 return;
1130
1131         BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1132
1133         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1134         INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1135         INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1136         INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1137
1138         /* Non-mgmt controlled devices get this bit set
1139          * implicitly so that pairing works for them. For mgmt,
1140          * however, we require user space to explicitly enable
1141          * it.
1142          */
1143         hci_dev_clear_flag(hdev, HCI_BONDABLE);
1144
1145         hci_dev_set_flag(hdev, HCI_MGMT);
1146 }
1147
1148 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1149                                 void *data, u16 data_len)
1150 {
1151         struct mgmt_rp_read_info rp;
1152
1153         bt_dev_dbg(hdev, "sock %p", sk);
1154
1155         hci_dev_lock(hdev);
1156
1157         memset(&rp, 0, sizeof(rp));
1158
1159         bacpy(&rp.bdaddr, &hdev->bdaddr);
1160
1161         rp.version = hdev->hci_ver;
1162         rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1163
1164         rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1165         rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1166
1167         memcpy(rp.dev_class, hdev->dev_class, 3);
1168
1169         memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1170         memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1171
1172         hci_dev_unlock(hdev);
1173
1174         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1175                                  sizeof(rp));
1176 }
1177
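/* Build the EIR blob used in the extended controller information reply:
 * class of device (BR/EDR only), appearance (LE only) and the complete
 * and short local names.
 */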
1178 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1179 {
1180         u16 eir_len = 0;
1181         size_t name_len;
1182
1183         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1184                 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1185                                           hdev->dev_class, 3);
1186
1187         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1188                 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1189                                           hdev->appearance);
1190
1191         name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1192         eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1193                                   hdev->dev_name, name_len);
1194
1195         name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1196         eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1197                                   hdev->short_name, name_len);
1198
1199         return eir_len;
1200 }
1201
1202 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1203                                     void *data, u16 data_len)
1204 {
1205         char buf[512];
1206         struct mgmt_rp_read_ext_info *rp = (void *)buf;
1207         u16 eir_len;
1208
1209         bt_dev_dbg(hdev, "sock %p", sk);
1210
1211         memset(&buf, 0, sizeof(buf));
1212
1213         hci_dev_lock(hdev);
1214
1215         bacpy(&rp->bdaddr, &hdev->bdaddr);
1216
1217         rp->version = hdev->hci_ver;
1218         rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1219
1220         rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1221         rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1222
1223
1224         eir_len = append_eir_data_to_buf(hdev, rp->eir);
1225         rp->eir_len = cpu_to_le16(eir_len);
1226
1227         hci_dev_unlock(hdev);
1228
1229         /* If this command is called at least once, then the events
1230          * for class of device and local name changes are disabled
1231          * and only the new extended controller information event
1232          * is used.
1233          */
1234         hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1235         hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1236         hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1237
1238         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1239                                  sizeof(*rp) + eir_len);
1240 }
1241
1242 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1243 {
1244         char buf[512];
1245         struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1246         u16 eir_len;
1247
1248         memset(buf, 0, sizeof(buf));
1249
1250         eir_len = append_eir_data_to_buf(hdev, ev->eir);
1251         ev->eir_len = cpu_to_le16(eir_len);
1252
1253         return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1254                                   sizeof(*ev) + eir_len,
1255                                   HCI_MGMT_EXT_INFO_EVENTS, skip);
1256 }
1257
1258 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1259 {
1260         __le32 settings = cpu_to_le32(get_current_settings(hdev));
1261
1262         return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1263                                  sizeof(settings));
1264 }
1265
1266 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1267 {
1268         struct mgmt_ev_advertising_added ev;
1269
1270         ev.instance = instance;
1271
1272         mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1273 }
1274
1275 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1276                               u8 instance)
1277 {
1278         struct mgmt_ev_advertising_removed ev;
1279
1280         ev.instance = instance;
1281
1282         mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1283 }
1284
1285 static void cancel_adv_timeout(struct hci_dev *hdev)
1286 {
1287         if (hdev->adv_instance_timeout) {
1288                 hdev->adv_instance_timeout = 0;
1289                 cancel_delayed_work(&hdev->adv_instance_expire);
1290         }
1291 }
1292
1293 /* This function requires the caller holds hdev->lock */
1294 static void restart_le_actions(struct hci_dev *hdev)
1295 {
1296         struct hci_conn_params *p;
1297
1298         list_for_each_entry(p, &hdev->le_conn_params, list) {
1299                 /* Needed for the AUTO_OFF case where the controller might
1300                  * not "really" have been powered off.
1301                  */
1302                 hci_pend_le_list_del_init(p);
1303
1304                 switch (p->auto_connect) {
1305                 case HCI_AUTO_CONN_DIRECT:
1306                 case HCI_AUTO_CONN_ALWAYS:
1307                         hci_pend_le_list_add(p, &hdev->pend_le_conns);
1308                         break;
1309                 case HCI_AUTO_CONN_REPORT:
1310                         hci_pend_le_list_add(p, &hdev->pend_le_reports);
1311                         break;
1312                 default:
1313                         break;
1314                 }
1315         }
1316 }
1317
1318 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1319 {
1320         __le32 ev = cpu_to_le32(get_current_settings(hdev));
1321
1322         return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1323                                   sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1324 }
1325
1326 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1327 {
1328         struct mgmt_pending_cmd *cmd = data;
1329         struct mgmt_mode *cp;
1330
1331         /* Make sure cmd still outstanding. */
1332         if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1333                 return;
1334
1335         cp = cmd->param;
1336
1337         bt_dev_dbg(hdev, "err %d", err);
1338
1339         if (!err) {
1340                 if (cp->val) {
1341                         hci_dev_lock(hdev);
1342                         restart_le_actions(hdev);
1343                         hci_update_passive_scan(hdev);
1344                         hci_dev_unlock(hdev);
1345                 }
1346
1347                 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1348
1349                 /* Only call new_setting for power on as power off is deferred
1350                  * to hdev->power_off work which does call hci_dev_do_close.
1351                  */
1352                 if (cp->val)
1353                         new_settings(hdev, cmd->sk);
1354         } else {
1355                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1356                                 mgmt_status(err));
1357         }
1358
1359         mgmt_pending_remove(cmd);
1360 }
1361
1362 static int set_powered_sync(struct hci_dev *hdev, void *data)
1363 {
1364         struct mgmt_pending_cmd *cmd = data;
1365         struct mgmt_mode *cp = cmd->param;
1366
1367         BT_DBG("%s", hdev->name);
1368
1369         return hci_set_powered_sync(hdev, cp->val);
1370 }
1371
1372 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1373                        u16 len)
1374 {
1375         struct mgmt_mode *cp = data;
1376         struct mgmt_pending_cmd *cmd;
1377         int err;
1378
1379         bt_dev_dbg(hdev, "sock %p", sk);
1380
1381         if (cp->val != 0x00 && cp->val != 0x01)
1382                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1383                                        MGMT_STATUS_INVALID_PARAMS);
1384
1385         hci_dev_lock(hdev);
1386
1387         if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1388                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1389                                       MGMT_STATUS_BUSY);
1390                 goto failed;
1391         }
1392
1393         if (!!cp->val == hdev_is_powered(hdev)) {
1394                 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1395                 goto failed;
1396         }
1397
1398         cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1399         if (!cmd) {
1400                 err = -ENOMEM;
1401                 goto failed;
1402         }
1403
1404         /* Cancel potentially blocking sync operation before power off */
1405         if (cp->val == 0x00) {
1406                 hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
1407                 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1408                                          mgmt_set_powered_complete);
1409         } else {
1410                 /* Use hci_cmd_sync_submit since hdev might not be running */
1411                 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1412                                           mgmt_set_powered_complete);
1413         }
1414
1415         if (err < 0)
1416                 mgmt_pending_remove(cmd);
1417
1418 failed:
1419         hci_dev_unlock(hdev);
1420         return err;
1421 }
1422
1423 int mgmt_new_settings(struct hci_dev *hdev)
1424 {
1425         return new_settings(hdev, NULL);
1426 }
1427
1428 struct cmd_lookup {
1429         struct sock *sk;
1430         struct hci_dev *hdev;
1431         u8 mgmt_status;
1432 };
1433
1434 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1435 {
1436         struct cmd_lookup *match = data;
1437
1438         send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1439
1440         list_del(&cmd->list);
1441
1442         if (match->sk == NULL) {
1443                 match->sk = cmd->sk;
1444                 sock_hold(match->sk);
1445         }
1446
1447         mgmt_pending_free(cmd);
1448 }
1449
1450 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1451 {
1452         u8 *status = data;
1453
1454         mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1455         mgmt_pending_remove(cmd);
1456 }
1457
1458 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1459 {
1460         if (cmd->cmd_complete) {
1461                 u8 *status = data;
1462
1463                 cmd->cmd_complete(cmd, *status);
1464                 mgmt_pending_remove(cmd);
1465
1466                 return;
1467         }
1468
1469         cmd_status_rsp(cmd, data);
1470 }
1471
1472 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1473 {
1474         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1475                                  cmd->param, cmd->param_len);
1476 }
1477
1478 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1479 {
1480         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1481                                  cmd->param, sizeof(struct mgmt_addr_info));
1482 }
1483
1484 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1485 {
1486         if (!lmp_bredr_capable(hdev))
1487                 return MGMT_STATUS_NOT_SUPPORTED;
1488         else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1489                 return MGMT_STATUS_REJECTED;
1490         else
1491                 return MGMT_STATUS_SUCCESS;
1492 }
1493
1494 static u8 mgmt_le_support(struct hci_dev *hdev)
1495 {
1496         if (!lmp_le_capable(hdev))
1497                 return MGMT_STATUS_NOT_SUPPORTED;
1498         else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1499                 return MGMT_STATUS_REJECTED;
1500         else
1501                 return MGMT_STATUS_SUCCESS;
1502 }
1503
1504 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1505                                            int err)
1506 {
1507         struct mgmt_pending_cmd *cmd = data;
1508
1509         bt_dev_dbg(hdev, "err %d", err);
1510
1511         /* Make sure cmd still outstanding. */
1512         if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1513                 return;
1514
1515         hci_dev_lock(hdev);
1516
1517         if (err) {
1518                 u8 mgmt_err = mgmt_status(err);
1519                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1520                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1521                 goto done;
1522         }
1523
1524         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1525             hdev->discov_timeout > 0) {
1526                 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1527                 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1528         }
1529
1530         send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1531         new_settings(hdev, cmd->sk);
1532
1533 done:
1534         mgmt_pending_remove(cmd);
1535         hci_dev_unlock(hdev);
1536 }
1537
1538 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1539 {
1540         BT_DBG("%s", hdev->name);
1541
1542         return hci_update_discoverable_sync(hdev);
1543 }
1544
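/* MGMT_OP_SET_DISCOVERABLE handler: validate the requested mode (0x00 off,
 * 0x01 general, 0x02 limited) together with the timeout, update the
 * HCI_DISCOVERABLE and HCI_LIMITED_DISCOVERABLE flags, and only queue
 * set_discoverable_sync() when a powered controller actually needs to be
 * reprogrammed.
 */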
1545 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1546                             u16 len)
1547 {
1548         struct mgmt_cp_set_discoverable *cp = data;
1549         struct mgmt_pending_cmd *cmd;
1550         u16 timeout;
1551         int err;
1552
1553         bt_dev_dbg(hdev, "sock %p", sk);
1554
1555         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1556             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1557                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1558                                        MGMT_STATUS_REJECTED);
1559
1560         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1561                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1562                                        MGMT_STATUS_INVALID_PARAMS);
1563
1564         timeout = __le16_to_cpu(cp->timeout);
1565
1566         /* Disabling discoverable requires that no timeout is set,
1567          * and enabling limited discoverable requires a timeout.
1568          */
1569         if ((cp->val == 0x00 && timeout > 0) ||
1570             (cp->val == 0x02 && timeout == 0))
1571                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572                                        MGMT_STATUS_INVALID_PARAMS);
1573
1574         hci_dev_lock(hdev);
1575
1576         if (!hdev_is_powered(hdev) && timeout > 0) {
1577                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1578                                       MGMT_STATUS_NOT_POWERED);
1579                 goto failed;
1580         }
1581
1582         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1583             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1584                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1585                                       MGMT_STATUS_BUSY);
1586                 goto failed;
1587         }
1588
1589         if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1590                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1591                                       MGMT_STATUS_REJECTED);
1592                 goto failed;
1593         }
1594
1595         if (hdev->advertising_paused) {
1596                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1597                                       MGMT_STATUS_BUSY);
1598                 goto failed;
1599         }
1600
1601         if (!hdev_is_powered(hdev)) {
1602                 bool changed = false;
1603
1604                 /* Setting limited discoverable when powered off is not a
1605                  * valid operation since it requires a timeout, so there is
1606                  * no need to check HCI_LIMITED_DISCOVERABLE here.
1607                  */
1608                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1609                         hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1610                         changed = true;
1611                 }
1612
1613                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1614                 if (err < 0)
1615                         goto failed;
1616
1617                 if (changed)
1618                         err = new_settings(hdev, sk);
1619
1620                 goto failed;
1621         }
1622
1623         /* If the current mode is the same, then just update the timeout
1624          * value with the new value. If only the timeout gets updated,
1625          * no HCI transactions are needed.
1626          */
1627         if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1628             (cp->val == 0x02) == hci_dev_test_flag(hdev,
1629                                                    HCI_LIMITED_DISCOVERABLE)) {
1630                 cancel_delayed_work(&hdev->discov_off);
1631                 hdev->discov_timeout = timeout;
1632
1633                 if (cp->val && hdev->discov_timeout > 0) {
1634                         int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1635                         queue_delayed_work(hdev->req_workqueue,
1636                                            &hdev->discov_off, to);
1637                 }
1638
1639                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1640                 goto failed;
1641         }
1642
1643         cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1644         if (!cmd) {
1645                 err = -ENOMEM;
1646                 goto failed;
1647         }
1648
1649         /* Cancel any potential discoverable timeout that might still be
1650          * active and store the new timeout value. The arming of the
1651          * timeout happens in the complete handler.
1652          */
1653         cancel_delayed_work(&hdev->discov_off);
1654         hdev->discov_timeout = timeout;
1655
1656         if (cp->val)
1657                 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1658         else
1659                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1660
1661         /* Limited discoverable mode */
1662         if (cp->val == 0x02)
1663                 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1664         else
1665                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1666
1667         err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1668                                  mgmt_set_discoverable_complete);
1669
1670         if (err < 0)
1671                 mgmt_pending_remove(cmd);
1672
1673 failed:
1674         hci_dev_unlock(hdev);
1675         return err;
1676 }
1677
1678 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1679                                           int err)
1680 {
1681         struct mgmt_pending_cmd *cmd = data;
1682
1683         bt_dev_dbg(hdev, "err %d", err);
1684
1685         /* Make sure cmd still outstanding. */
1686         if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1687                 return;
1688
1689         hci_dev_lock(hdev);
1690
1691         if (err) {
1692                 u8 mgmt_err = mgmt_status(err);
1693                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1694                 goto done;
1695         }
1696
1697         send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1698         new_settings(hdev, cmd->sk);
1699
1700 done:
1701         if (cmd)
1702                 mgmt_pending_remove(cmd);
1703
1704         hci_dev_unlock(hdev);
1705 }
1706
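/* Handle MGMT_OP_SET_CONNECTABLE while the controller is powered off: adjust
 * the flags directly, send the settings response and, if the value actually
 * changed, refresh the scan state and emit new_settings.
 */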
1707 static int set_connectable_update_settings(struct hci_dev *hdev,
1708                                            struct sock *sk, u8 val)
1709 {
1710         bool changed = false;
1711         int err;
1712
1713         if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1714                 changed = true;
1715
1716         if (val) {
1717                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1718         } else {
1719                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1720                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1721         }
1722
1723         err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1724         if (err < 0)
1725                 return err;
1726
1727         if (changed) {
1728                 hci_update_scan(hdev);
1729                 hci_update_passive_scan(hdev);
1730                 return new_settings(hdev, sk);
1731         }
1732
1733         return 0;
1734 }
1735
1736 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1737 {
1738         BT_DBG("%s", hdev->name);
1739
1740         return hci_update_connectable_sync(hdev);
1741 }
1742
1743 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1744                            u16 len)
1745 {
1746         struct mgmt_mode *cp = data;
1747         struct mgmt_pending_cmd *cmd;
1748         int err;
1749
1750         bt_dev_dbg(hdev, "sock %p", sk);
1751
1752         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1753             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1754                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1755                                        MGMT_STATUS_REJECTED);
1756
1757         if (cp->val != 0x00 && cp->val != 0x01)
1758                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1759                                        MGMT_STATUS_INVALID_PARAMS);
1760
1761         hci_dev_lock(hdev);
1762
1763         if (!hdev_is_powered(hdev)) {
1764                 err = set_connectable_update_settings(hdev, sk, cp->val);
1765                 goto failed;
1766         }
1767
1768         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1769             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1770                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1771                                       MGMT_STATUS_BUSY);
1772                 goto failed;
1773         }
1774
1775         cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1776         if (!cmd) {
1777                 err = -ENOMEM;
1778                 goto failed;
1779         }
1780
1781         if (cp->val) {
1782                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1783         } else {
1784                 if (hdev->discov_timeout > 0)
1785                         cancel_delayed_work(&hdev->discov_off);
1786
1787                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1788                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1789                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1790         }
1791
1792         err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1793                                  mgmt_set_connectable_complete);
1794
1795         if (err < 0)
1796                 mgmt_pending_remove(cmd);
1797
1798 failed:
1799         hci_dev_unlock(hdev);
1800         return err;
1801 }
1802
1803 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1804                         u16 len)
1805 {
1806         struct mgmt_mode *cp = data;
1807         bool changed;
1808         int err;
1809
1810         bt_dev_dbg(hdev, "sock %p", sk);
1811
1812         if (cp->val != 0x00 && cp->val != 0x01)
1813                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1814                                        MGMT_STATUS_INVALID_PARAMS);
1815
1816         hci_dev_lock(hdev);
1817
1818         if (cp->val)
1819                 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1820         else
1821                 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1822
1823         err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1824         if (err < 0)
1825                 goto unlock;
1826
1827         if (changed) {
1828                 /* In limited privacy mode the change of bondable mode
1829                  * may affect the local advertising address.
1830                  */
1831                 hci_update_discoverable(hdev);
1832
1833                 err = new_settings(hdev, sk);
1834         }
1835
1836 unlock:
1837         hci_dev_unlock(hdev);
1838         return err;
1839 }
1840
1841 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1842                              u16 len)
1843 {
1844         struct mgmt_mode *cp = data;
1845         struct mgmt_pending_cmd *cmd;
1846         u8 val, status;
1847         int err;
1848
1849         bt_dev_dbg(hdev, "sock %p", sk);
1850
1851         status = mgmt_bredr_support(hdev);
1852         if (status)
1853                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1854                                        status);
1855
1856         if (cp->val != 0x00 && cp->val != 0x01)
1857                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1858                                        MGMT_STATUS_INVALID_PARAMS);
1859
1860         hci_dev_lock(hdev);
1861
1862         if (!hdev_is_powered(hdev)) {
1863                 bool changed = false;
1864
1865                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1866                         hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1867                         changed = true;
1868                 }
1869
1870                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1871                 if (err < 0)
1872                         goto failed;
1873
1874                 if (changed)
1875                         err = new_settings(hdev, sk);
1876
1877                 goto failed;
1878         }
1879
1880         if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1881                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1882                                       MGMT_STATUS_BUSY);
1883                 goto failed;
1884         }
1885
1886         val = !!cp->val;
1887
1888         if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1889                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1890                 goto failed;
1891         }
1892
1893         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1894         if (!cmd) {
1895                 err = -ENOMEM;
1896                 goto failed;
1897         }
1898
1899         err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1900         if (err < 0) {
1901                 mgmt_pending_remove(cmd);
1902                 goto failed;
1903         }
1904
1905 failed:
1906         hci_dev_unlock(hdev);
1907         return err;
1908 }
1909
1910 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1911 {
1912         struct cmd_lookup match = { NULL, hdev };
1913         struct mgmt_pending_cmd *cmd = data;
1914         struct mgmt_mode *cp = cmd->param;
1915         u8 enable = cp->val;
1916         bool changed;
1917
1918         /* Make sure cmd still outstanding. */
1919         if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1920                 return;
1921
1922         if (err) {
1923                 u8 mgmt_err = mgmt_status(err);
1924
1925                 if (enable && hci_dev_test_and_clear_flag(hdev,
1926                                                           HCI_SSP_ENABLED)) {
1927                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1928                         new_settings(hdev, NULL);
1929                 }
1930
1931                 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1932                                      &mgmt_err);
1933                 return;
1934         }
1935
1936         if (enable) {
1937                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1938         } else {
1939                 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1940
1941                 if (!changed)
1942                         changed = hci_dev_test_and_clear_flag(hdev,
1943                                                               HCI_HS_ENABLED);
1944                 else
1945                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1946         }
1947
1948         mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1949
1950         if (changed)
1951                 new_settings(hdev, match.sk);
1952
1953         if (match.sk)
1954                 sock_put(match.sk);
1955
1956         hci_update_eir_sync(hdev);
1957 }
1958
1959 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1960 {
1961         struct mgmt_pending_cmd *cmd = data;
1962         struct mgmt_mode *cp = cmd->param;
1963         bool changed = false;
1964         int err;
1965
1966         if (cp->val)
1967                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1968
1969         err = hci_write_ssp_mode_sync(hdev, cp->val);
1970
1971         if (!err && changed)
1972                 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1973
1974         return err;
1975 }
1976
1977 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1978 {
1979         struct mgmt_mode *cp = data;
1980         struct mgmt_pending_cmd *cmd;
1981         u8 status;
1982         int err;
1983
1984         bt_dev_dbg(hdev, "sock %p", sk);
1985
1986         status = mgmt_bredr_support(hdev);
1987         if (status)
1988                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1989
1990         if (!lmp_ssp_capable(hdev))
1991                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1992                                        MGMT_STATUS_NOT_SUPPORTED);
1993
1994         if (cp->val != 0x00 && cp->val != 0x01)
1995                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1996                                        MGMT_STATUS_INVALID_PARAMS);
1997
1998         hci_dev_lock(hdev);
1999
2000         if (!hdev_is_powered(hdev)) {
2001                 bool changed;
2002
2003                 if (cp->val) {
2004                         changed = !hci_dev_test_and_set_flag(hdev,
2005                                                              HCI_SSP_ENABLED);
2006                 } else {
2007                         changed = hci_dev_test_and_clear_flag(hdev,
2008                                                               HCI_SSP_ENABLED);
2009                         if (!changed)
2010                                 changed = hci_dev_test_and_clear_flag(hdev,
2011                                                                       HCI_HS_ENABLED);
2012                         else
2013                                 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2014                 }
2015
2016                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2017                 if (err < 0)
2018                         goto failed;
2019
2020                 if (changed)
2021                         err = new_settings(hdev, sk);
2022
2023                 goto failed;
2024         }
2025
2026         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2027                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2028                                       MGMT_STATUS_BUSY);
2029                 goto failed;
2030         }
2031
2032         if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2033                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2034                 goto failed;
2035         }
2036
2037         cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2038         if (!cmd)
2039                 err = -ENOMEM;
2040         else
2041                 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2042                                          set_ssp_complete);
2043
2044         if (err < 0) {
2045                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2046                                       MGMT_STATUS_FAILED);
2047
2048                 if (cmd)
2049                         mgmt_pending_remove(cmd);
2050         }
2051
2052 failed:
2053         hci_dev_unlock(hdev);
2054         return err;
2055 }
2056
2057 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2058 {
2059         struct mgmt_mode *cp = data;
2060         bool changed;
2061         u8 status;
2062         int err;
2063
2064         bt_dev_dbg(hdev, "sock %p", sk);
2065
2066         if (!IS_ENABLED(CONFIG_BT_HS))
2067                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2068                                        MGMT_STATUS_NOT_SUPPORTED);
2069
2070         status = mgmt_bredr_support(hdev);
2071         if (status)
2072                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2073
2074         if (!lmp_ssp_capable(hdev))
2075                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2076                                        MGMT_STATUS_NOT_SUPPORTED);
2077
2078         if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2079                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2080                                        MGMT_STATUS_REJECTED);
2081
2082         if (cp->val != 0x00 && cp->val != 0x01)
2083                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2084                                        MGMT_STATUS_INVALID_PARAMS);
2085
2086         hci_dev_lock(hdev);
2087
2088         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2089                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2090                                       MGMT_STATUS_BUSY);
2091                 goto unlock;
2092         }
2093
2094         if (cp->val) {
2095                 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2096         } else {
2097                 if (hdev_is_powered(hdev)) {
2098                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2099                                               MGMT_STATUS_REJECTED);
2100                         goto unlock;
2101                 }
2102
2103                 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2104         }
2105
2106         err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2107         if (err < 0)
2108                 goto unlock;
2109
2110         if (changed)
2111                 err = new_settings(hdev, sk);
2112
2113 unlock:
2114         hci_dev_unlock(hdev);
2115         return err;
2116 }
2117
2118 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2119 {
2120         struct cmd_lookup match = { NULL, hdev };
2121         u8 status = mgmt_status(err);
2122
2123         bt_dev_dbg(hdev, "err %d", err);
2124
2125         if (status) {
2126                 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2127                                                         &status);
2128                 return;
2129         }
2130
2131         mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2132
2133         new_settings(hdev, match.sk);
2134
2135         if (match.sk)
2136                 sock_put(match.sk);
2137 }
2138
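/* hci_cmd_sync callback for MGMT_OP_SET_LE: tear down advertising when LE is
 * being disabled, set HCI_LE_ENABLED when it is being enabled, then write the
 * LE host support setting and, if LE ended up enabled, refresh the default
 * advertising and scan response data.
 */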
2139 static int set_le_sync(struct hci_dev *hdev, void *data)
2140 {
2141         struct mgmt_pending_cmd *cmd = data;
2142         struct mgmt_mode *cp = cmd->param;
2143         u8 val = !!cp->val;
2144         int err;
2145
2146         if (!val) {
2147                 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2148
2149                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2150                         hci_disable_advertising_sync(hdev);
2151
2152                 if (ext_adv_capable(hdev))
2153                         hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2154         } else {
2155                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2156         }
2157
2158         err = hci_write_le_host_supported_sync(hdev, val, 0);
2159
2160         /* Make sure the controller has a good default for
2161          * advertising data. Restrict the update to when LE
2162          * has actually been enabled. During power on, the
2163          * update in powered_update_hci will take care of it.
2164          */
2165         if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2166                 if (ext_adv_capable(hdev)) {
2167                         int status;
2168
2169                         status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2170                         if (!status)
2171                                 hci_update_scan_rsp_data_sync(hdev, 0x00);
2172                 } else {
2173                         hci_update_adv_data_sync(hdev, 0x00);
2174                         hci_update_scan_rsp_data_sync(hdev, 0x00);
2175                 }
2176
2177                 hci_update_passive_scan(hdev);
2178         }
2179
2180         return err;
2181 }
2182
2183 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2184 {
2185         struct mgmt_pending_cmd *cmd = data;
2186         u8 status = mgmt_status(err);
2187         struct sock *sk = cmd->sk;
2188
2189         if (status) {
2190                 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2191                                      cmd_status_rsp, &status);
2192                 return;
2193         }
2194
2195         mgmt_pending_remove(cmd);
2196         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2197 }
2198
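/* hci_cmd_sync callback for MGMT_OP_SET_MESH_RECEIVER: toggle the HCI_MESH
 * flag, copy the AD type filter list supplied by userspace (when it fits)
 * and re-program passive scanning.
 */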
2199 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2200 {
2201         struct mgmt_pending_cmd *cmd = data;
2202         struct mgmt_cp_set_mesh *cp = cmd->param;
2203         size_t len = cmd->param_len;
2204
2205         memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2206
2207         if (cp->enable)
2208                 hci_dev_set_flag(hdev, HCI_MESH);
2209         else
2210                 hci_dev_clear_flag(hdev, HCI_MESH);
2211
2212         len -= sizeof(*cp);
2213
2214         /* If filters don't fit, forward all adv pkts */
2215         if (len <= sizeof(hdev->mesh_ad_types))
2216                 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2217
2218         hci_update_passive_scan_sync(hdev);
2219         return 0;
2220 }
2221
2222 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2223 {
2224         struct mgmt_cp_set_mesh *cp = data;
2225         struct mgmt_pending_cmd *cmd;
2226         int err = 0;
2227
2228         bt_dev_dbg(hdev, "sock %p", sk);
2229
2230         if (!lmp_le_capable(hdev) ||
2231             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2232                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2233                                        MGMT_STATUS_NOT_SUPPORTED);
2234
2235         if (cp->enable != 0x00 && cp->enable != 0x01)
2236                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2237                                        MGMT_STATUS_INVALID_PARAMS);
2238
2239         hci_dev_lock(hdev);
2240
2241         cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2242         if (!cmd)
2243                 err = -ENOMEM;
2244         else
2245                 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2246                                          set_mesh_complete);
2247
2248         if (err < 0) {
2249                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2250                                       MGMT_STATUS_FAILED);
2251
2252                 if (cmd)
2253                         mgmt_pending_remove(cmd);
2254         }
2255
2256         hci_dev_unlock(hdev);
2257         return err;
2258 }
2259
2260 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2261 {
2262         struct mgmt_mesh_tx *mesh_tx = data;
2263         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2264         unsigned long mesh_send_interval;
2265         u8 mgmt_err = mgmt_status(err);
2266
2267         /* Report any errors here, but don't report completion */
2268
2269         if (mgmt_err) {
2270                 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2271                 /* Send Complete Error Code for handle */
2272                 mesh_send_complete(hdev, mesh_tx, false);
2273                 return;
2274         }
2275
2276         mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2277         queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2278                            mesh_send_interval);
2279 }
2280
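/* Transmit one queued mesh packet by adding it as a short-lived advertising
 * instance (numbered one past the controller's advertising sets) and, when
 * nothing else is currently being advertised, scheduling it right away.
 */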
2281 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2282 {
2283         struct mgmt_mesh_tx *mesh_tx = data;
2284         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2285         struct adv_info *adv, *next_instance;
2286         u8 instance = hdev->le_num_of_adv_sets + 1;
2287         u16 timeout, duration;
2288         int err = 0;
2289
2290         if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2291                 return MGMT_STATUS_BUSY;
2292
2293         timeout = 1000;
2294         duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2295         adv = hci_add_adv_instance(hdev, instance, 0,
2296                                    send->adv_data_len, send->adv_data,
2297                                    0, NULL,
2298                                    timeout, duration,
2299                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
2300                                    hdev->le_adv_min_interval,
2301                                    hdev->le_adv_max_interval,
2302                                    mesh_tx->handle);
2303
2304         if (!IS_ERR(adv))
2305                 mesh_tx->instance = instance;
2306         else
2307                 err = PTR_ERR(adv);
2308
2309         if (hdev->cur_adv_instance == instance) {
2310                 /* If the currently advertised instance is being changed then
2311                  * cancel the current advertising and schedule the next
2312                  * instance. If there is only one instance then the overridden
2313                  * advertising data will be visible right away.
2314                  */
2315                 cancel_adv_timeout(hdev);
2316
2317                 next_instance = hci_get_next_instance(hdev, instance);
2318                 if (next_instance)
2319                         instance = next_instance->instance;
2320                 else
2321                         instance = 0;
2322         } else if (hdev->adv_instance_timeout) {
2323                 /* Immediately advertise the new instance if no other is
2324                  * active, or let it go naturally from the queue if ADV is
2325                  * already happening. */
2326                 instance = 0;
2327         }
2328
2329         if (instance)
2330                 return hci_schedule_adv_instance_sync(hdev, instance, true);
2331
2332         return err;
2333 }
2334
2335 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2336 {
2337         struct mgmt_rp_mesh_read_features *rp = data;
2338
2339         if (rp->used_handles >= rp->max_handles)
2340                 return;
2341
2342         rp->handles[rp->used_handles++] = mesh_tx->handle;
2343 }
2344
2345 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2346                          void *data, u16 len)
2347 {
2348         struct mgmt_rp_mesh_read_features rp;
2349
2350         if (!lmp_le_capable(hdev) ||
2351             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2352                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2353                                        MGMT_STATUS_NOT_SUPPORTED);
2354
2355         memset(&rp, 0, sizeof(rp));
2356         rp.index = cpu_to_le16(hdev->id);
2357         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2358                 rp.max_handles = MESH_HANDLES_MAX;
2359
2360         hci_dev_lock(hdev);
2361
2362         if (rp.max_handles)
2363                 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2364
2365         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2366                           rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2367
2368         hci_dev_unlock(hdev);
2369         return 0;
2370 }
2371
2372 static int send_cancel(struct hci_dev *hdev, void *data)
2373 {
2374         struct mgmt_pending_cmd *cmd = data;
2375         struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2376         struct mgmt_mesh_tx *mesh_tx;
2377
2378         if (!cancel->handle) {
2379                 do {
2380                         mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2381
2382                         if (mesh_tx)
2383                                 mesh_send_complete(hdev, mesh_tx, false);
2384                 } while (mesh_tx);
2385         } else {
2386                 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2387
2388                 if (mesh_tx && mesh_tx->sk == cmd->sk)
2389                         mesh_send_complete(hdev, mesh_tx, false);
2390         }
2391
2392         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2393                           0, NULL, 0);
2394         mgmt_pending_free(cmd);
2395
2396         return 0;
2397 }
2398
2399 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2400                             void *data, u16 len)
2401 {
2402         struct mgmt_pending_cmd *cmd;
2403         int err;
2404
2405         if (!lmp_le_capable(hdev) ||
2406             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2407                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2408                                        MGMT_STATUS_NOT_SUPPORTED);
2409
2410         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2411                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2412                                        MGMT_STATUS_REJECTED);
2413
2414         hci_dev_lock(hdev);
2415         cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2416         if (!cmd)
2417                 err = -ENOMEM;
2418         else
2419                 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2420
2421         if (err < 0) {
2422                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2423                                       MGMT_STATUS_FAILED);
2424
2425                 if (cmd)
2426                         mgmt_pending_free(cmd);
2427         }
2428
2429         hci_dev_unlock(hdev);
2430         return err;
2431 }
2432
2433 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2434 {
2435         struct mgmt_mesh_tx *mesh_tx;
2436         struct mgmt_cp_mesh_send *send = data;
2437         struct mgmt_rp_mesh_read_features rp;
2438         bool sending;
2439         int err = 0;
2440
2441         if (!lmp_le_capable(hdev) ||
2442             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2443                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2444                                        MGMT_STATUS_NOT_SUPPORTED);
2445         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2446             len <= MGMT_MESH_SEND_SIZE ||
2447             len > (MGMT_MESH_SEND_SIZE + 31))
2448                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2449                                        MGMT_STATUS_REJECTED);
2450
2451         hci_dev_lock(hdev);
2452
2453         memset(&rp, 0, sizeof(rp));
2454         rp.max_handles = MESH_HANDLES_MAX;
2455
2456         mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2457
2458         if (rp.max_handles <= rp.used_handles) {
2459                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2460                                       MGMT_STATUS_BUSY);
2461                 goto done;
2462         }
2463
2464         sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2465         mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2466
2467         if (!mesh_tx)
2468                 err = -ENOMEM;
2469         else if (!sending)
2470                 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2471                                          mesh_send_start_complete);
2472
2473         if (err < 0) {
2474                 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2475                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2476                                       MGMT_STATUS_FAILED);
2477
2478                 if (mesh_tx) {
2479                         if (sending)
2480                                 mgmt_mesh_remove(mesh_tx);
2481                 }
2482         } else {
2483                 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2484
2485                 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2486                                   &mesh_tx->handle, 1);
2487         }
2488
2489 done:
2490         hci_dev_unlock(hdev);
2491         return err;
2492 }
2493
2494 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2495 {
2496         struct mgmt_mode *cp = data;
2497         struct mgmt_pending_cmd *cmd;
2498         int err;
2499         u8 val, enabled;
2500
2501         bt_dev_dbg(hdev, "sock %p", sk);
2502
2503         if (!lmp_le_capable(hdev))
2504                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2505                                        MGMT_STATUS_NOT_SUPPORTED);
2506
2507         if (cp->val != 0x00 && cp->val != 0x01)
2508                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2509                                        MGMT_STATUS_INVALID_PARAMS);
2510
2511         /* Single-mode LE-only controllers, or dual-mode controllers
2512          * configured as LE-only devices, do not allow switching LE off.
2513          * These have either LE enabled explicitly or BR/EDR previously
2514          * switched off.
2515          *
2516          * When trying to enable LE while it is already enabled, gracefully
2517          * send a positive response. Trying to disable it, however, results
2518          * in rejection.
2519          */
2520         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2521                 if (cp->val == 0x01)
2522                         return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2523
2524                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2525                                        MGMT_STATUS_REJECTED);
2526         }
2527
2528         hci_dev_lock(hdev);
2529
2530         val = !!cp->val;
2531         enabled = lmp_host_le_capable(hdev);
2532
2533         if (!hdev_is_powered(hdev) || val == enabled) {
2534                 bool changed = false;
2535
2536                 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2537                         hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2538                         changed = true;
2539                 }
2540
2541                 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2542                         hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2543                         changed = true;
2544                 }
2545
2546                 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2547                 if (err < 0)
2548                         goto unlock;
2549
2550                 if (changed)
2551                         err = new_settings(hdev, sk);
2552
2553                 goto unlock;
2554         }
2555
2556         if (pending_find(MGMT_OP_SET_LE, hdev) ||
2557             pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2558                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2559                                       MGMT_STATUS_BUSY);
2560                 goto unlock;
2561         }
2562
2563         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2564         if (!cmd)
2565                 err = -ENOMEM;
2566         else
2567                 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2568                                          set_le_complete);
2569
2570         if (err < 0) {
2571                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2572                                       MGMT_STATUS_FAILED);
2573
2574                 if (cmd)
2575                         mgmt_pending_remove(cmd);
2576         }
2577
2578 unlock:
2579         hci_dev_unlock(hdev);
2580         return err;
2581 }
2582
2583 /* This is a helper function to test for pending mgmt commands that can
2584  * cause CoD or EIR HCI commands. Only one such pending mgmt command is
2585  * allowed at a time since otherwise we cannot easily track what the
2586  * current values are or will be, and therefore cannot calculate whether
2587  * a new HCI command needs to be sent and, if so, with what value.
2588  */
2589 static bool pending_eir_or_class(struct hci_dev *hdev)
2590 {
2591         struct mgmt_pending_cmd *cmd;
2592
2593         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2594                 switch (cmd->opcode) {
2595                 case MGMT_OP_ADD_UUID:
2596                 case MGMT_OP_REMOVE_UUID:
2597                 case MGMT_OP_SET_DEV_CLASS:
2598                 case MGMT_OP_SET_POWERED:
2599                         return true;
2600                 }
2601         }
2602
2603         return false;
2604 }
2605
2606 static const u8 bluetooth_base_uuid[] = {
2607                         0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2608                         0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2609 };
2610
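/* Report the smallest representation of a UUID: values not derived from the
 * Bluetooth Base UUID stay 128 bits, derived values above 0xffff need
 * 32 bits, and everything else fits in 16 bits.
 */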
2611 static u8 get_uuid_size(const u8 *uuid)
2612 {
2613         u32 val;
2614
2615         if (memcmp(uuid, bluetooth_base_uuid, 12))
2616                 return 128;
2617
2618         val = get_unaligned_le32(&uuid[12]);
2619         if (val > 0xffff)
2620                 return 32;
2621
2622         return 16;
2623 }
2624
2625 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2626 {
2627         struct mgmt_pending_cmd *cmd = data;
2628
2629         bt_dev_dbg(hdev, "err %d", err);
2630
2631         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2632                           mgmt_status(err), hdev->dev_class, 3);
2633
2634         mgmt_pending_free(cmd);
2635 }
2636
2637 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2638 {
2639         int err;
2640
2641         err = hci_update_class_sync(hdev);
2642         if (err)
2643                 return err;
2644
2645         return hci_update_eir_sync(hdev);
2646 }
2647
2648 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2649 {
2650         struct mgmt_cp_add_uuid *cp = data;
2651         struct mgmt_pending_cmd *cmd;
2652         struct bt_uuid *uuid;
2653         int err;
2654
2655         bt_dev_dbg(hdev, "sock %p", sk);
2656
2657         hci_dev_lock(hdev);
2658
2659         if (pending_eir_or_class(hdev)) {
2660                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2661                                       MGMT_STATUS_BUSY);
2662                 goto failed;
2663         }
2664
2665         uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2666         if (!uuid) {
2667                 err = -ENOMEM;
2668                 goto failed;
2669         }
2670
2671         memcpy(uuid->uuid, cp->uuid, 16);
2672         uuid->svc_hint = cp->svc_hint;
2673         uuid->size = get_uuid_size(cp->uuid);
2674
2675         list_add_tail(&uuid->list, &hdev->uuids);
2676
2677         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2678         if (!cmd) {
2679                 err = -ENOMEM;
2680                 goto failed;
2681         }
2682
2683         /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running,
2684          * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2685          */
2686         err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2687                                   mgmt_class_complete);
2688         if (err < 0) {
2689                 mgmt_pending_free(cmd);
2690                 goto failed;
2691         }
2692
2693 failed:
2694         hci_dev_unlock(hdev);
2695         return err;
2696 }
2697
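/* On a powered adapter, arm the service cache timer so that the class/EIR
 * update is deferred; returns true only if the timer was newly scheduled.
 */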
2698 static bool enable_service_cache(struct hci_dev *hdev)
2699 {
2700         if (!hdev_is_powered(hdev))
2701                 return false;
2702
2703         if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2704                 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2705                                    CACHE_TIMEOUT);
2706                 return true;
2707         }
2708
2709         return false;
2710 }
2711
2712 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2713 {
2714         int err;
2715
2716         err = hci_update_class_sync(hdev);
2717         if (err)
2718                 return err;
2719
2720         return hci_update_eir_sync(hdev);
2721 }
2722
2723 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2724                        u16 len)
2725 {
2726         struct mgmt_cp_remove_uuid *cp = data;
2727         struct mgmt_pending_cmd *cmd;
2728         struct bt_uuid *match, *tmp;
2729         static const u8 bt_uuid_any[] = {
2730                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2731         };
2732         int err, found;
2733
2734         bt_dev_dbg(hdev, "sock %p", sk);
2735
2736         hci_dev_lock(hdev);
2737
2738         if (pending_eir_or_class(hdev)) {
2739                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2740                                       MGMT_STATUS_BUSY);
2741                 goto unlock;
2742         }
2743
2744         if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2745                 hci_uuids_clear(hdev);
2746
2747                 if (enable_service_cache(hdev)) {
2748                         err = mgmt_cmd_complete(sk, hdev->id,
2749                                                 MGMT_OP_REMOVE_UUID,
2750                                                 0, hdev->dev_class, 3);
2751                         goto unlock;
2752                 }
2753
2754                 goto update_class;
2755         }
2756
2757         found = 0;
2758
2759         list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2760                 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2761                         continue;
2762
2763                 list_del(&match->list);
2764                 kfree(match);
2765                 found++;
2766         }
2767
2768         if (found == 0) {
2769                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2770                                       MGMT_STATUS_INVALID_PARAMS);
2771                 goto unlock;
2772         }
2773
2774 update_class:
2775         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2776         if (!cmd) {
2777                 err = -ENOMEM;
2778                 goto unlock;
2779         }
2780
2781         /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
2782          * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2783          */
2784         err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2785                                   mgmt_class_complete);
2786         if (err < 0)
2787                 mgmt_pending_free(cmd);
2788
2789 unlock:
2790         hci_dev_unlock(hdev);
2791         return err;
2792 }
2793
2794 static int set_class_sync(struct hci_dev *hdev, void *data)
2795 {
2796         int err = 0;
2797
2798         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2799                 cancel_delayed_work_sync(&hdev->service_cache);
2800                 err = hci_update_eir_sync(hdev);
2801         }
2802
2803         if (err)
2804                 return err;
2805
2806         return hci_update_class_sync(hdev);
2807 }
2808
2809 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2810                          u16 len)
2811 {
2812         struct mgmt_cp_set_dev_class *cp = data;
2813         struct mgmt_pending_cmd *cmd;
2814         int err;
2815
2816         bt_dev_dbg(hdev, "sock %p", sk);
2817
2818         if (!lmp_bredr_capable(hdev))
2819                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2820                                        MGMT_STATUS_NOT_SUPPORTED);
2821
2822         hci_dev_lock(hdev);
2823
2824         if (pending_eir_or_class(hdev)) {
2825                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2826                                       MGMT_STATUS_BUSY);
2827                 goto unlock;
2828         }
2829
2830         if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2831                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2832                                       MGMT_STATUS_INVALID_PARAMS);
2833                 goto unlock;
2834         }
2835
2836         hdev->major_class = cp->major;
2837         hdev->minor_class = cp->minor;
2838
2839         if (!hdev_is_powered(hdev)) {
2840                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2841                                         hdev->dev_class, 3);
2842                 goto unlock;
2843         }
2844
2845         cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2846         if (!cmd) {
2847                 err = -ENOMEM;
2848                 goto unlock;
2849         }
2850
2851         /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
2852          * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2853          */
2854         err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2855                                   mgmt_class_complete);
2856         if (err < 0)
2857                 mgmt_pending_free(cmd);
2858
2859 unlock:
2860         hci_dev_unlock(hdev);
2861         return err;
2862 }
2863
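/* MGMT_OP_LOAD_LINK_KEYS handler: validate the key count against the payload
 * length, clear the existing BR/EDR link key list and reload it from the
 * keys supplied by userspace, skipping blocked keys and debug combination
 * keys.
 */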
2864 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2865                           u16 len)
2866 {
2867         struct mgmt_cp_load_link_keys *cp = data;
2868         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2869                                    sizeof(struct mgmt_link_key_info));
2870         u16 key_count, expected_len;
2871         bool changed;
2872         int i;
2873
2874         bt_dev_dbg(hdev, "sock %p", sk);
2875
2876         if (!lmp_bredr_capable(hdev))
2877                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2878                                        MGMT_STATUS_NOT_SUPPORTED);
2879
2880         key_count = __le16_to_cpu(cp->key_count);
2881         if (key_count > max_key_count) {
2882                 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2883                            key_count);
2884                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2885                                        MGMT_STATUS_INVALID_PARAMS);
2886         }
2887
2888         expected_len = struct_size(cp, keys, key_count);
2889         if (expected_len != len) {
2890                 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2891                            expected_len, len);
2892                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2893                                        MGMT_STATUS_INVALID_PARAMS);
2894         }
2895
2896         if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2897                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2898                                        MGMT_STATUS_INVALID_PARAMS);
2899
2900         bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2901                    key_count);
2902
2903         for (i = 0; i < key_count; i++) {
2904                 struct mgmt_link_key_info *key = &cp->keys[i];
2905
2906                 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2907                 if (key->type > 0x08)
2908                         return mgmt_cmd_status(sk, hdev->id,
2909                                                MGMT_OP_LOAD_LINK_KEYS,
2910                                                MGMT_STATUS_INVALID_PARAMS);
2911         }
2912
2913         hci_dev_lock(hdev);
2914
2915         hci_link_keys_clear(hdev);
2916
2917         if (cp->debug_keys)
2918                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2919         else
2920                 changed = hci_dev_test_and_clear_flag(hdev,
2921                                                       HCI_KEEP_DEBUG_KEYS);
2922
2923         if (changed)
2924                 new_settings(hdev, NULL);
2925
2926         for (i = 0; i < key_count; i++) {
2927                 struct mgmt_link_key_info *key = &cp->keys[i];
2928
2929                 if (hci_is_blocked_key(hdev,
2930                                        HCI_BLOCKED_KEY_TYPE_LINKKEY,
2931                                        key->val)) {
2932                         bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2933                                     &key->addr.bdaddr);
2934                         continue;
2935                 }
2936
2937                 /* Always ignore debug keys and require a new pairing if
2938                  * the user wants to use them.
2939                  */
2940                 if (key->type == HCI_LK_DEBUG_COMBINATION)
2941                         continue;
2942
2943                 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2944                                  key->type, key->pin_len, NULL);
2945         }
2946
2947         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2948
2949         hci_dev_unlock(hdev);
2950
2951         return 0;
2952 }
2953
2954 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2955                            u8 addr_type, struct sock *skip_sk)
2956 {
2957         struct mgmt_ev_device_unpaired ev;
2958
2959         bacpy(&ev.addr.bdaddr, bdaddr);
2960         ev.addr.type = addr_type;
2961
2962         return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2963                           skip_sk);
2964 }
2965
2966 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2967 {
2968         struct mgmt_pending_cmd *cmd = data;
2969         struct mgmt_cp_unpair_device *cp = cmd->param;
2970
2971         if (!err)
2972                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2973
2974         cmd->cmd_complete(cmd, err);
2975         mgmt_pending_free(cmd);
2976 }
2977
2978 static int unpair_device_sync(struct hci_dev *hdev, void *data)
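/* hci_cmd_sync callback for MGMT_OP_UNPAIR_DEVICE: if the device is still
 * connected, abort the connection with HCI_ERROR_REMOTE_USER_TERM; otherwise
 * there is nothing left to do here.
 */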
2979 {
2980         struct mgmt_pending_cmd *cmd = data;
2981         struct mgmt_cp_unpair_device *cp = cmd->param;
2982         struct hci_conn *conn;
2983
2984         if (cp->addr.type == BDADDR_BREDR)
2985                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2986                                                &cp->addr.bdaddr);
2987         else
2988                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2989                                                le_addr_type(cp->addr.type));
2990
2991         if (!conn)
2992                 return 0;
2993
2994         return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2995 }
2996
2997 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2998                          u16 len)
2999 {
3000         struct mgmt_cp_unpair_device *cp = data;
3001         struct mgmt_rp_unpair_device rp;
3002         struct hci_conn_params *params;
3003         struct mgmt_pending_cmd *cmd;
3004         struct hci_conn *conn;
3005         u8 addr_type;
3006         int err;
3007
3008         memset(&rp, 0, sizeof(rp));
3009         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3010         rp.addr.type = cp->addr.type;
3011
3012         if (!bdaddr_type_is_valid(cp->addr.type))
3013                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3014                                          MGMT_STATUS_INVALID_PARAMS,
3015                                          &rp, sizeof(rp));
3016
3017         if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3018                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3019                                          MGMT_STATUS_INVALID_PARAMS,
3020                                          &rp, sizeof(rp));
3021
3022         hci_dev_lock(hdev);
3023
3024         if (!hdev_is_powered(hdev)) {
3025                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3026                                         MGMT_STATUS_NOT_POWERED, &rp,
3027                                         sizeof(rp));
3028                 goto unlock;
3029         }
3030
3031         if (cp->addr.type == BDADDR_BREDR) {
3032         /* If disconnection is requested, then look up the
3033          * connection. If the remote device is connected, the
3034          * connection will later be used to terminate the link.
3035          *
3036          * Leaving the connection pointer as NULL means the link
3037          * will not be terminated.
3038          */
3039                 if (cp->disconnect)
3040                         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3041                                                        &cp->addr.bdaddr);
3042                 else
3043                         conn = NULL;
3044
3045                 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3046                 if (err < 0) {
3047                         err = mgmt_cmd_complete(sk, hdev->id,
3048                                                 MGMT_OP_UNPAIR_DEVICE,
3049                                                 MGMT_STATUS_NOT_PAIRED, &rp,
3050                                                 sizeof(rp));
3051                         goto unlock;
3052                 }
3053
3054                 goto done;
3055         }
3056
3057         /* LE address type */
3058         addr_type = le_addr_type(cp->addr.type);
3059
3060         /* Abort any ongoing SMP pairing. Removes LTK and IRK if they exist. */
3061         err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3062         if (err < 0) {
3063                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3064                                         MGMT_STATUS_NOT_PAIRED, &rp,
3065                                         sizeof(rp));
3066                 goto unlock;
3067         }
3068
3069         conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3070         if (!conn) {
3071                 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3072                 goto done;
3073         }
3074
3075
3076         /* Defer clearing the connection parameters until the connection
3077          * closes, to give a chance of keeping them if re-pairing happens.
3078          */
3079         set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3080
3081         /* Disable auto-connection parameters if present */
3082         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3083         if (params) {
3084                 if (params->explicit_connect)
3085                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3086                 else
3087                         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3088         }
3089
3090         /* If disconnection is not requested, then clear the connection
3091          * variable so that the link is not terminated.
3092          */
3093         if (!cp->disconnect)
3094                 conn = NULL;
3095
3096 done:
3097         /* If the connection variable is set, then termination of the
3098          * link is requested.
3099          */
3100         if (!conn) {
3101                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3102                                         &rp, sizeof(rp));
3103                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3104                 goto unlock;
3105         }
3106
3107         cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3108                                sizeof(*cp));
3109         if (!cmd) {
3110                 err = -ENOMEM;
3111                 goto unlock;
3112         }
3113
3114         cmd->cmd_complete = addr_cmd_complete;
3115
3116         err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3117                                  unpair_device_complete);
3118         if (err < 0)
3119                 mgmt_pending_free(cmd);
3120
3121 unlock:
3122         hci_dev_unlock(hdev);
3123         return err;
3124 }
3125
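/* Disconnect: terminates an existing BR/EDR or LE connection on behalf of
 * userspace. The command is tracked as a pending MGMT_OP_DISCONNECT and
 * only completes once the controller reports the disconnection; the link
 * is closed with reason HCI_ERROR_REMOTE_USER_TERM (0x13, Remote User
 * Terminated Connection).
 */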
3126 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3127                       u16 len)
3128 {
3129         struct mgmt_cp_disconnect *cp = data;
3130         struct mgmt_rp_disconnect rp;
3131         struct mgmt_pending_cmd *cmd;
3132         struct hci_conn *conn;
3133         int err;
3134
3135         bt_dev_dbg(hdev, "sock %p", sk);
3136
3137         memset(&rp, 0, sizeof(rp));
3138         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3139         rp.addr.type = cp->addr.type;
3140
3141         if (!bdaddr_type_is_valid(cp->addr.type))
3142                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3143                                          MGMT_STATUS_INVALID_PARAMS,
3144                                          &rp, sizeof(rp));
3145
3146         hci_dev_lock(hdev);
3147
3148         if (!test_bit(HCI_UP, &hdev->flags)) {
3149                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3150                                         MGMT_STATUS_NOT_POWERED, &rp,
3151                                         sizeof(rp));
3152                 goto failed;
3153         }
3154
3155         if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3156                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3157                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3158                 goto failed;
3159         }
3160
3161         if (cp->addr.type == BDADDR_BREDR)
3162                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3163                                                &cp->addr.bdaddr);
3164         else
3165                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3166                                                le_addr_type(cp->addr.type));
3167
3168         if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3169                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3170                                         MGMT_STATUS_NOT_CONNECTED, &rp,
3171                                         sizeof(rp));
3172                 goto failed;
3173         }
3174
3175         cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3176         if (!cmd) {
3177                 err = -ENOMEM;
3178                 goto failed;
3179         }
3180
3181         cmd->cmd_complete = generic_cmd_complete;
3182
3183         err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3184         if (err < 0)
3185                 mgmt_pending_remove(cmd);
3186
3187 failed:
3188         hci_dev_unlock(hdev);
3189         return err;
3190 }
3191
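/* Map an HCI link type and address type to the single address type value
 * used by the management interface, e.g. (LE_LINK, ADDR_LE_DEV_PUBLIC)
 * becomes BDADDR_LE_PUBLIC while any BR/EDR link becomes BDADDR_BREDR.
 */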
3192 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3193 {
3194         switch (link_type) {
3195         case LE_LINK:
3196                 switch (addr_type) {
3197                 case ADDR_LE_DEV_PUBLIC:
3198                         return BDADDR_LE_PUBLIC;
3199
3200                 default:
3201                         /* Fallback to LE Random address type */
3202                         return BDADDR_LE_RANDOM;
3203                 }
3204
3205         default:
3206                 /* Fallback to BR/EDR type */
3207                 return BDADDR_BREDR;
3208         }
3209 }
3210
3211 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3212                            u16 data_len)
3213 {
3214         struct mgmt_rp_get_connections *rp;
3215         struct hci_conn *c;
3216         int err;
3217         u16 i;
3218
3219         bt_dev_dbg(hdev, "sock %p", sk);
3220
3221         hci_dev_lock(hdev);
3222
3223         if (!hdev_is_powered(hdev)) {
3224                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3225                                       MGMT_STATUS_NOT_POWERED);
3226                 goto unlock;
3227         }
3228
3229         i = 0;
3230         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3231                 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3232                         i++;
3233         }
3234
3235         rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3236         if (!rp) {
3237                 err = -ENOMEM;
3238                 goto unlock;
3239         }
3240
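        /* Second pass: fill in the addresses. SCO and eSCO links would only
         * duplicate the peer address of their ACL link, so for them the
         * index is not advanced and the entry ends up being overwritten or
         * dropped when the reply length is recalculated below.
         */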
3241         i = 0;
3242         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3243                 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3244                         continue;
3245                 bacpy(&rp->addr[i].bdaddr, &c->dst);
3246                 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3247                 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3248                         continue;
3249                 i++;
3250         }
3251
3252         rp->conn_count = cpu_to_le16(i);
3253
3254         /* Recalculate length in case of filtered SCO connections, etc */
3255         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3256                                 struct_size(rp, addr, i));
3257
3258         kfree(rp);
3259
3260 unlock:
3261         hci_dev_unlock(hdev);
3262         return err;
3263 }
3264
3265 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3266                                    struct mgmt_cp_pin_code_neg_reply *cp)
3267 {
3268         struct mgmt_pending_cmd *cmd;
3269         int err;
3270
3271         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3272                                sizeof(*cp));
3273         if (!cmd)
3274                 return -ENOMEM;
3275
3276         cmd->cmd_complete = addr_cmd_complete;
3277
3278         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3279                            sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3280         if (err < 0)
3281                 mgmt_pending_remove(cmd);
3282
3283         return err;
3284 }
3285
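/* PIN Code Reply: used during legacy (pre-SSP) pairing. If the pending
 * security level is BT_SECURITY_HIGH, only a full 16 digit PIN is
 * acceptable; anything shorter is turned into a negative reply before the
 * command is failed with Invalid Params. Otherwise the PIN is forwarded to
 * the controller via HCI_OP_PIN_CODE_REPLY.
 */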
3286 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3287                           u16 len)
3288 {
3289         struct hci_conn *conn;
3290         struct mgmt_cp_pin_code_reply *cp = data;
3291         struct hci_cp_pin_code_reply reply;
3292         struct mgmt_pending_cmd *cmd;
3293         int err;
3294
3295         bt_dev_dbg(hdev, "sock %p", sk);
3296
3297         hci_dev_lock(hdev);
3298
3299         if (!hdev_is_powered(hdev)) {
3300                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3301                                       MGMT_STATUS_NOT_POWERED);
3302                 goto failed;
3303         }
3304
3305         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3306         if (!conn) {
3307                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3308                                       MGMT_STATUS_NOT_CONNECTED);
3309                 goto failed;
3310         }
3311
3312         if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3313                 struct mgmt_cp_pin_code_neg_reply ncp;
3314
3315                 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3316
3317                 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3318
3319                 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3320                 if (err >= 0)
3321                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3322                                               MGMT_STATUS_INVALID_PARAMS);
3323
3324                 goto failed;
3325         }
3326
3327         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3328         if (!cmd) {
3329                 err = -ENOMEM;
3330                 goto failed;
3331         }
3332
3333         cmd->cmd_complete = addr_cmd_complete;
3334
3335         bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3336         reply.pin_len = cp->pin_len;
3337         memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3338
3339         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3340         if (err < 0)
3341                 mgmt_pending_remove(cmd);
3342
3343 failed:
3344         hci_dev_unlock(hdev);
3345         return err;
3346 }
3347
3348 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3349                              u16 len)
3350 {
3351         struct mgmt_cp_set_io_capability *cp = data;
3352
3353         bt_dev_dbg(hdev, "sock %p", sk);
3354
3355         if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3356                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3357                                        MGMT_STATUS_INVALID_PARAMS);
3358
3359         hci_dev_lock(hdev);
3360
3361         hdev->io_capability = cp->io_capability;
3362
3363         bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3364
3365         hci_dev_unlock(hdev);
3366
3367         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3368                                  NULL, 0);
3369 }
3370
3371 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3372 {
3373         struct hci_dev *hdev = conn->hdev;
3374         struct mgmt_pending_cmd *cmd;
3375
3376         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3377                 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3378                         continue;
3379
3380                 if (cmd->user_data != conn)
3381                         continue;
3382
3383                 return cmd;
3384         }
3385
3386         return NULL;
3387 }
3388
3389 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3390 {
3391         struct mgmt_rp_pair_device rp;
3392         struct hci_conn *conn = cmd->user_data;
3393         int err;
3394
3395         bacpy(&rp.addr.bdaddr, &conn->dst);
3396         rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3397
3398         err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3399                                 status, &rp, sizeof(rp));
3400
3401         /* So we don't get further callbacks for this connection */
3402         conn->connect_cfm_cb = NULL;
3403         conn->security_cfm_cb = NULL;
3404         conn->disconn_cfm_cb = NULL;
3405
3406         hci_conn_drop(conn);
3407
3408         /* The device is paired so there is no need to remove
3409          * its connection parameters anymore.
3410          */
3411         clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3412
3413         hci_conn_put(conn);
3414
3415         return err;
3416 }
3417
3418 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3419 {
3420         u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3421         struct mgmt_pending_cmd *cmd;
3422
3423         cmd = find_pairing(conn);
3424         if (cmd) {
3425                 cmd->cmd_complete(cmd, status);
3426                 mgmt_pending_remove(cmd);
3427         }
3428 }
3429
3430 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3431 {
3432         struct mgmt_pending_cmd *cmd;
3433
3434         BT_DBG("status %u", status);
3435
3436         cmd = find_pairing(conn);
3437         if (!cmd) {
3438                 BT_DBG("Unable to find a pending command");
3439                 return;
3440         }
3441
3442         cmd->cmd_complete(cmd, mgmt_status(status));
3443         mgmt_pending_remove(cmd);
3444 }
3445
3446 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3447 {
3448         struct mgmt_pending_cmd *cmd;
3449
3450         BT_DBG("status %u", status);
3451
3452         if (!status)
3453                 return;
3454
3455         cmd = find_pairing(conn);
3456         if (!cmd) {
3457                 BT_DBG("Unable to find a pending command");
3458                 return;
3459         }
3460
3461         cmd->cmd_complete(cmd, mgmt_status(status));
3462         mgmt_pending_remove(cmd);
3463 }
3464
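/* Pair Device: initiates pairing/bonding with a remote device. For BR/EDR
 * an ACL connection is created with dedicated bonding and medium security;
 * for LE the device is connected through the scan/connect logic and SMP
 * drives the pairing. The pending command keeps the connection in
 * cmd->user_data and is completed from the connect/security/disconnect
 * callbacks below.
 *
 * A sketch of the command parameters (see mgmt.h):
 *
 *	struct mgmt_cp_pair_device {
 *		struct mgmt_addr_info addr;
 *		__u8 io_cap;	(at most SMP_IO_KEYBOARD_DISPLAY)
 *	} __packed;
 */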
3465 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3466                        u16 len)
3467 {
3468         struct mgmt_cp_pair_device *cp = data;
3469         struct mgmt_rp_pair_device rp;
3470         struct mgmt_pending_cmd *cmd;
3471         u8 sec_level, auth_type;
3472         struct hci_conn *conn;
3473         int err;
3474
3475         bt_dev_dbg(hdev, "sock %p", sk);
3476
3477         memset(&rp, 0, sizeof(rp));
3478         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3479         rp.addr.type = cp->addr.type;
3480
3481         if (!bdaddr_type_is_valid(cp->addr.type))
3482                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3483                                          MGMT_STATUS_INVALID_PARAMS,
3484                                          &rp, sizeof(rp));
3485
3486         if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3487                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3488                                          MGMT_STATUS_INVALID_PARAMS,
3489                                          &rp, sizeof(rp));
3490
3491         hci_dev_lock(hdev);
3492
3493         if (!hdev_is_powered(hdev)) {
3494                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3495                                         MGMT_STATUS_NOT_POWERED, &rp,
3496                                         sizeof(rp));
3497                 goto unlock;
3498         }
3499
3500         if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3501                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3502                                         MGMT_STATUS_ALREADY_PAIRED, &rp,
3503                                         sizeof(rp));
3504                 goto unlock;
3505         }
3506
3507         sec_level = BT_SECURITY_MEDIUM;
3508         auth_type = HCI_AT_DEDICATED_BONDING;
3509
3510         if (cp->addr.type == BDADDR_BREDR) {
3511                 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3512                                        auth_type, CONN_REASON_PAIR_DEVICE);
3513         } else {
3514                 u8 addr_type = le_addr_type(cp->addr.type);
3515                 struct hci_conn_params *p;
3516
3517                 /* When pairing a new device, it is expected that the
3518                  * device will be remembered for future connections.
3519                  * Adding the connection parameters ahead of time allows
3520                  * tracking of the peripheral's preferred values and
3521                  * speeds up any further connection establishment.
3522                  *
3523                  * If connection parameters already exist, they are
3524                  * kept and this function does nothing.
3525                  */
3526                 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3527
3528                 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3529                         p->auto_connect = HCI_AUTO_CONN_DISABLED;
3530
3531                 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3532                                            sec_level, HCI_LE_CONN_TIMEOUT,
3533                                            CONN_REASON_PAIR_DEVICE);
3534         }
3535
3536         if (IS_ERR(conn)) {
3537                 int status;
3538
3539                 if (PTR_ERR(conn) == -EBUSY)
3540                         status = MGMT_STATUS_BUSY;
3541                 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3542                         status = MGMT_STATUS_NOT_SUPPORTED;
3543                 else if (PTR_ERR(conn) == -ECONNREFUSED)
3544                         status = MGMT_STATUS_REJECTED;
3545                 else
3546                         status = MGMT_STATUS_CONNECT_FAILED;
3547
3548                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3549                                         status, &rp, sizeof(rp));
3550                 goto unlock;
3551         }
3552
3553         if (conn->connect_cfm_cb) {
3554                 hci_conn_drop(conn);
3555                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3556                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3557                 goto unlock;
3558         }
3559
3560         cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3561         if (!cmd) {
3562                 err = -ENOMEM;
3563                 hci_conn_drop(conn);
3564                 goto unlock;
3565         }
3566
3567         cmd->cmd_complete = pairing_complete;
3568
3569         /* For LE, just connecting isn't a proof that the pairing finished */
3570         if (cp->addr.type == BDADDR_BREDR) {
3571                 conn->connect_cfm_cb = pairing_complete_cb;
3572                 conn->security_cfm_cb = pairing_complete_cb;
3573                 conn->disconn_cfm_cb = pairing_complete_cb;
3574         } else {
3575                 conn->connect_cfm_cb = le_pairing_complete_cb;
3576                 conn->security_cfm_cb = le_pairing_complete_cb;
3577                 conn->disconn_cfm_cb = le_pairing_complete_cb;
3578         }
3579
3580         conn->io_capability = cp->io_cap;
3581         cmd->user_data = hci_conn_get(conn);
3582
3583         if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3584             hci_conn_security(conn, sec_level, auth_type, true)) {
3585                 cmd->cmd_complete(cmd, 0);
3586                 mgmt_pending_remove(cmd);
3587         }
3588
3589         err = 0;
3590
3591 unlock:
3592         hci_dev_unlock(hdev);
3593         return err;
3594 }
3595
3596 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3597                               u16 len)
3598 {
3599         struct mgmt_addr_info *addr = data;
3600         struct mgmt_pending_cmd *cmd;
3601         struct hci_conn *conn;
3602         int err;
3603
3604         bt_dev_dbg(hdev, "sock %p", sk);
3605
3606         hci_dev_lock(hdev);
3607
3608         if (!hdev_is_powered(hdev)) {
3609                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3610                                       MGMT_STATUS_NOT_POWERED);
3611                 goto unlock;
3612         }
3613
3614         cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3615         if (!cmd) {
3616                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3617                                       MGMT_STATUS_INVALID_PARAMS);
3618                 goto unlock;
3619         }
3620
3621         conn = cmd->user_data;
3622
3623         if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3624                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3625                                       MGMT_STATUS_INVALID_PARAMS);
3626                 goto unlock;
3627         }
3628
3629         cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3630         mgmt_pending_remove(cmd);
3631
3632         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3633                                 addr, sizeof(*addr));
3634
3635         /* Since the user doesn't want to proceed with the connection,
3636          * abort any ongoing pairing and then terminate the link if it
3637          * was created because of the Pair Device action.
3638          */
3639         if (addr->type == BDADDR_BREDR)
3640                 hci_remove_link_key(hdev, &addr->bdaddr);
3641         else
3642                 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3643                                               le_addr_type(addr->type));
3644
3645         if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3646                 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3647
3648 unlock:
3649         hci_dev_unlock(hdev);
3650         return err;
3651 }
3652
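/* Shared helper for the user confirmation, passkey and PIN negative reply
 * commands. For LE addresses the response is handed to SMP and answered
 * immediately; for BR/EDR the matching HCI reply command is sent and the
 * management command completes when the corresponding HCI event arrives.
 */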
3653 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3654                              struct mgmt_addr_info *addr, u16 mgmt_op,
3655                              u16 hci_op, __le32 passkey)
3656 {
3657         struct mgmt_pending_cmd *cmd;
3658         struct hci_conn *conn;
3659         int err;
3660
3661         hci_dev_lock(hdev);
3662
3663         if (!hdev_is_powered(hdev)) {
3664                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3665                                         MGMT_STATUS_NOT_POWERED, addr,
3666                                         sizeof(*addr));
3667                 goto done;
3668         }
3669
3670         if (addr->type == BDADDR_BREDR)
3671                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3672         else
3673                 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3674                                                le_addr_type(addr->type));
3675
3676         if (!conn) {
3677                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3678                                         MGMT_STATUS_NOT_CONNECTED, addr,
3679                                         sizeof(*addr));
3680                 goto done;
3681         }
3682
3683         if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3684                 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3685                 if (!err)
3686                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3687                                                 MGMT_STATUS_SUCCESS, addr,
3688                                                 sizeof(*addr));
3689                 else
3690                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3691                                                 MGMT_STATUS_FAILED, addr,
3692                                                 sizeof(*addr));
3693
3694                 goto done;
3695         }
3696
3697         cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3698         if (!cmd) {
3699                 err = -ENOMEM;
3700                 goto done;
3701         }
3702
3703         cmd->cmd_complete = addr_cmd_complete;
3704
3705         /* Continue with pairing via HCI */
3706         if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3707                 struct hci_cp_user_passkey_reply cp;
3708
3709                 bacpy(&cp.bdaddr, &addr->bdaddr);
3710                 cp.passkey = passkey;
3711                 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3712         } else
3713                 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3714                                    &addr->bdaddr);
3715
3716         if (err < 0)
3717                 mgmt_pending_remove(cmd);
3718
3719 done:
3720         hci_dev_unlock(hdev);
3721         return err;
3722 }
3723
3724 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3725                               void *data, u16 len)
3726 {
3727         struct mgmt_cp_pin_code_neg_reply *cp = data;
3728
3729         bt_dev_dbg(hdev, "sock %p", sk);
3730
3731         return user_pairing_resp(sk, hdev, &cp->addr,
3732                                 MGMT_OP_PIN_CODE_NEG_REPLY,
3733                                 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3734 }
3735
3736 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3737                               u16 len)
3738 {
3739         struct mgmt_cp_user_confirm_reply *cp = data;
3740
3741         bt_dev_dbg(hdev, "sock %p", sk);
3742
3743         if (len != sizeof(*cp))
3744                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3745                                        MGMT_STATUS_INVALID_PARAMS);
3746
3747         return user_pairing_resp(sk, hdev, &cp->addr,
3748                                  MGMT_OP_USER_CONFIRM_REPLY,
3749                                  HCI_OP_USER_CONFIRM_REPLY, 0);
3750 }
3751
3752 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3753                                   void *data, u16 len)
3754 {
3755         struct mgmt_cp_user_confirm_neg_reply *cp = data;
3756
3757         bt_dev_dbg(hdev, "sock %p", sk);
3758
3759         return user_pairing_resp(sk, hdev, &cp->addr,
3760                                  MGMT_OP_USER_CONFIRM_NEG_REPLY,
3761                                  HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3762 }
3763
3764 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3765                               u16 len)
3766 {
3767         struct mgmt_cp_user_passkey_reply *cp = data;
3768
3769         bt_dev_dbg(hdev, "sock %p", sk);
3770
3771         return user_pairing_resp(sk, hdev, &cp->addr,
3772                                  MGMT_OP_USER_PASSKEY_REPLY,
3773                                  HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3774 }
3775
3776 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3777                                   void *data, u16 len)
3778 {
3779         struct mgmt_cp_user_passkey_neg_reply *cp = data;
3780
3781         bt_dev_dbg(hdev, "sock %p", sk);
3782
3783         return user_pairing_resp(sk, hdev, &cp->addr,
3784                                  MGMT_OP_USER_PASSKEY_NEG_REPLY,
3785                                  HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3786 }
3787
3788 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3789 {
3790         struct adv_info *adv_instance;
3791
3792         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3793         if (!adv_instance)
3794                 return 0;
3795
3796         /* stop if current instance doesn't need to be changed */
3797         if (!(adv_instance->flags & flags))
3798                 return 0;
3799
3800         cancel_adv_timeout(hdev);
3801
3802         adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3803         if (!adv_instance)
3804                 return 0;
3805
3806         hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3807
3808         return 0;
3809 }
3810
3811 static int name_changed_sync(struct hci_dev *hdev, void *data)
3812 {
3813         return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3814 }
3815
3816 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3817 {
3818         struct mgmt_pending_cmd *cmd = data;
3819         struct mgmt_cp_set_local_name *cp = cmd->param;
3820         u8 status = mgmt_status(err);
3821
3822         bt_dev_dbg(hdev, "err %d", err);
3823
3824         if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3825                 return;
3826
3827         if (status) {
3828                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3829                                 status);
3830         } else {
3831                 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3832                                   cp, sizeof(*cp));
3833
3834                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3835                         hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3836         }
3837
3838         mgmt_pending_remove(cmd);
3839 }
3840
3841 static int set_name_sync(struct hci_dev *hdev, void *data)
3842 {
3843         if (lmp_bredr_capable(hdev)) {
3844                 hci_update_name_sync(hdev);
3845                 hci_update_eir_sync(hdev);
3846         }
3847
3848         /* The name is stored in the scan response data, so there
3849          * is no need to update the advertising data here.
3850          */
3851         if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3852                 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3853
3854         return 0;
3855 }
3856
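/* Set Local Name: stores the new complete and short name. When the
 * controller is powered the update is queued through set_name_sync() so
 * that the BR/EDR name/EIR and the LE scan response data are refreshed;
 * when powered off only the stored values change and the local name
 * changed event is sent right away.
 */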
3857 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3858                           u16 len)
3859 {
3860         struct mgmt_cp_set_local_name *cp = data;
3861         struct mgmt_pending_cmd *cmd;
3862         int err;
3863
3864         bt_dev_dbg(hdev, "sock %p", sk);
3865
3866         hci_dev_lock(hdev);
3867
3868         /* If the old values are the same as the new ones, just return
3869          * a direct command complete event.
3870          */
3871         if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3872             !memcmp(hdev->short_name, cp->short_name,
3873                     sizeof(hdev->short_name))) {
3874                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3875                                         data, len);
3876                 goto failed;
3877         }
3878
3879         memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3880
3881         if (!hdev_is_powered(hdev)) {
3882                 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3883
3884                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3885                                         data, len);
3886                 if (err < 0)
3887                         goto failed;
3888
3889                 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3890                                          len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3891                 ext_info_changed(hdev, sk);
3892
3893                 goto failed;
3894         }
3895
3896         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3897         if (!cmd)
3898                 err = -ENOMEM;
3899         else
3900                 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3901                                          set_name_complete);
3902
3903         if (err < 0) {
3904                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3905                                       MGMT_STATUS_FAILED);
3906
3907                 if (cmd)
3908                         mgmt_pending_remove(cmd);
3909
3910                 goto failed;
3911         }
3912
3913         memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3914
3915 failed:
3916         hci_dev_unlock(hdev);
3917         return err;
3918 }
3919
3920 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3921 {
3922         return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3923 }
3924
3925 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3926                           u16 len)
3927 {
3928         struct mgmt_cp_set_appearance *cp = data;
3929         u16 appearance;
3930         int err;
3931
3932         bt_dev_dbg(hdev, "sock %p", sk);
3933
3934         if (!lmp_le_capable(hdev))
3935                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3936                                        MGMT_STATUS_NOT_SUPPORTED);
3937
3938         appearance = le16_to_cpu(cp->appearance);
3939
3940         hci_dev_lock(hdev);
3941
3942         if (hdev->appearance != appearance) {
3943                 hdev->appearance = appearance;
3944
3945                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3946                         hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3947                                            NULL);
3948
3949                 ext_info_changed(hdev, sk);
3950         }
3951
3952         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3953                                 0);
3954
3955         hci_dev_unlock(hdev);
3956
3957         return err;
3958 }
3959
3960 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3961                                  void *data, u16 len)
3962 {
3963         struct mgmt_rp_get_phy_configuration rp;
3964
3965         bt_dev_dbg(hdev, "sock %p", sk);
3966
3967         hci_dev_lock(hdev);
3968
3969         memset(&rp, 0, sizeof(rp));
3970
3971         rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3972         rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3973         rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3974
3975         hci_dev_unlock(hdev);
3976
3977         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3978                                  &rp, sizeof(rp));
3979 }
3980
3981 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3982 {
3983         struct mgmt_ev_phy_configuration_changed ev;
3984
3985         memset(&ev, 0, sizeof(ev));
3986
3987         ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3988
3989         return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3990                           sizeof(ev), skip);
3991 }
3992
3993 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3994 {
3995         struct mgmt_pending_cmd *cmd = data;
3996         struct sk_buff *skb = cmd->skb;
3997         u8 status = mgmt_status(err);
3998
3999         if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4000                 return;
4001
4002         if (!status) {
4003                 if (!skb)
4004                         status = MGMT_STATUS_FAILED;
4005                 else if (IS_ERR(skb))
4006                         status = mgmt_status(PTR_ERR(skb));
4007                 else
4008                         status = mgmt_status(skb->data[0]);
4009         }
4010
4011         bt_dev_dbg(hdev, "status %d", status);
4012
4013         if (status) {
4014                 mgmt_cmd_status(cmd->sk, hdev->id,
4015                                 MGMT_OP_SET_PHY_CONFIGURATION, status);
4016         } else {
4017                 mgmt_cmd_complete(cmd->sk, hdev->id,
4018                                   MGMT_OP_SET_PHY_CONFIGURATION, 0,
4019                                   NULL, 0);
4020
4021                 mgmt_phy_configuration_changed(hdev, cmd->sk);
4022         }
4023
4024         if (skb && !IS_ERR(skb))
4025                 kfree_skb(skb);
4026
4027         mgmt_pending_remove(cmd);
4028 }
4029
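/* Translate the selected MGMT LE PHY bits into an HCI LE Set Default PHY
 * command. In all_phys, bit 0 means the host has no transmitter PHY
 * preference and bit 1 means no receiver preference, so those bits are
 * set whenever the corresponding TX/RX selection is left empty.
 */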
4030 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4031 {
4032         struct mgmt_pending_cmd *cmd = data;
4033         struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4034         struct hci_cp_le_set_default_phy cp_phy;
4035         u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4036
4037         memset(&cp_phy, 0, sizeof(cp_phy));
4038
4039         if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4040                 cp_phy.all_phys |= 0x01;
4041
4042         if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4043                 cp_phy.all_phys |= 0x02;
4044
4045         if (selected_phys & MGMT_PHY_LE_1M_TX)
4046                 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4047
4048         if (selected_phys & MGMT_PHY_LE_2M_TX)
4049                 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4050
4051         if (selected_phys & MGMT_PHY_LE_CODED_TX)
4052                 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4053
4054         if (selected_phys & MGMT_PHY_LE_1M_RX)
4055                 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4056
4057         if (selected_phys & MGMT_PHY_LE_2M_RX)
4058                 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4059
4060         if (selected_phys & MGMT_PHY_LE_CODED_RX)
4061                 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4062
4063         cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4064                                    sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4065
4066         return 0;
4067 }
4068
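/* Set PHY Configuration: validates the selected PHYs against what the
 * controller supports and what is actually configurable, then derives the
 * BR/EDR packet type mask. Note that the EDR entries in pkt_type are
 * "do not use" bits, which is why selecting an EDR rate clears the
 * corresponding HCI_2DHx/HCI_3DHx bit instead of setting it. The LE part
 * is forwarded to the controller via set_default_phy_sync().
 */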
4069 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4070                                  void *data, u16 len)
4071 {
4072         struct mgmt_cp_set_phy_configuration *cp = data;
4073         struct mgmt_pending_cmd *cmd;
4074         u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4075         u16 pkt_type = (HCI_DH1 | HCI_DM1);
4076         bool changed = false;
4077         int err;
4078
4079         bt_dev_dbg(hdev, "sock %p", sk);
4080
4081         configurable_phys = get_configurable_phys(hdev);
4082         supported_phys = get_supported_phys(hdev);
4083         selected_phys = __le32_to_cpu(cp->selected_phys);
4084
4085         if (selected_phys & ~supported_phys)
4086                 return mgmt_cmd_status(sk, hdev->id,
4087                                        MGMT_OP_SET_PHY_CONFIGURATION,
4088                                        MGMT_STATUS_INVALID_PARAMS);
4089
4090         unconfigure_phys = supported_phys & ~configurable_phys;
4091
4092         if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4093                 return mgmt_cmd_status(sk, hdev->id,
4094                                        MGMT_OP_SET_PHY_CONFIGURATION,
4095                                        MGMT_STATUS_INVALID_PARAMS);
4096
4097         if (selected_phys == get_selected_phys(hdev))
4098                 return mgmt_cmd_complete(sk, hdev->id,
4099                                          MGMT_OP_SET_PHY_CONFIGURATION,
4100                                          0, NULL, 0);
4101
4102         hci_dev_lock(hdev);
4103
4104         if (!hdev_is_powered(hdev)) {
4105                 err = mgmt_cmd_status(sk, hdev->id,
4106                                       MGMT_OP_SET_PHY_CONFIGURATION,
4107                                       MGMT_STATUS_REJECTED);
4108                 goto unlock;
4109         }
4110
4111         if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4112                 err = mgmt_cmd_status(sk, hdev->id,
4113                                       MGMT_OP_SET_PHY_CONFIGURATION,
4114                                       MGMT_STATUS_BUSY);
4115                 goto unlock;
4116         }
4117
4118         if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4119                 pkt_type |= (HCI_DH3 | HCI_DM3);
4120         else
4121                 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4122
4123         if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4124                 pkt_type |= (HCI_DH5 | HCI_DM5);
4125         else
4126                 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4127
4128         if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4129                 pkt_type &= ~HCI_2DH1;
4130         else
4131                 pkt_type |= HCI_2DH1;
4132
4133         if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4134                 pkt_type &= ~HCI_2DH3;
4135         else
4136                 pkt_type |= HCI_2DH3;
4137
4138         if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4139                 pkt_type &= ~HCI_2DH5;
4140         else
4141                 pkt_type |= HCI_2DH5;
4142
4143         if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4144                 pkt_type &= ~HCI_3DH1;
4145         else
4146                 pkt_type |= HCI_3DH1;
4147
4148         if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4149                 pkt_type &= ~HCI_3DH3;
4150         else
4151                 pkt_type |= HCI_3DH3;
4152
4153         if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4154                 pkt_type &= ~HCI_3DH5;
4155         else
4156                 pkt_type |= HCI_3DH5;
4157
4158         if (pkt_type != hdev->pkt_type) {
4159                 hdev->pkt_type = pkt_type;
4160                 changed = true;
4161         }
4162
4163         if ((selected_phys & MGMT_PHY_LE_MASK) ==
4164             (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4165                 if (changed)
4166                         mgmt_phy_configuration_changed(hdev, sk);
4167
4168                 err = mgmt_cmd_complete(sk, hdev->id,
4169                                         MGMT_OP_SET_PHY_CONFIGURATION,
4170                                         0, NULL, 0);
4171
4172                 goto unlock;
4173         }
4174
4175         cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4176                                len);
4177         if (!cmd)
4178                 err = -ENOMEM;
4179         else
4180                 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4181                                          set_default_phy_complete);
4182
4183         if (err < 0) {
4184                 err = mgmt_cmd_status(sk, hdev->id,
4185                                       MGMT_OP_SET_PHY_CONFIGURATION,
4186                                       MGMT_STATUS_FAILED);
4187
4188                 if (cmd)
4189                         mgmt_pending_remove(cmd);
4190         }
4191
4192 unlock:
4193         hci_dev_unlock(hdev);
4194
4195         return err;
4196 }
4197
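/* Set Blocked Keys: replaces the list of key values that the kernel must
 * refuse to use or distribute (see hci_is_blocked_key()). Each entry is a
 * 16 byte value tagged with its type, e.g. HCI_BLOCKED_KEY_TYPE_LINKKEY,
 * HCI_BLOCKED_KEY_TYPE_LTK or HCI_BLOCKED_KEY_TYPE_IRK.
 */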
4198 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4199                             u16 len)
4200 {
4201         int err = MGMT_STATUS_SUCCESS;
4202         struct mgmt_cp_set_blocked_keys *keys = data;
4203         const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4204                                    sizeof(struct mgmt_blocked_key_info));
4205         u16 key_count, expected_len;
4206         int i;
4207
4208         bt_dev_dbg(hdev, "sock %p", sk);
4209
4210         key_count = __le16_to_cpu(keys->key_count);
4211         if (key_count > max_key_count) {
4212                 bt_dev_err(hdev, "too big key_count value %u", key_count);
4213                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4214                                        MGMT_STATUS_INVALID_PARAMS);
4215         }
4216
4217         expected_len = struct_size(keys, keys, key_count);
4218         if (expected_len != len) {
4219                 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4220                            expected_len, len);
4221                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4222                                        MGMT_STATUS_INVALID_PARAMS);
4223         }
4224
4225         hci_dev_lock(hdev);
4226
4227         hci_blocked_keys_clear(hdev);
4228
4229         for (i = 0; i < key_count; ++i) {
4230                 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4231
4232                 if (!b) {
4233                         err = MGMT_STATUS_NO_RESOURCES;
4234                         break;
4235                 }
4236
4237                 b->type = keys->keys[i].type;
4238                 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4239                 list_add_rcu(&b->list, &hdev->blocked_keys);
4240         }
4241         hci_dev_unlock(hdev);
4242
4243         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4244                                 err, NULL, 0);
4245 }
4246
4247 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4248                                void *data, u16 len)
4249 {
4250         struct mgmt_mode *cp = data;
4251         int err;
4252         bool changed = false;
4253
4254         bt_dev_dbg(hdev, "sock %p", sk);
4255
4256         if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4257                 return mgmt_cmd_status(sk, hdev->id,
4258                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4259                                        MGMT_STATUS_NOT_SUPPORTED);
4260
4261         if (cp->val != 0x00 && cp->val != 0x01)
4262                 return mgmt_cmd_status(sk, hdev->id,
4263                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4264                                        MGMT_STATUS_INVALID_PARAMS);
4265
4266         hci_dev_lock(hdev);
4267
4268         if (hdev_is_powered(hdev) &&
4269             !!cp->val != hci_dev_test_flag(hdev,
4270                                            HCI_WIDEBAND_SPEECH_ENABLED)) {
4271                 err = mgmt_cmd_status(sk, hdev->id,
4272                                       MGMT_OP_SET_WIDEBAND_SPEECH,
4273                                       MGMT_STATUS_REJECTED);
4274                 goto unlock;
4275         }
4276
4277         if (cp->val)
4278                 changed = !hci_dev_test_and_set_flag(hdev,
4279                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4280         else
4281                 changed = hci_dev_test_and_clear_flag(hdev,
4282                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4283
4284         err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4285         if (err < 0)
4286                 goto unlock;
4287
4288         if (changed)
4289                 err = new_settings(hdev, sk);
4290
4291 unlock:
4292         hci_dev_unlock(hdev);
4293         return err;
4294 }
4295
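/* Read Controller Capabilities: builds a small TLV list (using the same
 * length/type framing as EIR data) describing security related
 * capabilities such as public key validation, encryption key size
 * enforcement, the maximum encryption key sizes and, when available, the
 * LE TX power range.
 */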
4296 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4297                                void *data, u16 data_len)
4298 {
4299         char buf[20];
4300         struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4301         u16 cap_len = 0;
4302         u8 flags = 0;
4303         u8 tx_power_range[2];
4304
4305         bt_dev_dbg(hdev, "sock %p", sk);
4306
4307         memset(&buf, 0, sizeof(buf));
4308
4309         hci_dev_lock(hdev);
4310
4311         /* When the Read Simple Pairing Options command is supported,
4312          * remote public key validation is supported.
4313          *
4314          * Alternatively, when Microsoft extensions are available, they can
4315          * indicate support for public key validation as well.
4316          */
4317         if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4318                 flags |= 0x01;  /* Remote public key validation (BR/EDR) */
4319
4320         flags |= 0x02;          /* Remote public key validation (LE) */
4321
4322         /* When the Read Encryption Key Size command is supported, then the
4323          * encryption key size is enforced.
4324          */
4325         if (hdev->commands[20] & 0x10)
4326                 flags |= 0x04;  /* Encryption key size enforcement (BR/EDR) */
4327
4328         flags |= 0x08;          /* Encryption key size enforcement (LE) */
4329
4330         cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4331                                   &flags, 1);
4332
4333         /* When the Read Simple Pairing Options command is supported, the
4334          * maximum encryption key size information is also provided.
4335          */
4336         if (hdev->commands[41] & 0x08)
4337                 cap_len = eir_append_le16(rp->cap, cap_len,
4338                                           MGMT_CAP_MAX_ENC_KEY_SIZE,
4339                                           hdev->max_enc_key_size);
4340
4341         cap_len = eir_append_le16(rp->cap, cap_len,
4342                                   MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4343                                   SMP_MAX_ENC_KEY_SIZE);
4344
4345         /* Append the min/max LE TX power parameters if we were able to
4346          * fetch them from the controller.
4347          */
4348         if (hdev->commands[38] & 0x80) {
4349                 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4350                 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4351                 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4352                                           tx_power_range, 2);
4353         }
4354
4355         rp->cap_len = cpu_to_le16(cap_len);
4356
4357         hci_dev_unlock(hdev);
4358
4359         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4360                                  rp, sizeof(*rp) + cap_len);
4361 }
4362
4363 #ifdef CONFIG_BT_FEATURE_DEBUG
4364 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4365 static const u8 debug_uuid[16] = {
4366         0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4367         0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4368 };
4369 #endif
4370
4371 /* 330859bc-7506-492d-9370-9a6f0614037f */
4372 static const u8 quality_report_uuid[16] = {
4373         0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4374         0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4375 };
4376
4377 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4378 static const u8 offload_codecs_uuid[16] = {
4379         0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4380         0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4381 };
4382
4383 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4384 static const u8 le_simultaneous_roles_uuid[16] = {
4385         0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4386         0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4387 };
4388
4389 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4390 static const u8 rpa_resolution_uuid[16] = {
4391         0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4392         0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4393 };
4394
4395 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4396 static const u8 iso_socket_uuid[16] = {
4397         0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4398         0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4399 };
4400
4401 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4402 static const u8 mgmt_mesh_uuid[16] = {
4403         0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4404         0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4405 };
4406
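/* Read Experimental Features Information: reports which of the UUIDs above
 * are available for this controller (or globally when no index is given).
 * Each entry is a 16 byte UUID followed by a 32 bit flags field in which
 * BIT(0) indicates that the feature is currently enabled.
 */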
4407 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4408                                   void *data, u16 data_len)
4409 {
4410         struct mgmt_rp_read_exp_features_info *rp;
4411         size_t len;
4412         u16 idx = 0;
4413         u32 flags;
4414         int status;
4415
4416         bt_dev_dbg(hdev, "sock %p", sk);
4417
4418         /* Enough space for 7 features */
4419         len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4420         rp = kzalloc(len, GFP_KERNEL);
4421         if (!rp)
4422                 return -ENOMEM;
4423
4424 #ifdef CONFIG_BT_FEATURE_DEBUG
4425         if (!hdev) {
4426                 flags = bt_dbg_get() ? BIT(0) : 0;
4427
4428                 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4429                 rp->features[idx].flags = cpu_to_le32(flags);
4430                 idx++;
4431         }
4432 #endif
4433
4434         if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4435                 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4436                         flags = BIT(0);
4437                 else
4438                         flags = 0;
4439
4440                 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4441                 rp->features[idx].flags = cpu_to_le32(flags);
4442                 idx++;
4443         }
4444
4445         if (hdev && ll_privacy_capable(hdev)) {
4446                 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4447                         flags = BIT(0) | BIT(1);
4448                 else
4449                         flags = BIT(1);
4450
4451                 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4452                 rp->features[idx].flags = cpu_to_le32(flags);
4453                 idx++;
4454         }
4455
4456         if (hdev && (aosp_has_quality_report(hdev) ||
4457                      hdev->set_quality_report)) {
4458                 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4459                         flags = BIT(0);
4460                 else
4461                         flags = 0;
4462
4463                 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4464                 rp->features[idx].flags = cpu_to_le32(flags);
4465                 idx++;
4466         }
4467
4468         if (hdev && hdev->get_data_path_id) {
4469                 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4470                         flags = BIT(0);
4471                 else
4472                         flags = 0;
4473
4474                 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4475                 rp->features[idx].flags = cpu_to_le32(flags);
4476                 idx++;
4477         }
4478
4479         if (IS_ENABLED(CONFIG_BT_LE)) {
4480                 flags = iso_enabled() ? BIT(0) : 0;
4481                 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4482                 rp->features[idx].flags = cpu_to_le32(flags);
4483                 idx++;
4484         }
4485
4486         if (hdev && lmp_le_capable(hdev)) {
4487                 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4488                         flags = BIT(0);
4489                 else
4490                         flags = 0;
4491
4492                 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4493                 rp->features[idx].flags = cpu_to_le32(flags);
4494                 idx++;
4495         }
4496
4497         rp->feature_count = cpu_to_le16(idx);
4498
4499         /* After reading the experimental features information, enable
4500          * the events to update client on any future change.
4501          */
4502         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4503
4504         status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4505                                    MGMT_OP_READ_EXP_FEATURES_INFO,
4506                                    0, rp, sizeof(*rp) + (20 * idx));
4507
4508         kfree(rp);
4509         return status;
4510 }
4511
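/* Notify sockets that enabled HCI_MGMT_EXP_FEATURE_EVENTS about a change
 * of the LL privacy (RPA resolution) experimental feature and keep
 * hdev->conn_flags in sync: HCI_CONN_FLAG_DEVICE_PRIVACY is only offered
 * while the feature is enabled on a privacy-mode-capable controller.
 */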
4512 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4513                                           struct sock *skip)
4514 {
4515         struct mgmt_ev_exp_feature_changed ev;
4516
4517         memset(&ev, 0, sizeof(ev));
4518         memcpy(ev.uuid, rpa_resolution_uuid, 16);
4519         ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4520
4521         /* TODO: should the conn_flags update be atomic? */
4522         if (enabled && privacy_mode_capable(hdev))
4523                 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4524         else
4525                 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4526
4527         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4528                                   &ev, sizeof(ev),
4529                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4530
4531 }
4532
4533 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4534                                bool enabled, struct sock *skip)
4535 {
4536         struct mgmt_ev_exp_feature_changed ev;
4537
4538         memset(&ev, 0, sizeof(ev));
4539         memcpy(ev.uuid, uuid, 16);
4540         ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4541
4542         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4543                                   &ev, sizeof(ev),
4544                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4545 }
4546
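/* Entry initializer for the exp_features[] dispatch table below: each
 * entry maps an experimental feature UUID to the handler that
 * set_exp_feature() invokes for it.
 */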
4547 #define EXP_FEAT(_uuid, _set_func)      \
4548 {                                       \
4549         .uuid = _uuid,                  \
4550         .set_func = _set_func,          \
4551 }
4552
4553 /* The zero key uuid is special. Multiple exp features are set through it. */
4554 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4555                              struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4556 {
4557         struct mgmt_rp_set_exp_feature rp;
4558
4559         memset(rp.uuid, 0, 16);
4560         rp.flags = cpu_to_le32(0);
4561
4562 #ifdef CONFIG_BT_FEATURE_DEBUG
4563         if (!hdev) {
4564                 bool changed = bt_dbg_get();
4565
4566                 bt_dbg_set(false);
4567
4568                 if (changed)
4569                         exp_feature_changed(NULL, ZERO_KEY, false, sk);
4570         }
4571 #endif
4572
4573         if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4574                 bool changed;
4575
4576                 changed = hci_dev_test_and_clear_flag(hdev,
4577                                                       HCI_ENABLE_LL_PRIVACY);
4578                 if (changed)
4579                         exp_feature_changed(hdev, rpa_resolution_uuid, false,
4580                                             sk);
4581         }
4582
4583         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4584
4585         return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4586                                  MGMT_OP_SET_EXP_FEATURE, 0,
4587                                  &rp, sizeof(rp));
4588 }
4589
4590 #ifdef CONFIG_BT_FEATURE_DEBUG
4591 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4592                           struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4593 {
4594         struct mgmt_rp_set_exp_feature rp;
4595
4596         bool val, changed;
4597         int err;
4598
4599         /* Command must use the non-controller index */
4600         if (hdev)
4601                 return mgmt_cmd_status(sk, hdev->id,
4602                                        MGMT_OP_SET_EXP_FEATURE,
4603                                        MGMT_STATUS_INVALID_INDEX);
4604
4605         /* Parameters are limited to a single octet */
4606         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4607                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4608                                        MGMT_OP_SET_EXP_FEATURE,
4609                                        MGMT_STATUS_INVALID_PARAMS);
4610
4611         /* Only boolean on/off is supported */
4612         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4613                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4614                                        MGMT_OP_SET_EXP_FEATURE,
4615                                        MGMT_STATUS_INVALID_PARAMS);
4616
4617         val = !!cp->param[0];
4618         changed = val ? !bt_dbg_get() : bt_dbg_get();
4619         bt_dbg_set(val);
4620
4621         memcpy(rp.uuid, debug_uuid, 16);
4622         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4623
4624         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4625
4626         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4627                                 MGMT_OP_SET_EXP_FEATURE, 0,
4628                                 &rp, sizeof(rp));
4629
4630         if (changed)
4631                 exp_feature_changed(hdev, debug_uuid, val, sk);
4632
4633         return err;
4634 }
4635 #endif
4636
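/* Toggle the experimental mesh support gate. Enabling sets
 * HCI_MESH_EXPERIMENTAL; disabling clears both HCI_MESH and
 * HCI_MESH_EXPERIMENTAL so mesh cannot stay active once the experimental
 * gate is dropped. The parameter is a single boolean octet and the command
 * must target a controller index.
 */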
4637 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4638                               struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4639 {
4640         struct mgmt_rp_set_exp_feature rp;
4641         bool val, changed;
4642         int err;
4643
4644         /* Command must use the controller index */
4645         if (!hdev)
4646                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4647                                        MGMT_OP_SET_EXP_FEATURE,
4648                                        MGMT_STATUS_INVALID_INDEX);
4649
4650         /* Parameters are limited to a single octet */
4651         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4652                 return mgmt_cmd_status(sk, hdev->id,
4653                                        MGMT_OP_SET_EXP_FEATURE,
4654                                        MGMT_STATUS_INVALID_PARAMS);
4655
4656         /* Only boolean on/off is supported */
4657         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4658                 return mgmt_cmd_status(sk, hdev->id,
4659                                        MGMT_OP_SET_EXP_FEATURE,
4660                                        MGMT_STATUS_INVALID_PARAMS);
4661
4662         val = !!cp->param[0];
4663
4664         if (val) {
4665                 changed = !hci_dev_test_and_set_flag(hdev,
4666                                                      HCI_MESH_EXPERIMENTAL);
4667         } else {
4668                 hci_dev_clear_flag(hdev, HCI_MESH);
4669                 changed = hci_dev_test_and_clear_flag(hdev,
4670                                                       HCI_MESH_EXPERIMENTAL);
4671         }
4672
4673         memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4674         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4675
4676         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4677
4678         err = mgmt_cmd_complete(sk, hdev->id,
4679                                 MGMT_OP_SET_EXP_FEATURE, 0,
4680                                 &rp, sizeof(rp));
4681
4682         if (changed)
4683                 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4684
4685         return err;
4686 }
4687
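/* Toggle LL privacy (RPA resolution). Changes are only accepted while the
 * controller is powered off. Enabling sets HCI_ENABLE_LL_PRIVACY and
 * clears HCI_ADVERTISING; the reply flags carry BIT(0) for the enable
 * state and BIT(1) to signal that the supported settings changed.
 */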
4688 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4689                                    struct mgmt_cp_set_exp_feature *cp,
4690                                    u16 data_len)
4691 {
4692         struct mgmt_rp_set_exp_feature rp;
4693         bool val, changed;
4694         int err;
4695         u32 flags;
4696
4697         /* Command must use the controller index */
4698         if (!hdev)
4699                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4700                                        MGMT_OP_SET_EXP_FEATURE,
4701                                        MGMT_STATUS_INVALID_INDEX);
4702
4703         /* Changes can only be made while the controller is powered off */
4704         if (hdev_is_powered(hdev))
4705                 return mgmt_cmd_status(sk, hdev->id,
4706                                        MGMT_OP_SET_EXP_FEATURE,
4707                                        MGMT_STATUS_REJECTED);
4708
4709         /* Parameters are limited to a single octet */
4710         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4711                 return mgmt_cmd_status(sk, hdev->id,
4712                                        MGMT_OP_SET_EXP_FEATURE,
4713                                        MGMT_STATUS_INVALID_PARAMS);
4714
4715         /* Only boolean on/off is supported */
4716         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4717                 return mgmt_cmd_status(sk, hdev->id,
4718                                        MGMT_OP_SET_EXP_FEATURE,
4719                                        MGMT_STATUS_INVALID_PARAMS);
4720
4721         val = !!cp->param[0];
4722
4723         if (val) {
4724                 changed = !hci_dev_test_and_set_flag(hdev,
4725                                                      HCI_ENABLE_LL_PRIVACY);
4726                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4727
4728                 /* Enable LL privacy + supported settings changed */
4729                 flags = BIT(0) | BIT(1);
4730         } else {
4731                 changed = hci_dev_test_and_clear_flag(hdev,
4732                                                       HCI_ENABLE_LL_PRIVACY);
4733
4734                 /* Disable LL privacy + supported settings changed */
4735                 flags = BIT(1);
4736         }
4737
4738         memcpy(rp.uuid, rpa_resolution_uuid, 16);
4739         rp.flags = cpu_to_le32(flags);
4740
4741         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4742
4743         err = mgmt_cmd_complete(sk, hdev->id,
4744                                 MGMT_OP_SET_EXP_FEATURE, 0,
4745                                 &rp, sizeof(rp));
4746
4747         if (changed)
4748                 exp_ll_privacy_feature_changed(val, hdev, sk);
4749
4750         return err;
4751 }
4752
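/* Toggle controller quality reports. The driver's set_quality_report hook
 * is used when available, otherwise the AOSP vendor command; the whole
 * operation runs under hci_req_sync_lock() and HCI_QUALITY_REPORT tracks
 * the resulting state.
 */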
4753 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4754                                    struct mgmt_cp_set_exp_feature *cp,
4755                                    u16 data_len)
4756 {
4757         struct mgmt_rp_set_exp_feature rp;
4758         bool val, changed;
4759         int err;
4760
4761         /* Command must use a valid controller index */
4762         if (!hdev)
4763                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4764                                        MGMT_OP_SET_EXP_FEATURE,
4765                                        MGMT_STATUS_INVALID_INDEX);
4766
4767         /* Parameters are limited to a single octet */
4768         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4769                 return mgmt_cmd_status(sk, hdev->id,
4770                                        MGMT_OP_SET_EXP_FEATURE,
4771                                        MGMT_STATUS_INVALID_PARAMS);
4772
4773         /* Only boolean on/off is supported */
4774         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4775                 return mgmt_cmd_status(sk, hdev->id,
4776                                        MGMT_OP_SET_EXP_FEATURE,
4777                                        MGMT_STATUS_INVALID_PARAMS);
4778
4779         hci_req_sync_lock(hdev);
4780
4781         val = !!cp->param[0];
4782         changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4783
4784         if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4785                 err = mgmt_cmd_status(sk, hdev->id,
4786                                       MGMT_OP_SET_EXP_FEATURE,
4787                                       MGMT_STATUS_NOT_SUPPORTED);
4788                 goto unlock_quality_report;
4789         }
4790
4791         if (changed) {
4792                 if (hdev->set_quality_report)
4793                         err = hdev->set_quality_report(hdev, val);
4794                 else
4795                         err = aosp_set_quality_report(hdev, val);
4796
4797                 if (err) {
4798                         err = mgmt_cmd_status(sk, hdev->id,
4799                                               MGMT_OP_SET_EXP_FEATURE,
4800                                               MGMT_STATUS_FAILED);
4801                         goto unlock_quality_report;
4802                 }
4803
4804                 if (val)
4805                         hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4806                 else
4807                         hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4808         }
4809
4810         bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4811
4812         memcpy(rp.uuid, quality_report_uuid, 16);
4813         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4814         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4815
4816         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4817                                 &rp, sizeof(rp));
4818
4819         if (changed)
4820                 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4821
4822 unlock_quality_report:
4823         hci_req_sync_unlock(hdev);
4824         return err;
4825 }
4826
4827 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4828                                   struct mgmt_cp_set_exp_feature *cp,
4829                                   u16 data_len)
4830 {
4831         bool val, changed;
4832         int err;
4833         struct mgmt_rp_set_exp_feature rp;
4834
4835         /* Command must use a valid controller index */
4836         if (!hdev)
4837                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4838                                        MGMT_OP_SET_EXP_FEATURE,
4839                                        MGMT_STATUS_INVALID_INDEX);
4840
4841         /* Parameters are limited to a single octet */
4842         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4843                 return mgmt_cmd_status(sk, hdev->id,
4844                                        MGMT_OP_SET_EXP_FEATURE,
4845                                        MGMT_STATUS_INVALID_PARAMS);
4846
4847         /* Only boolean on/off is supported */
4848         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4849                 return mgmt_cmd_status(sk, hdev->id,
4850                                        MGMT_OP_SET_EXP_FEATURE,
4851                                        MGMT_STATUS_INVALID_PARAMS);
4852
4853         val = !!cp->param[0];
4854         changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4855
4856         if (!hdev->get_data_path_id) {
4857                 return mgmt_cmd_status(sk, hdev->id,
4858                                        MGMT_OP_SET_EXP_FEATURE,
4859                                        MGMT_STATUS_NOT_SUPPORTED);
4860         }
4861
4862         if (changed) {
4863                 if (val)
4864                         hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4865                 else
4866                         hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4867         }
4868
4869         bt_dev_info(hdev, "offload codecs enable %d changed %d",
4870                     val, changed);
4871
4872         memcpy(rp.uuid, offload_codecs_uuid, 16);
4873         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4874         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4875         err = mgmt_cmd_complete(sk, hdev->id,
4876                                 MGMT_OP_SET_EXP_FEATURE, 0,
4877                                 &rp, sizeof(rp));
4878
4879         if (changed)
4880                 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4881
4882         return err;
4883 }
4884
4885 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4886                                           struct mgmt_cp_set_exp_feature *cp,
4887                                           u16 data_len)
4888 {
4889         bool val, changed;
4890         int err;
4891         struct mgmt_rp_set_exp_feature rp;
4892
4893         /* Command must use a valid controller index */
4894         if (!hdev)
4895                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4896                                        MGMT_OP_SET_EXP_FEATURE,
4897                                        MGMT_STATUS_INVALID_INDEX);
4898
4899         /* Parameters are limited to a single octet */
4900         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4901                 return mgmt_cmd_status(sk, hdev->id,
4902                                        MGMT_OP_SET_EXP_FEATURE,
4903                                        MGMT_STATUS_INVALID_PARAMS);
4904
4905         /* Only boolean on/off is supported */
4906         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4907                 return mgmt_cmd_status(sk, hdev->id,
4908                                        MGMT_OP_SET_EXP_FEATURE,
4909                                        MGMT_STATUS_INVALID_PARAMS);
4910
4911         val = !!cp->param[0];
4912         changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4913
4914         if (!hci_dev_le_state_simultaneous(hdev)) {
4915                 return mgmt_cmd_status(sk, hdev->id,
4916                                        MGMT_OP_SET_EXP_FEATURE,
4917                                        MGMT_STATUS_NOT_SUPPORTED);
4918         }
4919
4920         if (changed) {
4921                 if (val)
4922                         hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4923                 else
4924                         hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4925         }
4926
4927         bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4928                     val, changed);
4929
4930         memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4931         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4932         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4933         err = mgmt_cmd_complete(sk, hdev->id,
4934                                 MGMT_OP_SET_EXP_FEATURE, 0,
4935                                 &rp, sizeof(rp));
4936
4937         if (changed)
4938                 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4939
4940         return err;
4941 }
4942
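/* Enable or disable experimental ISO socket support by registering
 * (iso_init()) or unregistering (iso_exit()) the ISO protocol. The command
 * must use the non-controller index and the change event is only emitted
 * when the init/exit call succeeded.
 */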
4943 #ifdef CONFIG_BT_LE
4944 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4945                                struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4946 {
4947         struct mgmt_rp_set_exp_feature rp;
4948         bool val, changed = false;
4949         int err;
4950
4951         /* Command must use the non-controller index */
4952         if (hdev)
4953                 return mgmt_cmd_status(sk, hdev->id,
4954                                        MGMT_OP_SET_EXP_FEATURE,
4955                                        MGMT_STATUS_INVALID_INDEX);
4956
4957         /* Parameters are limited to a single octet */
4958         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4959                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4960                                        MGMT_OP_SET_EXP_FEATURE,
4961                                        MGMT_STATUS_INVALID_PARAMS);
4962
4963         /* Only boolean on/off is supported */
4964         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4965                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4966                                        MGMT_OP_SET_EXP_FEATURE,
4967                                        MGMT_STATUS_INVALID_PARAMS);
4968
4969         val = !!cp->param[0];
4970         if (val)
4971                 err = iso_init();
4972         else
4973                 err = iso_exit();
4974
4975         if (!err)
4976                 changed = true;
4977
4978         memcpy(rp.uuid, iso_socket_uuid, 16);
4979         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4980
4981         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4982
4983         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4984                                 MGMT_OP_SET_EXP_FEATURE, 0,
4985                                 &rp, sizeof(rp));
4986
4987         if (changed)
4988                 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4989
4990         return err;
4991 }
4992 #endif
4993
4994 static const struct mgmt_exp_feature {
4995         const u8 *uuid;
4996         int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4997                         struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4998 } exp_features[] = {
4999         EXP_FEAT(ZERO_KEY, set_zero_key_func),
5000 #ifdef CONFIG_BT_FEATURE_DEBUG
5001         EXP_FEAT(debug_uuid, set_debug_func),
5002 #endif
5003         EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5004         EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5005         EXP_FEAT(quality_report_uuid, set_quality_report_func),
5006         EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5007         EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5008 #ifdef CONFIG_BT_LE
5009         EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5010 #endif
5011
5012         /* end with a null feature */
5013         EXP_FEAT(NULL, NULL)
5014 };
5015
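/* Dispatch MGMT_OP_SET_EXP_FEATURE: find the exp_features[] entry whose
 * 16-byte UUID matches the one in the command and call its setter. For the
 * boolean features above a single parameter octet (0x00/0x01) follows the
 * UUID, as enforced by the data_len checks in the setters. Unknown UUIDs
 * are rejected with MGMT_STATUS_NOT_SUPPORTED.
 */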
5016 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5017                            void *data, u16 data_len)
5018 {
5019         struct mgmt_cp_set_exp_feature *cp = data;
5020         size_t i = 0;
5021
5022         bt_dev_dbg(hdev, "sock %p", sk);
5023
5024         for (i = 0; exp_features[i].uuid; i++) {
5025                 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5026                         return exp_features[i].set_func(sk, hdev, cp, data_len);
5027         }
5028
5029         return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5030                                MGMT_OP_SET_EXP_FEATURE,
5031                                MGMT_STATUS_NOT_SUPPORTED);
5032 }
5033
5034 static u32 get_params_flags(struct hci_dev *hdev,
5035                             struct hci_conn_params *params)
5036 {
5037         u32 flags = hdev->conn_flags;
5038
5039         /* Devices using RPAs can only be programmed into the accept list
5040          * if LL Privacy has been enabled; otherwise they cannot use
5041          * HCI_CONN_FLAG_REMOTE_WAKEUP.
5042          */
5043         if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5044             hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5045                 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5046
5047         return flags;
5048 }
5049
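/* Handle MGMT_OP_GET_DEVICE_FLAGS. BR/EDR addresses are looked up in the
 * accept list, LE addresses in the connection parameters, where the
 * supported flags are additionally filtered by get_params_flags().
 * Unknown devices are rejected with MGMT_STATUS_INVALID_PARAMS.
 */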
5050 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5051                             u16 data_len)
5052 {
5053         struct mgmt_cp_get_device_flags *cp = data;
5054         struct mgmt_rp_get_device_flags rp;
5055         struct bdaddr_list_with_flags *br_params;
5056         struct hci_conn_params *params;
5057         u32 supported_flags;
5058         u32 current_flags = 0;
5059         u8 status = MGMT_STATUS_INVALID_PARAMS;
5060
5061         bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5062                    &cp->addr.bdaddr, cp->addr.type);
5063
5064         hci_dev_lock(hdev);
5065
5066         supported_flags = hdev->conn_flags;
5067
5068         memset(&rp, 0, sizeof(rp));
5069
5070         if (cp->addr.type == BDADDR_BREDR) {
5071                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5072                                                               &cp->addr.bdaddr,
5073                                                               cp->addr.type);
5074                 if (!br_params)
5075                         goto done;
5076
5077                 current_flags = br_params->flags;
5078         } else {
5079                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5080                                                 le_addr_type(cp->addr.type));
5081                 if (!params)
5082                         goto done;
5083
5084                 supported_flags = get_params_flags(hdev, params);
5085                 current_flags = params->flags;
5086         }
5087
5088         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5089         rp.addr.type = cp->addr.type;
5090         rp.supported_flags = cpu_to_le32(supported_flags);
5091         rp.current_flags = cpu_to_le32(current_flags);
5092
5093         status = MGMT_STATUS_SUCCESS;
5094
5095 done:
5096         hci_dev_unlock(hdev);
5097
5098         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5099                                 &rp, sizeof(rp));
5100 }
5101
5102 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5103                                  bdaddr_t *bdaddr, u8 bdaddr_type,
5104                                  u32 supported_flags, u32 current_flags)
5105 {
5106         struct mgmt_ev_device_flags_changed ev;
5107
5108         bacpy(&ev.addr.bdaddr, bdaddr);
5109         ev.addr.type = bdaddr_type;
5110         ev.supported_flags = cpu_to_le32(supported_flags);
5111         ev.current_flags = cpu_to_le32(current_flags);
5112
5113         mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5114 }
5115
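/* Handle MGMT_OP_SET_DEVICE_FLAGS. The requested flags must be a subset of
 * the supported ones. BR/EDR flags are stored in the accept list entry, LE
 * flags in the connection parameters, and passive scanning is re-evaluated
 * when HCI_CONN_FLAG_DEVICE_PRIVACY is set. On success a
 * MGMT_EV_DEVICE_FLAGS_CHANGED event is sent to the other sockets.
 */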
5116 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5117                             u16 len)
5118 {
5119         struct mgmt_cp_set_device_flags *cp = data;
5120         struct bdaddr_list_with_flags *br_params;
5121         struct hci_conn_params *params;
5122         u8 status = MGMT_STATUS_INVALID_PARAMS;
5123         u32 supported_flags;
5124         u32 current_flags = __le32_to_cpu(cp->current_flags);
5125
5126         bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5127                    &cp->addr.bdaddr, cp->addr.type, current_flags);
5128
5129         /* TODO: take hci_dev_lock() earlier; conn_flags can change */
5130         supported_flags = hdev->conn_flags;
5131
5132         if ((supported_flags | current_flags) != supported_flags) {
5133                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5134                             current_flags, supported_flags);
5135                 goto done;
5136         }
5137
5138         hci_dev_lock(hdev);
5139
5140         if (cp->addr.type == BDADDR_BREDR) {
5141                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5142                                                               &cp->addr.bdaddr,
5143                                                               cp->addr.type);
5144
5145                 if (br_params) {
5146                         br_params->flags = current_flags;
5147                         status = MGMT_STATUS_SUCCESS;
5148                 } else {
5149                         bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5150                                     &cp->addr.bdaddr, cp->addr.type);
5151                 }
5152
5153                 goto unlock;
5154         }
5155
5156         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5157                                         le_addr_type(cp->addr.type));
5158         if (!params) {
5159                 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5160                             &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5161                 goto unlock;
5162         }
5163
5164         supported_flags = get_params_flags(hdev, params);
5165
5166         if ((supported_flags | current_flags) != supported_flags) {
5167                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5168                             current_flags, supported_flags);
5169                 goto unlock;
5170         }
5171
5172         WRITE_ONCE(params->flags, current_flags);
5173         status = MGMT_STATUS_SUCCESS;
5174
5175         /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5176          * has been set.
5177          */
5178         if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5179                 hci_update_passive_scan(hdev);
5180
5181 unlock:
5182         hci_dev_unlock(hdev);
5183
5184 done:
5185         if (status == MGMT_STATUS_SUCCESS)
5186                 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5187                                      supported_flags, current_flags);
5188
5189         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5190                                  &cp->addr, sizeof(cp->addr));
5191 }
5192
5193 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5194                                    u16 handle)
5195 {
5196         struct mgmt_ev_adv_monitor_added ev;
5197
5198         ev.monitor_handle = cpu_to_le16(handle);
5199
5200         mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5201 }
5202
5203 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5204 {
5205         struct mgmt_ev_adv_monitor_removed ev;
5206         struct mgmt_pending_cmd *cmd;
5207         struct sock *sk_skip = NULL;
5208         struct mgmt_cp_remove_adv_monitor *cp;
5209
5210         cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5211         if (cmd) {
5212                 cp = cmd->param;
5213
5214                 if (cp->monitor_handle)
5215                         sk_skip = cmd->sk;
5216         }
5217
5218         ev.monitor_handle = cpu_to_le16(handle);
5219
5220         mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5221 }
5222
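/* Handle MGMT_OP_READ_ADV_MONITOR_FEATURES: report the supported monitor
 * features (OR patterns when the MSFT extension is available, all of which
 * are currently enabled) and the handles of every registered monitor.
 */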
5223 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5224                                  void *data, u16 len)
5225 {
5226         struct adv_monitor *monitor = NULL;
5227         struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5228         int handle, err;
5229         size_t rp_size = 0;
5230         __u32 supported = 0;
5231         __u32 enabled = 0;
5232         __u16 num_handles = 0;
5233         __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5234
5235         BT_DBG("request for %s", hdev->name);
5236
5237         hci_dev_lock(hdev);
5238
5239         if (msft_monitor_supported(hdev))
5240                 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5241
5242         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5243                 handles[num_handles++] = monitor->handle;
5244
5245         hci_dev_unlock(hdev);
5246
5247         rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5248         rp = kmalloc(rp_size, GFP_KERNEL);
5249         if (!rp)
5250                 return -ENOMEM;
5251
5252         /* All supported features are currently enabled */
5253         enabled = supported;
5254
5255         rp->supported_features = cpu_to_le32(supported);
5256         rp->enabled_features = cpu_to_le32(enabled);
5257         rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5258         rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5259         rp->num_handles = cpu_to_le16(num_handles);
5260         if (num_handles)
5261                 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5262
5263         err = mgmt_cmd_complete(sk, hdev->id,
5264                                 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5265                                 MGMT_STATUS_SUCCESS, rp, rp_size);
5266
5267         kfree(rp);
5268
5269         return err;
5270 }
5271
5272 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5273                                                    void *data, int status)
5274 {
5275         struct mgmt_rp_add_adv_patterns_monitor rp;
5276         struct mgmt_pending_cmd *cmd = data;
5277         struct adv_monitor *monitor = cmd->user_data;
5278
5279         hci_dev_lock(hdev);
5280
5281         rp.monitor_handle = cpu_to_le16(monitor->handle);
5282
5283         if (!status) {
5284                 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5285                 hdev->adv_monitors_cnt++;
5286                 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5287                         monitor->state = ADV_MONITOR_STATE_REGISTERED;
5288                 hci_update_passive_scan(hdev);
5289         }
5290
5291         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5292                           mgmt_status(status), &rp, sizeof(rp));
5293         mgmt_pending_remove(cmd);
5294
5295         hci_dev_unlock(hdev);
5296         bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5297                    rp.monitor_handle, status);
5298 }
5299
5300 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5301 {
5302         struct mgmt_pending_cmd *cmd = data;
5303         struct adv_monitor *monitor = cmd->user_data;
5304
5305         return hci_add_adv_monitor(hdev, monitor);
5306 }
5307
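/* Common tail of both ADD_ADV_PATTERNS_MONITOR variants: reject the
 * request if another monitor operation (or SET_LE) is pending, otherwise
 * queue hci_add_adv_monitor() via hci_cmd_sync_queue(). The monitor is
 * freed on any early failure path.
 */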
5308 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5309                                       struct adv_monitor *m, u8 status,
5310                                       void *data, u16 len, u16 op)
5311 {
5312         struct mgmt_pending_cmd *cmd;
5313         int err;
5314
5315         hci_dev_lock(hdev);
5316
5317         if (status)
5318                 goto unlock;
5319
5320         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5321             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5322             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5323             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5324                 status = MGMT_STATUS_BUSY;
5325                 goto unlock;
5326         }
5327
5328         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5329         if (!cmd) {
5330                 status = MGMT_STATUS_NO_RESOURCES;
5331                 goto unlock;
5332         }
5333
5334         cmd->user_data = m;
5335         err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5336                                  mgmt_add_adv_patterns_monitor_complete);
5337         if (err) {
5338                 if (err == -ENOMEM)
5339                         status = MGMT_STATUS_NO_RESOURCES;
5340                 else
5341                         status = MGMT_STATUS_FAILED;
5342
5343                 goto unlock;
5344         }
5345
5346         hci_dev_unlock(hdev);
5347
5348         return 0;
5349
5350 unlock:
5351         hci_free_adv_monitor(hdev, m);
5352         hci_dev_unlock(hdev);
5353         return mgmt_cmd_status(sk, hdev->id, op, status);
5354 }
5355
5356 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5357                                    struct mgmt_adv_rssi_thresholds *rssi)
5358 {
5359         if (rssi) {
5360                 m->rssi.low_threshold = rssi->low_threshold;
5361                 m->rssi.low_threshold_timeout =
5362                     __le16_to_cpu(rssi->low_threshold_timeout);
5363                 m->rssi.high_threshold = rssi->high_threshold;
5364                 m->rssi.high_threshold_timeout =
5365                     __le16_to_cpu(rssi->high_threshold_timeout);
5366                 m->rssi.sampling_period = rssi->sampling_period;
5367         } else {
5368                 /* Default values. These are the least restrictive
5369                  * parameters the MSFT API accepts, so monitoring behaves
5370                  * as if no RSSI constraints were given. They may need to
5371                  * change if other APIs are supported.
5372                  */
5373                 m->rssi.low_threshold = -127;
5374                 m->rssi.low_threshold_timeout = 60;
5375                 m->rssi.high_threshold = -127;
5376                 m->rssi.high_threshold_timeout = 0;
5377                 m->rssi.sampling_period = 0;
5378         }
5379 }
5380
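/* Copy the advertisement patterns from the request into the monitor,
 * validating that each pattern's offset and length stay within
 * HCI_MAX_EXT_AD_LENGTH.
 */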
5381 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5382                                     struct mgmt_adv_pattern *patterns)
5383 {
5384         u8 offset = 0, length = 0;
5385         struct adv_pattern *p = NULL;
5386         int i;
5387
5388         for (i = 0; i < pattern_count; i++) {
5389                 offset = patterns[i].offset;
5390                 length = patterns[i].length;
5391                 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5392                     length > HCI_MAX_EXT_AD_LENGTH ||
5393                     (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5394                         return MGMT_STATUS_INVALID_PARAMS;
5395
5396                 p = kmalloc(sizeof(*p), GFP_KERNEL);
5397                 if (!p)
5398                         return MGMT_STATUS_NO_RESOURCES;
5399
5400                 p->ad_type = patterns[i].ad_type;
5401                 p->offset = patterns[i].offset;
5402                 p->length = patterns[i].length;
5403                 memcpy(p->value, patterns[i].value, p->length);
5404
5405                 INIT_LIST_HEAD(&p->list);
5406                 list_add(&p->list, &m->patterns);
5407         }
5408
5409         return MGMT_STATUS_SUCCESS;
5410 }
5411
5412 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5413                                     void *data, u16 len)
5414 {
5415         struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5416         struct adv_monitor *m = NULL;
5417         u8 status = MGMT_STATUS_SUCCESS;
5418         size_t expected_size = sizeof(*cp);
5419
5420         BT_DBG("request for %s", hdev->name);
5421
5422         if (len <= sizeof(*cp)) {
5423                 status = MGMT_STATUS_INVALID_PARAMS;
5424                 goto done;
5425         }
5426
5427         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5428         if (len != expected_size) {
5429                 status = MGMT_STATUS_INVALID_PARAMS;
5430                 goto done;
5431         }
5432
5433         m = kzalloc(sizeof(*m), GFP_KERNEL);
5434         if (!m) {
5435                 status = MGMT_STATUS_NO_RESOURCES;
5436                 goto done;
5437         }
5438
5439         INIT_LIST_HEAD(&m->patterns);
5440
5441         parse_adv_monitor_rssi(m, NULL);
5442         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5443
5444 done:
5445         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5446                                           MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5447 }
5448
5449 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5450                                          void *data, u16 len)
5451 {
5452         struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5453         struct adv_monitor *m = NULL;
5454         u8 status = MGMT_STATUS_SUCCESS;
5455         size_t expected_size = sizeof(*cp);
5456
5457         BT_DBG("request for %s", hdev->name);
5458
5459         if (len <= sizeof(*cp)) {
5460                 status = MGMT_STATUS_INVALID_PARAMS;
5461                 goto done;
5462         }
5463
5464         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5465         if (len != expected_size) {
5466                 status = MGMT_STATUS_INVALID_PARAMS;
5467                 goto done;
5468         }
5469
5470         m = kzalloc(sizeof(*m), GFP_KERNEL);
5471         if (!m) {
5472                 status = MGMT_STATUS_NO_RESOURCES;
5473                 goto done;
5474         }
5475
5476         INIT_LIST_HEAD(&m->patterns);
5477
5478         parse_adv_monitor_rssi(m, &cp->rssi);
5479         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5480
5481 done:
5482         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5483                                          MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5484 }
5485
5486 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5487                                              void *data, int status)
5488 {
5489         struct mgmt_rp_remove_adv_monitor rp;
5490         struct mgmt_pending_cmd *cmd = data;
5491         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5492
5493         hci_dev_lock(hdev);
5494
5495         rp.monitor_handle = cp->monitor_handle;
5496
5497         if (!status)
5498                 hci_update_passive_scan(hdev);
5499
5500         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5501                           mgmt_status(status), &rp, sizeof(rp));
5502         mgmt_pending_remove(cmd);
5503
5504         hci_dev_unlock(hdev);
5505         bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5506                    rp.monitor_handle, status);
5507 }
5508
5509 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5510 {
5511         struct mgmt_pending_cmd *cmd = data;
5512         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5513         u16 handle = __le16_to_cpu(cp->monitor_handle);
5514
5515         if (!handle)
5516                 return hci_remove_all_adv_monitor(hdev);
5517
5518         return hci_remove_single_adv_monitor(hdev, handle);
5519 }
5520
5521 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5522                               void *data, u16 len)
5523 {
5524         struct mgmt_pending_cmd *cmd;
5525         int err, status;
5526
5527         hci_dev_lock(hdev);
5528
5529         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5530             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5531             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5532             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5533                 status = MGMT_STATUS_BUSY;
5534                 goto unlock;
5535         }
5536
5537         cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5538         if (!cmd) {
5539                 status = MGMT_STATUS_NO_RESOURCES;
5540                 goto unlock;
5541         }
5542
5543         err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5544                                   mgmt_remove_adv_monitor_complete);
5545
5546         if (err) {
5547                 mgmt_pending_remove(cmd);
5548
5549                 if (err == -ENOMEM)
5550                         status = MGMT_STATUS_NO_RESOURCES;
5551                 else
5552                         status = MGMT_STATUS_FAILED;
5553
5554                 goto unlock;
5555         }
5556
5557         hci_dev_unlock(hdev);
5558
5559         return 0;
5560
5561 unlock:
5562         hci_dev_unlock(hdev);
5563         return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5564                                status);
5565 }
5566
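/* Completion handler for the local OOB data read. Without BR/EDR Secure
 * Connections only the P-192 hash and randomizer are parsed and the P-256
 * fields are dropped from the reply; otherwise both the P-192 and P-256
 * values are returned.
 */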
5567 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5568 {
5569         struct mgmt_rp_read_local_oob_data mgmt_rp;
5570         size_t rp_size = sizeof(mgmt_rp);
5571         struct mgmt_pending_cmd *cmd = data;
5572         struct sk_buff *skb = cmd->skb;
5573         u8 status = mgmt_status(err);
5574
5575         if (!status) {
5576                 if (!skb)
5577                         status = MGMT_STATUS_FAILED;
5578                 else if (IS_ERR(skb))
5579                         status = mgmt_status(PTR_ERR(skb));
5580                 else
5581                         status = mgmt_status(skb->data[0]);
5582         }
5583
5584         bt_dev_dbg(hdev, "status %d", status);
5585
5586         if (status) {
5587                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5588                 goto remove;
5589         }
5590
5591         memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5592
5593         if (!bredr_sc_enabled(hdev)) {
5594                 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5595
5596                 if (skb->len < sizeof(*rp)) {
5597                         mgmt_cmd_status(cmd->sk, hdev->id,
5598                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5599                                         MGMT_STATUS_FAILED);
5600                         goto remove;
5601                 }
5602
5603                 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5604                 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5605
5606                 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5607         } else {
5608                 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5609
5610                 if (skb->len < sizeof(*rp)) {
5611                         mgmt_cmd_status(cmd->sk, hdev->id,
5612                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5613                                         MGMT_STATUS_FAILED);
5614                         goto remove;
5615                 }
5616
5617                 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5618                 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5619
5620                 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5621                 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5622         }
5623
5624         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5625                           MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5626
5627 remove:
5628         if (skb && !IS_ERR(skb))
5629                 kfree_skb(skb);
5630
5631         mgmt_pending_free(cmd);
5632 }
5633
5634 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5635 {
5636         struct mgmt_pending_cmd *cmd = data;
5637
5638         if (bredr_sc_enabled(hdev))
5639                 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5640         else
5641                 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5642
5643         if (IS_ERR(cmd->skb))
5644                 return PTR_ERR(cmd->skb);
5645         else
5646                 return 0;
5647 }
5648
5649 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5650                                void *data, u16 data_len)
5651 {
5652         struct mgmt_pending_cmd *cmd;
5653         int err;
5654
5655         bt_dev_dbg(hdev, "sock %p", sk);
5656
5657         hci_dev_lock(hdev);
5658
5659         if (!hdev_is_powered(hdev)) {
5660                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5661                                       MGMT_STATUS_NOT_POWERED);
5662                 goto unlock;
5663         }
5664
5665         if (!lmp_ssp_capable(hdev)) {
5666                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5667                                       MGMT_STATUS_NOT_SUPPORTED);
5668                 goto unlock;
5669         }
5670
5671         cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5672         if (!cmd)
5673                 err = -ENOMEM;
5674         else
5675                 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5676                                          read_local_oob_data_complete);
5677
5678         if (err < 0) {
5679                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5680                                       MGMT_STATUS_FAILED);
5681
5682                 if (cmd)
5683                         mgmt_pending_free(cmd);
5684         }
5685
5686 unlock:
5687         hci_dev_unlock(hdev);
5688         return err;
5689 }
5690
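/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA in its two sizes: the legacy form
 * carries only P-192 hash/randomizer and is limited to BR/EDR, while the
 * extended form carries both P-192 and P-256 values. An all-zero hash or
 * randomizer disables OOB data for that curve, and LE addresses must have
 * zeroed P-192 values since legacy SMP OOB is not implemented.
 */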
5691 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5692                                void *data, u16 len)
5693 {
5694         struct mgmt_addr_info *addr = data;
5695         int err;
5696
5697         bt_dev_dbg(hdev, "sock %p", sk);
5698
5699         if (!bdaddr_type_is_valid(addr->type))
5700                 return mgmt_cmd_complete(sk, hdev->id,
5701                                          MGMT_OP_ADD_REMOTE_OOB_DATA,
5702                                          MGMT_STATUS_INVALID_PARAMS,
5703                                          addr, sizeof(*addr));
5704
5705         hci_dev_lock(hdev);
5706
5707         if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5708                 struct mgmt_cp_add_remote_oob_data *cp = data;
5709                 u8 status;
5710
5711                 if (cp->addr.type != BDADDR_BREDR) {
5712                         err = mgmt_cmd_complete(sk, hdev->id,
5713                                                 MGMT_OP_ADD_REMOTE_OOB_DATA,
5714                                                 MGMT_STATUS_INVALID_PARAMS,
5715                                                 &cp->addr, sizeof(cp->addr));
5716                         goto unlock;
5717                 }
5718
5719                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5720                                               cp->addr.type, cp->hash,
5721                                               cp->rand, NULL, NULL);
5722                 if (err < 0)
5723                         status = MGMT_STATUS_FAILED;
5724                 else
5725                         status = MGMT_STATUS_SUCCESS;
5726
5727                 err = mgmt_cmd_complete(sk, hdev->id,
5728                                         MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5729                                         &cp->addr, sizeof(cp->addr));
5730         } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5731                 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5732                 u8 *rand192, *hash192, *rand256, *hash256;
5733                 u8 status;
5734
5735                 if (bdaddr_type_is_le(cp->addr.type)) {
5736                         /* Enforce zero-valued 192-bit parameters as
5737                          * long as legacy SMP OOB isn't implemented.
5738                          */
5739                         if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5740                             memcmp(cp->hash192, ZERO_KEY, 16)) {
5741                                 err = mgmt_cmd_complete(sk, hdev->id,
5742                                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5743                                                         MGMT_STATUS_INVALID_PARAMS,
5744                                                         addr, sizeof(*addr));
5745                                 goto unlock;
5746                         }
5747
5748                         rand192 = NULL;
5749                         hash192 = NULL;
5750                 } else {
5751                         /* In case one of the P-192 values is set to zero,
5752                          * then just disable OOB data for P-192.
5753                          */
5754                         if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5755                             !memcmp(cp->hash192, ZERO_KEY, 16)) {
5756                                 rand192 = NULL;
5757                                 hash192 = NULL;
5758                         } else {
5759                                 rand192 = cp->rand192;
5760                                 hash192 = cp->hash192;
5761                         }
5762                 }
5763
5764                 /* In case one of the P-256 values is set to zero, then just
5765                  * disable OOB data for P-256.
5766                  */
5767                 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5768                     !memcmp(cp->hash256, ZERO_KEY, 16)) {
5769                         rand256 = NULL;
5770                         hash256 = NULL;
5771                 } else {
5772                         rand256 = cp->rand256;
5773                         hash256 = cp->hash256;
5774                 }
5775
5776                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5777                                               cp->addr.type, hash192, rand192,
5778                                               hash256, rand256);
5779                 if (err < 0)
5780                         status = MGMT_STATUS_FAILED;
5781                 else
5782                         status = MGMT_STATUS_SUCCESS;
5783
5784                 err = mgmt_cmd_complete(sk, hdev->id,
5785                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5786                                         status, &cp->addr, sizeof(cp->addr));
5787         } else {
5788                 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5789                            len);
5790                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5791                                       MGMT_STATUS_INVALID_PARAMS);
5792         }
5793
5794 unlock:
5795         hci_dev_unlock(hdev);
5796         return err;
5797 }
5798
5799 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5800                                   void *data, u16 len)
5801 {
5802         struct mgmt_cp_remove_remote_oob_data *cp = data;
5803         u8 status;
5804         int err;
5805
5806         bt_dev_dbg(hdev, "sock %p", sk);
5807
5808         if (cp->addr.type != BDADDR_BREDR)
5809                 return mgmt_cmd_complete(sk, hdev->id,
5810                                          MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5811                                          MGMT_STATUS_INVALID_PARAMS,
5812                                          &cp->addr, sizeof(cp->addr));
5813
5814         hci_dev_lock(hdev);
5815
5816         if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5817                 hci_remote_oob_data_clear(hdev);
5818                 status = MGMT_STATUS_SUCCESS;
5819                 goto done;
5820         }
5821
5822         err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5823         if (err < 0)
5824                 status = MGMT_STATUS_INVALID_PARAMS;
5825         else
5826                 status = MGMT_STATUS_SUCCESS;
5827
5828 done:
5829         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5830                                 status, &cp->addr, sizeof(cp->addr));
5831
5832         hci_dev_unlock(hdev);
5833         return err;
5834 }
5835
5836 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5837 {
5838         struct mgmt_pending_cmd *cmd;
5839
5840         bt_dev_dbg(hdev, "status %u", status);
5841
5842         hci_dev_lock(hdev);
5843
5844         cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5845         if (!cmd)
5846                 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5847
5848         if (!cmd)
5849                 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5850
5851         if (cmd) {
5852                 cmd->cmd_complete(cmd, mgmt_status(status));
5853                 mgmt_pending_remove(cmd);
5854         }
5855
5856         hci_dev_unlock(hdev);
5857 }
5858
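/* Validate the requested discovery type against the controller: LE-only
 * discovery needs LE support, BR/EDR-only needs BR/EDR support and
 * interleaved discovery needs both (hence the fallthrough). On failure the
 * matching management status is returned through *mgmt_status.
 */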
5859 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5860                                     uint8_t *mgmt_status)
5861 {
5862         switch (type) {
5863         case DISCOV_TYPE_LE:
5864                 *mgmt_status = mgmt_le_support(hdev);
5865                 if (*mgmt_status)
5866                         return false;
5867                 break;
5868         case DISCOV_TYPE_INTERLEAVED:
5869                 *mgmt_status = mgmt_le_support(hdev);
5870                 if (*mgmt_status)
5871                         return false;
5872                 fallthrough;
5873         case DISCOV_TYPE_BREDR:
5874                 *mgmt_status = mgmt_bredr_support(hdev);
5875                 if (*mgmt_status)
5876                         return false;
5877                 break;
5878         default:
5879                 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5880                 return false;
5881         }
5882
5883         return true;
5884 }
5885
5886 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5887 {
5888         struct mgmt_pending_cmd *cmd = data;
5889
5890         if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5891             cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5892             cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5893                 return;
5894
5895         bt_dev_dbg(hdev, "err %d", err);
5896
5897         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5898                           cmd->param, 1);
5899         mgmt_pending_remove(cmd);
5900
5901         hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5902                                 DISCOVERY_FINDING);
5903 }
5904
5905 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5906 {
5907         return hci_start_discovery_sync(hdev);
5908 }
5909
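     /* Common handler for MGMT_OP_START_DISCOVERY and
      * MGMT_OP_START_LIMITED_DISCOVERY: validate the adapter and discovery
      * state, queue hci_start_discovery_sync() and mark discovery as
      * STARTING.
      */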
5910 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5911                                     u16 op, void *data, u16 len)
5912 {
5913         struct mgmt_cp_start_discovery *cp = data;
5914         struct mgmt_pending_cmd *cmd;
5915         u8 status;
5916         int err;
5917
5918         bt_dev_dbg(hdev, "sock %p", sk);
5919
5920         hci_dev_lock(hdev);
5921
5922         if (!hdev_is_powered(hdev)) {
5923                 err = mgmt_cmd_complete(sk, hdev->id, op,
5924                                         MGMT_STATUS_NOT_POWERED,
5925                                         &cp->type, sizeof(cp->type));
5926                 goto failed;
5927         }
5928
5929         if (hdev->discovery.state != DISCOVERY_STOPPED ||
5930             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5931                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5932                                         &cp->type, sizeof(cp->type));
5933                 goto failed;
5934         }
5935
5936         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5937                 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5938                                         &cp->type, sizeof(cp->type));
5939                 goto failed;
5940         }
5941
5942         /* Can't start discovery when it is paused */
5943         if (hdev->discovery_paused) {
5944                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5945                                         &cp->type, sizeof(cp->type));
5946                 goto failed;
5947         }
5948
5949         /* Clear the discovery filter first to free any previously
5950          * allocated memory for the UUID list.
5951          */
5952         hci_discovery_filter_clear(hdev);
5953
5954         hdev->discovery.type = cp->type;
5955         hdev->discovery.report_invalid_rssi = false;
5956         if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5957                 hdev->discovery.limited = true;
5958         else
5959                 hdev->discovery.limited = false;
5960
5961         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5962         if (!cmd) {
5963                 err = -ENOMEM;
5964                 goto failed;
5965         }
5966
5967         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5968                                  start_discovery_complete);
5969         if (err < 0) {
5970                 mgmt_pending_remove(cmd);
5971                 goto failed;
5972         }
5973
5974         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5975
5976 failed:
5977         hci_dev_unlock(hdev);
5978         return err;
5979 }
5980
5981 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5982                            void *data, u16 len)
5983 {
5984         return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5985                                         data, len);
5986 }
5987
5988 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5989                                    void *data, u16 len)
5990 {
5991         return start_discovery_internal(sk, hdev,
5992                                         MGMT_OP_START_LIMITED_DISCOVERY,
5993                                         data, len);
5994 }
5995
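     /* Handler for MGMT_OP_START_SERVICE_DISCOVERY. On top of the regular
      * discovery checks, the trailing list of 16-byte UUIDs is validated
      * (e.g. two UUIDs require len == sizeof(*cp) + 32) and the RSSI
      * threshold and UUID filter are stored in hdev->discovery before the
      * discovery start is queued.
      */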
5996 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5997                                    void *data, u16 len)
5998 {
5999         struct mgmt_cp_start_service_discovery *cp = data;
6000         struct mgmt_pending_cmd *cmd;
6001         const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6002         u16 uuid_count, expected_len;
6003         u8 status;
6004         int err;
6005
6006         bt_dev_dbg(hdev, "sock %p", sk);
6007
6008         hci_dev_lock(hdev);
6009
6010         if (!hdev_is_powered(hdev)) {
6011                 err = mgmt_cmd_complete(sk, hdev->id,
6012                                         MGMT_OP_START_SERVICE_DISCOVERY,
6013                                         MGMT_STATUS_NOT_POWERED,
6014                                         &cp->type, sizeof(cp->type));
6015                 goto failed;
6016         }
6017
6018         if (hdev->discovery.state != DISCOVERY_STOPPED ||
6019             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6020                 err = mgmt_cmd_complete(sk, hdev->id,
6021                                         MGMT_OP_START_SERVICE_DISCOVERY,
6022                                         MGMT_STATUS_BUSY, &cp->type,
6023                                         sizeof(cp->type));
6024                 goto failed;
6025         }
6026
6027         if (hdev->discovery_paused) {
6028                 err = mgmt_cmd_complete(sk, hdev->id,
6029                                         MGMT_OP_START_SERVICE_DISCOVERY,
6030                                         MGMT_STATUS_BUSY, &cp->type,
6031                                         sizeof(cp->type));
6032                 goto failed;
6033         }
6034
6035         uuid_count = __le16_to_cpu(cp->uuid_count);
6036         if (uuid_count > max_uuid_count) {
6037                 bt_dev_err(hdev, "service_discovery: uuid_count value %u too large",
6038                            uuid_count);
6039                 err = mgmt_cmd_complete(sk, hdev->id,
6040                                         MGMT_OP_START_SERVICE_DISCOVERY,
6041                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
6042                                         sizeof(cp->type));
6043                 goto failed;
6044         }
6045
6046         expected_len = sizeof(*cp) + uuid_count * 16;
6047         if (expected_len != len) {
6048                 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6049                            expected_len, len);
6050                 err = mgmt_cmd_complete(sk, hdev->id,
6051                                         MGMT_OP_START_SERVICE_DISCOVERY,
6052                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
6053                                         sizeof(cp->type));
6054                 goto failed;
6055         }
6056
6057         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6058                 err = mgmt_cmd_complete(sk, hdev->id,
6059                                         MGMT_OP_START_SERVICE_DISCOVERY,
6060                                         status, &cp->type, sizeof(cp->type));
6061                 goto failed;
6062         }
6063
6064         cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6065                                hdev, data, len);
6066         if (!cmd) {
6067                 err = -ENOMEM;
6068                 goto failed;
6069         }
6070
6071         /* Clear the discovery filter first to free any previously
6072          * allocated memory for the UUID list.
6073          */
6074         hci_discovery_filter_clear(hdev);
6075
6076         hdev->discovery.result_filtering = true;
6077         hdev->discovery.type = cp->type;
6078         hdev->discovery.rssi = cp->rssi;
6079         hdev->discovery.uuid_count = uuid_count;
6080
6081         if (uuid_count > 0) {
6082                 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6083                                                 GFP_KERNEL);
6084                 if (!hdev->discovery.uuids) {
6085                         err = mgmt_cmd_complete(sk, hdev->id,
6086                                                 MGMT_OP_START_SERVICE_DISCOVERY,
6087                                                 MGMT_STATUS_FAILED,
6088                                                 &cp->type, sizeof(cp->type));
6089                         mgmt_pending_remove(cmd);
6090                         goto failed;
6091                 }
6092         }
6093
6094         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6095                                  start_discovery_complete);
6096         if (err < 0) {
6097                 mgmt_pending_remove(cmd);
6098                 goto failed;
6099         }
6100
6101         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6102
6103 failed:
6104         hci_dev_unlock(hdev);
6105         return err;
6106 }
6107
6108 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6109 {
6110         struct mgmt_pending_cmd *cmd;
6111
6112         bt_dev_dbg(hdev, "status %u", status);
6113
6114         hci_dev_lock(hdev);
6115
6116         cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6117         if (cmd) {
6118                 cmd->cmd_complete(cmd, mgmt_status(status));
6119                 mgmt_pending_remove(cmd);
6120         }
6121
6122         hci_dev_unlock(hdev);
6123 }
6124
6125 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6126 {
6127         struct mgmt_pending_cmd *cmd = data;
6128
6129         if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6130                 return;
6131
6132         bt_dev_dbg(hdev, "err %d", err);
6133
6134         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6135                           cmd->param, 1);
6136         mgmt_pending_remove(cmd);
6137
6138         if (!err)
6139                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6140 }
6141
6142 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6143 {
6144         return hci_stop_discovery_sync(hdev);
6145 }
6146
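     /* Handler for MGMT_OP_STOP_DISCOVERY: rejected if no discovery is
      * active or the type does not match the running one, otherwise
      * hci_stop_discovery_sync() is queued and the discovery state moves
      * to STOPPING.
      */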
6147 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6148                           u16 len)
6149 {
6150         struct mgmt_cp_stop_discovery *mgmt_cp = data;
6151         struct mgmt_pending_cmd *cmd;
6152         int err;
6153
6154         bt_dev_dbg(hdev, "sock %p", sk);
6155
6156         hci_dev_lock(hdev);
6157
6158         if (!hci_discovery_active(hdev)) {
6159                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6160                                         MGMT_STATUS_REJECTED, &mgmt_cp->type,
6161                                         sizeof(mgmt_cp->type));
6162                 goto unlock;
6163         }
6164
6165         if (hdev->discovery.type != mgmt_cp->type) {
6166                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6167                                         MGMT_STATUS_INVALID_PARAMS,
6168                                         &mgmt_cp->type, sizeof(mgmt_cp->type));
6169                 goto unlock;
6170         }
6171
6172         cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6173         if (!cmd) {
6174                 err = -ENOMEM;
6175                 goto unlock;
6176         }
6177
6178         err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6179                                  stop_discovery_complete);
6180         if (err < 0) {
6181                 mgmt_pending_remove(cmd);
6182                 goto unlock;
6183         }
6184
6185         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6186
6187 unlock:
6188         hci_dev_unlock(hdev);
6189         return err;
6190 }
6191
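     /* Handler for MGMT_OP_CONFIRM_NAME: update the inquiry cache entry
      * for the given address so that name resolution is either skipped
      * (name already known) or scheduled (name needed).
      */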
6192 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6193                         u16 len)
6194 {
6195         struct mgmt_cp_confirm_name *cp = data;
6196         struct inquiry_entry *e;
6197         int err;
6198
6199         bt_dev_dbg(hdev, "sock %p", sk);
6200
6201         hci_dev_lock(hdev);
6202
6203         if (!hci_discovery_active(hdev)) {
6204                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6205                                         MGMT_STATUS_FAILED, &cp->addr,
6206                                         sizeof(cp->addr));
6207                 goto failed;
6208         }
6209
6210         e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6211         if (!e) {
6212                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6213                                         MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6214                                         sizeof(cp->addr));
6215                 goto failed;
6216         }
6217
6218         if (cp->name_known) {
6219                 e->name_state = NAME_KNOWN;
6220                 list_del(&e->list);
6221         } else {
6222                 e->name_state = NAME_NEEDED;
6223                 hci_inquiry_cache_update_resolve(hdev, e);
6224         }
6225
6226         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6227                                 &cp->addr, sizeof(cp->addr));
6228
6229 failed:
6230         hci_dev_unlock(hdev);
6231         return err;
6232 }
6233
6234 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6235                         u16 len)
6236 {
6237         struct mgmt_cp_block_device *cp = data;
6238         u8 status;
6239         int err;
6240
6241         bt_dev_dbg(hdev, "sock %p", sk);
6242
6243         if (!bdaddr_type_is_valid(cp->addr.type))
6244                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6245                                          MGMT_STATUS_INVALID_PARAMS,
6246                                          &cp->addr, sizeof(cp->addr));
6247
6248         hci_dev_lock(hdev);
6249
6250         err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6251                                   cp->addr.type);
6252         if (err < 0) {
6253                 status = MGMT_STATUS_FAILED;
6254                 goto done;
6255         }
6256
6257         mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6258                    sk);
6259         status = MGMT_STATUS_SUCCESS;
6260
6261 done:
6262         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6263                                 &cp->addr, sizeof(cp->addr));
6264
6265         hci_dev_unlock(hdev);
6266
6267         return err;
6268 }
6269
6270 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6271                           u16 len)
6272 {
6273         struct mgmt_cp_unblock_device *cp = data;
6274         u8 status;
6275         int err;
6276
6277         bt_dev_dbg(hdev, "sock %p", sk);
6278
6279         if (!bdaddr_type_is_valid(cp->addr.type))
6280                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6281                                          MGMT_STATUS_INVALID_PARAMS,
6282                                          &cp->addr, sizeof(cp->addr));
6283
6284         hci_dev_lock(hdev);
6285
6286         err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6287                                   cp->addr.type);
6288         if (err < 0) {
6289                 status = MGMT_STATUS_INVALID_PARAMS;
6290                 goto done;
6291         }
6292
6293         mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6294                    sk);
6295         status = MGMT_STATUS_SUCCESS;
6296
6297 done:
6298         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6299                                 &cp->addr, sizeof(cp->addr));
6300
6301         hci_dev_unlock(hdev);
6302
6303         return err;
6304 }
6305
6306 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6307 {
6308         return hci_update_eir_sync(hdev);
6309 }
6310
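     /* Handler for MGMT_OP_SET_DEVICE_ID: store the Device ID source
      * (0x0000-0x0002), vendor, product and version and queue an EIR
      * update so the record is reflected in the extended inquiry response.
      */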
6311 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6312                          u16 len)
6313 {
6314         struct mgmt_cp_set_device_id *cp = data;
6315         int err;
6316         __u16 source;
6317
6318         bt_dev_dbg(hdev, "sock %p", sk);
6319
6320         source = __le16_to_cpu(cp->source);
6321
6322         if (source > 0x0002)
6323                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6324                                        MGMT_STATUS_INVALID_PARAMS);
6325
6326         hci_dev_lock(hdev);
6327
6328         hdev->devid_source = source;
6329         hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6330         hdev->devid_product = __le16_to_cpu(cp->product);
6331         hdev->devid_version = __le16_to_cpu(cp->version);
6332
6333         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6334                                 NULL, 0);
6335
6336         hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6337
6338         hci_dev_unlock(hdev);
6339
6340         return err;
6341 }
6342
6343 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6344 {
6345         if (err)
6346                 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6347         else
6348                 bt_dev_dbg(hdev, "status %d", err);
6349 }
6350
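     /* Completion callback for MGMT_OP_SET_ADVERTISING: sync the
      * HCI_ADVERTISING flag with the controller's LE advertising state and
      * notify all pending Set Advertising commands.
      */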
6351 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6352 {
6353         struct cmd_lookup match = { NULL, hdev };
6354         u8 instance;
6355         struct adv_info *adv_instance;
6356         u8 status = mgmt_status(err);
6357
6358         if (status) {
6359                 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6360                                      cmd_status_rsp, &status);
6361                 return;
6362         }
6363
6364         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6365                 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6366         else
6367                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6368
6369         mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6370                              &match);
6371
6372         new_settings(hdev, match.sk);
6373
6374         if (match.sk)
6375                 sock_put(match.sk);
6376
6377         /* If "Set Advertising" was just disabled and instance advertising was
6378          * set up earlier, then re-enable multi-instance advertising.
6379          */
6380         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6381             list_empty(&hdev->adv_instances))
6382                 return;
6383
6384         instance = hdev->cur_adv_instance;
6385         if (!instance) {
6386                 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6387                                                         struct adv_info, list);
6388                 if (!adv_instance)
6389                         return;
6390
6391                 instance = adv_instance->instance;
6392         }
6393
6394         err = hci_schedule_adv_instance_sync(hdev, instance, true);
6395
6396         enable_advertising_instance(hdev, err);
6397 }
6398
6399 static int set_adv_sync(struct hci_dev *hdev, void *data)
6400 {
6401         struct mgmt_pending_cmd *cmd = data;
6402         struct mgmt_mode *cp = cmd->param;
6403         u8 val = !!cp->val;
6404
6405         if (cp->val == 0x02)
6406                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6407         else
6408                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6409
6410         cancel_adv_timeout(hdev);
6411
6412         if (val) {
6413                 /* Switch to instance "0" for the Set Advertising setting.
6414                  * We cannot use update_[adv|scan_rsp]_data() here as the
6415                  * HCI_ADVERTISING flag is not yet set.
6416                  */
6417                 hdev->cur_adv_instance = 0x00;
6418
6419                 if (ext_adv_capable(hdev)) {
6420                         hci_start_ext_adv_sync(hdev, 0x00);
6421                 } else {
6422                         hci_update_adv_data_sync(hdev, 0x00);
6423                         hci_update_scan_rsp_data_sync(hdev, 0x00);
6424                         hci_enable_advertising_sync(hdev);
6425                 }
6426         } else {
6427                 hci_disable_advertising_sync(hdev);
6428         }
6429
6430         return 0;
6431 }
6432
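     /* Handler for MGMT_OP_SET_ADVERTISING. Accepts 0x00 (off), 0x01 (on)
      * and 0x02 (connectable advertising); requires LE support and is
      * rejected as busy while advertising is paused.
      */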
6433 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6434                            u16 len)
6435 {
6436         struct mgmt_mode *cp = data;
6437         struct mgmt_pending_cmd *cmd;
6438         u8 val, status;
6439         int err;
6440
6441         bt_dev_dbg(hdev, "sock %p", sk);
6442
6443         status = mgmt_le_support(hdev);
6444         if (status)
6445                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6446                                        status);
6447
6448         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6449                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6450                                        MGMT_STATUS_INVALID_PARAMS);
6451
6452         if (hdev->advertising_paused)
6453                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6454                                        MGMT_STATUS_BUSY);
6455
6456         hci_dev_lock(hdev);
6457
6458         val = !!cp->val;
6459
6460         /* The following conditions mean that we should not do any
6461          * HCI communication but instead send a mgmt response
6462          * directly to user space (after toggling the flag if
6463          * necessary).
6464          */
6465         if (!hdev_is_powered(hdev) ||
6466             (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6467              (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6468             hci_dev_test_flag(hdev, HCI_MESH) ||
6469             hci_conn_num(hdev, LE_LINK) > 0 ||
6470             (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6471              hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6472                 bool changed;
6473
6474                 if (cp->val) {
6475                         hdev->cur_adv_instance = 0x00;
6476                         changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6477                         if (cp->val == 0x02)
6478                                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6479                         else
6480                                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6481                 } else {
6482                         changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6483                         hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6484                 }
6485
6486                 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6487                 if (err < 0)
6488                         goto unlock;
6489
6490                 if (changed)
6491                         err = new_settings(hdev, sk);
6492
6493                 goto unlock;
6494         }
6495
6496         if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6497             pending_find(MGMT_OP_SET_LE, hdev)) {
6498                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6499                                       MGMT_STATUS_BUSY);
6500                 goto unlock;
6501         }
6502
6503         cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6504         if (!cmd)
6505                 err = -ENOMEM;
6506         else
6507                 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6508                                          set_advertising_complete);
6509
6510         if (err < 0 && cmd)
6511                 mgmt_pending_remove(cmd);
6512
6513 unlock:
6514         hci_dev_unlock(hdev);
6515         return err;
6516 }
6517
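     /* Handler for MGMT_OP_SET_STATIC_ADDRESS: only allowed while powered
      * off. A non-zero address must be a valid static random address, i.e.
      * not BDADDR_NONE and with the two most significant bits set.
      */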
6518 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6519                               void *data, u16 len)
6520 {
6521         struct mgmt_cp_set_static_address *cp = data;
6522         int err;
6523
6524         bt_dev_dbg(hdev, "sock %p", sk);
6525
6526         if (!lmp_le_capable(hdev))
6527                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6528                                        MGMT_STATUS_NOT_SUPPORTED);
6529
6530         if (hdev_is_powered(hdev))
6531                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6532                                        MGMT_STATUS_REJECTED);
6533
6534         if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6535                 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6536                         return mgmt_cmd_status(sk, hdev->id,
6537                                                MGMT_OP_SET_STATIC_ADDRESS,
6538                                                MGMT_STATUS_INVALID_PARAMS);
6539
6540                 /* Two most significant bits shall be set */
6541                 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6542                         return mgmt_cmd_status(sk, hdev->id,
6543                                                MGMT_OP_SET_STATIC_ADDRESS,
6544                                                MGMT_STATUS_INVALID_PARAMS);
6545         }
6546
6547         hci_dev_lock(hdev);
6548
6549         bacpy(&hdev->static_addr, &cp->bdaddr);
6550
6551         err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6552         if (err < 0)
6553                 goto unlock;
6554
6555         err = new_settings(hdev, sk);
6556
6557 unlock:
6558         hci_dev_unlock(hdev);
6559         return err;
6560 }
6561
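     /* Handler for MGMT_OP_SET_SCAN_PARAMS: interval and window must both
      * be in the range 0x0004-0x4000 with window <= interval; a running
      * background scan is restarted so the new values take effect.
      */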
6562 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6563                            void *data, u16 len)
6564 {
6565         struct mgmt_cp_set_scan_params *cp = data;
6566         __u16 interval, window;
6567         int err;
6568
6569         bt_dev_dbg(hdev, "sock %p", sk);
6570
6571         if (!lmp_le_capable(hdev))
6572                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6573                                        MGMT_STATUS_NOT_SUPPORTED);
6574
6575         interval = __le16_to_cpu(cp->interval);
6576
6577         if (interval < 0x0004 || interval > 0x4000)
6578                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6579                                        MGMT_STATUS_INVALID_PARAMS);
6580
6581         window = __le16_to_cpu(cp->window);
6582
6583         if (window < 0x0004 || window > 0x4000)
6584                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6585                                        MGMT_STATUS_INVALID_PARAMS);
6586
6587         if (window > interval)
6588                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6589                                        MGMT_STATUS_INVALID_PARAMS);
6590
6591         hci_dev_lock(hdev);
6592
6593         hdev->le_scan_interval = interval;
6594         hdev->le_scan_window = window;
6595
6596         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6597                                 NULL, 0);
6598
6599         /* If background scan is running, restart it so new parameters are
6600          * loaded.
6601          */
6602         if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6603             hdev->discovery.state == DISCOVERY_STOPPED)
6604                 hci_update_passive_scan(hdev);
6605
6606         hci_dev_unlock(hdev);
6607
6608         return err;
6609 }
6610
6611 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6612 {
6613         struct mgmt_pending_cmd *cmd = data;
6614
6615         bt_dev_dbg(hdev, "err %d", err);
6616
6617         if (err) {
6618                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6619                                 mgmt_status(err));
6620         } else {
6621                 struct mgmt_mode *cp = cmd->param;
6622
6623                 if (cp->val)
6624                         hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6625                 else
6626                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6627
6628                 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6629                 new_settings(hdev, cmd->sk);
6630         }
6631
6632         mgmt_pending_free(cmd);
6633 }
6634
6635 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6636 {
6637         struct mgmt_pending_cmd *cmd = data;
6638         struct mgmt_mode *cp = cmd->param;
6639
6640         return hci_write_fast_connectable_sync(hdev, cp->val);
6641 }
6642
6643 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6644                                 void *data, u16 len)
6645 {
6646         struct mgmt_mode *cp = data;
6647         struct mgmt_pending_cmd *cmd;
6648         int err;
6649
6650         bt_dev_dbg(hdev, "sock %p", sk);
6651
6652         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6653             hdev->hci_ver < BLUETOOTH_VER_1_2)
6654                 return mgmt_cmd_status(sk, hdev->id,
6655                                        MGMT_OP_SET_FAST_CONNECTABLE,
6656                                        MGMT_STATUS_NOT_SUPPORTED);
6657
6658         if (cp->val != 0x00 && cp->val != 0x01)
6659                 return mgmt_cmd_status(sk, hdev->id,
6660                                        MGMT_OP_SET_FAST_CONNECTABLE,
6661                                        MGMT_STATUS_INVALID_PARAMS);
6662
6663         hci_dev_lock(hdev);
6664
6665         if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6666                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6667                 goto unlock;
6668         }
6669
6670         if (!hdev_is_powered(hdev)) {
6671                 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6672                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6673                 new_settings(hdev, sk);
6674                 goto unlock;
6675         }
6676
6677         cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6678                                len);
6679         if (!cmd)
6680                 err = -ENOMEM;
6681         else
6682                 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6683                                          fast_connectable_complete);
6684
6685         if (err < 0) {
6686                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6687                                 MGMT_STATUS_FAILED);
6688
6689                 if (cmd)
6690                         mgmt_pending_free(cmd);
6691         }
6692
6693 unlock:
6694         hci_dev_unlock(hdev);
6695
6696         return err;
6697 }
6698
6699 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6700 {
6701         struct mgmt_pending_cmd *cmd = data;
6702
6703         bt_dev_dbg(hdev, "err %d", err);
6704
6705         if (err) {
6706                 u8 mgmt_err = mgmt_status(err);
6707
6708                 /* We need to restore the flag if related HCI commands
6709                  * failed.
6710                  */
6711                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6712
6713                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6714         } else {
6715                 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6716                 new_settings(hdev, cmd->sk);
6717         }
6718
6719         mgmt_pending_free(cmd);
6720 }
6721
6722 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6723 {
6724         int status;
6725
6726         status = hci_write_fast_connectable_sync(hdev, false);
6727
6728         if (!status)
6729                 status = hci_update_scan_sync(hdev);
6730
6731         /* Since only the advertising data flags will change, there
6732          * is no need to update the scan response data.
6733          */
6734         if (!status)
6735                 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6736
6737         return status;
6738 }
6739
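     /* Handler for MGMT_OP_SET_BREDR: toggle BR/EDR support on a dual-mode
      * controller. Disabling while powered is rejected, as is re-enabling
      * when an LE static address or Secure Connections is in use (see the
      * comment below).
      */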
6740 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6741 {
6742         struct mgmt_mode *cp = data;
6743         struct mgmt_pending_cmd *cmd;
6744         int err;
6745
6746         bt_dev_dbg(hdev, "sock %p", sk);
6747
6748         if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6749                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6750                                        MGMT_STATUS_NOT_SUPPORTED);
6751
6752         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6753                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6754                                        MGMT_STATUS_REJECTED);
6755
6756         if (cp->val != 0x00 && cp->val != 0x01)
6757                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6758                                        MGMT_STATUS_INVALID_PARAMS);
6759
6760         hci_dev_lock(hdev);
6761
6762         if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6763                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6764                 goto unlock;
6765         }
6766
6767         if (!hdev_is_powered(hdev)) {
6768                 if (!cp->val) {
6769                         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6770                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6771                         hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6772                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6773                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6774                 }
6775
6776                 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6777
6778                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6779                 if (err < 0)
6780                         goto unlock;
6781
6782                 err = new_settings(hdev, sk);
6783                 goto unlock;
6784         }
6785
6786         /* Reject disabling when powered on */
6787         if (!cp->val) {
6788                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6789                                       MGMT_STATUS_REJECTED);
6790                 goto unlock;
6791         } else {
6792                 /* When configuring a dual-mode controller to operate
6793                  * with LE only and using a static address, then switching
6794                  * BR/EDR back on is not allowed.
6795                  *
6796                  * Dual-mode controllers shall operate with the public
6797                  * address as its identity address for BR/EDR and LE. So
6798                  * reject the attempt to create an invalid configuration.
6799                  *
6800                  * The same restriction applies when Secure Connections
6801                  * has been enabled. For BR/EDR this is a controller feature
6802                  * while for LE it is a host stack feature. This means that
6803                  * switching BR/EDR back on when Secure Connections has been
6804                  * enabled is not a supported transaction.
6805                  */
6806                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6807                     (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6808                      hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6809                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6810                                               MGMT_STATUS_REJECTED);
6811                         goto unlock;
6812                 }
6813         }
6814
6815         cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6816         if (!cmd)
6817                 err = -ENOMEM;
6818         else
6819                 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6820                                          set_bredr_complete);
6821
6822         if (err < 0) {
6823                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6824                                 MGMT_STATUS_FAILED);
6825                 if (cmd)
6826                         mgmt_pending_free(cmd);
6827
6828                 goto unlock;
6829         }
6830
6831         /* We need to flip the bit already here so that
6832          * hci_req_update_adv_data generates the correct flags.
6833          */
6834         hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6835
6836 unlock:
6837         hci_dev_unlock(hdev);
6838         return err;
6839 }
6840
6841 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6842 {
6843         struct mgmt_pending_cmd *cmd = data;
6844         struct mgmt_mode *cp;
6845
6846         bt_dev_dbg(hdev, "err %d", err);
6847
6848         if (err) {
6849                 u8 mgmt_err = mgmt_status(err);
6850
6851                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6852                 goto done;
6853         }
6854
6855         cp = cmd->param;
6856
6857         switch (cp->val) {
6858         case 0x00:
6859                 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6860                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6861                 break;
6862         case 0x01:
6863                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6864                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6865                 break;
6866         case 0x02:
6867                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6868                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6869                 break;
6870         }
6871
6872         send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6873         new_settings(hdev, cmd->sk);
6874
6875 done:
6876         mgmt_pending_free(cmd);
6877 }
6878
6879 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6880 {
6881         struct mgmt_pending_cmd *cmd = data;
6882         struct mgmt_mode *cp = cmd->param;
6883         u8 val = !!cp->val;
6884
6885         /* Force write of val */
6886         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6887
6888         return hci_write_sc_support_sync(hdev, val);
6889 }
6890
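     /* Handler for MGMT_OP_SET_SECURE_CONN: 0x00 disables Secure
      * Connections, 0x01 enables it and 0x02 additionally selects Secure
      * Connections Only mode (HCI_SC_ONLY).
      */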
6891 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6892                            void *data, u16 len)
6893 {
6894         struct mgmt_mode *cp = data;
6895         struct mgmt_pending_cmd *cmd;
6896         u8 val;
6897         int err;
6898
6899         bt_dev_dbg(hdev, "sock %p", sk);
6900
6901         if (!lmp_sc_capable(hdev) &&
6902             !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6903                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6904                                        MGMT_STATUS_NOT_SUPPORTED);
6905
6906         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6907             lmp_sc_capable(hdev) &&
6908             !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6909                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6910                                        MGMT_STATUS_REJECTED);
6911
6912         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6913                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6914                                        MGMT_STATUS_INVALID_PARAMS);
6915
6916         hci_dev_lock(hdev);
6917
6918         if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6919             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6920                 bool changed;
6921
6922                 if (cp->val) {
6923                         changed = !hci_dev_test_and_set_flag(hdev,
6924                                                              HCI_SC_ENABLED);
6925                         if (cp->val == 0x02)
6926                                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6927                         else
6928                                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6929                 } else {
6930                         changed = hci_dev_test_and_clear_flag(hdev,
6931                                                               HCI_SC_ENABLED);
6932                         hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6933                 }
6934
6935                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6936                 if (err < 0)
6937                         goto failed;
6938
6939                 if (changed)
6940                         err = new_settings(hdev, sk);
6941
6942                 goto failed;
6943         }
6944
6945         val = !!cp->val;
6946
6947         if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6948             (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6949                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6950                 goto failed;
6951         }
6952
6953         cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6954         if (!cmd)
6955                 err = -ENOMEM;
6956         else
6957                 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6958                                          set_secure_conn_complete);
6959
6960         if (err < 0) {
6961                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6962                                 MGMT_STATUS_FAILED);
6963                 if (cmd)
6964                         mgmt_pending_free(cmd);
6965         }
6966
6967 failed:
6968         hci_dev_unlock(hdev);
6969         return err;
6970 }
6971
6972 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6973                           void *data, u16 len)
6974 {
6975         struct mgmt_mode *cp = data;
6976         bool changed, use_changed;
6977         int err;
6978
6979         bt_dev_dbg(hdev, "sock %p", sk);
6980
6981         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6982                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6983                                        MGMT_STATUS_INVALID_PARAMS);
6984
6985         hci_dev_lock(hdev);
6986
6987         if (cp->val)
6988                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6989         else
6990                 changed = hci_dev_test_and_clear_flag(hdev,
6991                                                       HCI_KEEP_DEBUG_KEYS);
6992
6993         if (cp->val == 0x02)
6994                 use_changed = !hci_dev_test_and_set_flag(hdev,
6995                                                          HCI_USE_DEBUG_KEYS);
6996         else
6997                 use_changed = hci_dev_test_and_clear_flag(hdev,
6998                                                           HCI_USE_DEBUG_KEYS);
6999
7000         if (hdev_is_powered(hdev) && use_changed &&
7001             hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7002                 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7003                 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7004                              sizeof(mode), &mode);
7005         }
7006
7007         err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7008         if (err < 0)
7009                 goto unlock;
7010
7011         if (changed)
7012                 err = new_settings(hdev, sk);
7013
7014 unlock:
7015         hci_dev_unlock(hdev);
7016         return err;
7017 }
7018
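     /* Handler for MGMT_OP_SET_PRIVACY: only allowed while powered off.
      * Stores (or clears) the local IRK and toggles the HCI_PRIVACY flag;
      * 0x02 selects limited privacy (HCI_LIMITED_PRIVACY).
      */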
7019 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7020                        u16 len)
7021 {
7022         struct mgmt_cp_set_privacy *cp = cp_data;
7023         bool changed;
7024         int err;
7025
7026         bt_dev_dbg(hdev, "sock %p", sk);
7027
7028         if (!lmp_le_capable(hdev))
7029                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7030                                        MGMT_STATUS_NOT_SUPPORTED);
7031
7032         if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7033                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7034                                        MGMT_STATUS_INVALID_PARAMS);
7035
7036         if (hdev_is_powered(hdev))
7037                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7038                                        MGMT_STATUS_REJECTED);
7039
7040         hci_dev_lock(hdev);
7041
7042         /* If user space supports this command it is also expected to
7043          * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7044          */
7045         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7046
7047         if (cp->privacy) {
7048                 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7049                 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7050                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7051                 hci_adv_instances_set_rpa_expired(hdev, true);
7052                 if (cp->privacy == 0x02)
7053                         hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7054                 else
7055                         hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7056         } else {
7057                 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7058                 memset(hdev->irk, 0, sizeof(hdev->irk));
7059                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7060                 hci_adv_instances_set_rpa_expired(hdev, false);
7061                 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7062         }
7063
7064         err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7065         if (err < 0)
7066                 goto unlock;
7067
7068         if (changed)
7069                 err = new_settings(hdev, sk);
7070
7071 unlock:
7072         hci_dev_unlock(hdev);
7073         return err;
7074 }
7075
7076 static bool irk_is_valid(struct mgmt_irk_info *irk)
7077 {
7078         switch (irk->addr.type) {
7079         case BDADDR_LE_PUBLIC:
7080                 return true;
7081
7082         case BDADDR_LE_RANDOM:
7083                 /* Two most significant bits shall be set */
7084                 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7085                         return false;
7086                 return true;
7087         }
7088
7089         return false;
7090 }
7091
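     /* Handler for MGMT_OP_LOAD_IRKS: validate and replace the complete
      * list of Identity Resolving Keys, skipping any key found on the
      * blocked-keys list.
      */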
7092 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7093                      u16 len)
7094 {
7095         struct mgmt_cp_load_irks *cp = cp_data;
7096         const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7097                                    sizeof(struct mgmt_irk_info));
7098         u16 irk_count, expected_len;
7099         int i, err;
7100
7101         bt_dev_dbg(hdev, "sock %p", sk);
7102
7103         if (!lmp_le_capable(hdev))
7104                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7105                                        MGMT_STATUS_NOT_SUPPORTED);
7106
7107         irk_count = __le16_to_cpu(cp->irk_count);
7108         if (irk_count > max_irk_count) {
7109                 bt_dev_err(hdev, "load_irks: irk_count value %u too large",
7110                            irk_count);
7111                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7112                                        MGMT_STATUS_INVALID_PARAMS);
7113         }
7114
7115         expected_len = struct_size(cp, irks, irk_count);
7116         if (expected_len != len) {
7117                 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7118                            expected_len, len);
7119                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7120                                        MGMT_STATUS_INVALID_PARAMS);
7121         }
7122
7123         bt_dev_dbg(hdev, "irk_count %u", irk_count);
7124
7125         for (i = 0; i < irk_count; i++) {
7126                 struct mgmt_irk_info *key = &cp->irks[i];
7127
7128                 if (!irk_is_valid(key))
7129                         return mgmt_cmd_status(sk, hdev->id,
7130                                                MGMT_OP_LOAD_IRKS,
7131                                                MGMT_STATUS_INVALID_PARAMS);
7132         }
7133
7134         hci_dev_lock(hdev);
7135
7136         hci_smp_irks_clear(hdev);
7137
7138         for (i = 0; i < irk_count; i++) {
7139                 struct mgmt_irk_info *irk = &cp->irks[i];
7140                 u8 addr_type = le_addr_type(irk->addr.type);
7141
7142                 if (hci_is_blocked_key(hdev,
7143                                        HCI_BLOCKED_KEY_TYPE_IRK,
7144                                        irk->val)) {
7145                         bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7146                                     &irk->addr.bdaddr);
7147                         continue;
7148                 }
7149
7150                 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7151                 if (irk->addr.type == BDADDR_BREDR)
7152                         addr_type = BDADDR_BREDR;
7153
7154                 hci_add_irk(hdev, &irk->addr.bdaddr,
7155                             addr_type, irk->val,
7156                             BDADDR_ANY);
7157         }
7158
7159         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7160
7161         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7162
7163         hci_dev_unlock(hdev);
7164
7165         return err;
7166 }
7167
7168 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7169 {
7170         if (key->initiator != 0x00 && key->initiator != 0x01)
7171                 return false;
7172
7173         switch (key->addr.type) {
7174         case BDADDR_LE_PUBLIC:
7175                 return true;
7176
7177         case BDADDR_LE_RANDOM:
7178                 /* Two most significant bits shall be set */
7179                 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7180                         return false;
7181                 return true;
7182         }
7183
7184         return false;
7185 }
7186
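     /* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: validate and replace the
      * complete list of LTKs. Blocked keys and P-256 debug keys are
      * skipped rather than stored.
      */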
7187 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7188                                void *cp_data, u16 len)
7189 {
7190         struct mgmt_cp_load_long_term_keys *cp = cp_data;
7191         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7192                                    sizeof(struct mgmt_ltk_info));
7193         u16 key_count, expected_len;
7194         int i, err;
7195
7196         bt_dev_dbg(hdev, "sock %p", sk);
7197
7198         if (!lmp_le_capable(hdev))
7199                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7200                                        MGMT_STATUS_NOT_SUPPORTED);
7201
7202         key_count = __le16_to_cpu(cp->key_count);
7203         if (key_count > max_key_count) {
7204                 bt_dev_err(hdev, "load_ltks: key_count value %u too large",
7205                            key_count);
7206                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7207                                        MGMT_STATUS_INVALID_PARAMS);
7208         }
7209
7210         expected_len = struct_size(cp, keys, key_count);
7211         if (expected_len != len) {
7212                 bt_dev_err(hdev, "load_ltks: expected %u bytes, got %u bytes",
7213                            expected_len, len);
7214                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7215                                        MGMT_STATUS_INVALID_PARAMS);
7216         }
7217
7218         bt_dev_dbg(hdev, "key_count %u", key_count);
7219
7220         for (i = 0; i < key_count; i++) {
7221                 struct mgmt_ltk_info *key = &cp->keys[i];
7222
7223                 if (!ltk_is_valid(key))
7224                         return mgmt_cmd_status(sk, hdev->id,
7225                                                MGMT_OP_LOAD_LONG_TERM_KEYS,
7226                                                MGMT_STATUS_INVALID_PARAMS);
7227         }
7228
7229         hci_dev_lock(hdev);
7230
7231         hci_smp_ltks_clear(hdev);
7232
7233         for (i = 0; i < key_count; i++) {
7234                 struct mgmt_ltk_info *key = &cp->keys[i];
7235                 u8 type, authenticated;
7236                 u8 addr_type = le_addr_type(key->addr.type);
7237
7238                 if (hci_is_blocked_key(hdev,
7239                                        HCI_BLOCKED_KEY_TYPE_LTK,
7240                                        key->val)) {
7241                         bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7242                                     &key->addr.bdaddr);
7243                         continue;
7244                 }
7245
7246                 switch (key->type) {
7247                 case MGMT_LTK_UNAUTHENTICATED:
7248                         authenticated = 0x00;
7249                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7250                         break;
7251                 case MGMT_LTK_AUTHENTICATED:
7252                         authenticated = 0x01;
7253                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7254                         break;
7255                 case MGMT_LTK_P256_UNAUTH:
7256                         authenticated = 0x00;
7257                         type = SMP_LTK_P256;
7258                         break;
7259                 case MGMT_LTK_P256_AUTH:
7260                         authenticated = 0x01;
7261                         type = SMP_LTK_P256;
7262                         break;
7263                 case MGMT_LTK_P256_DEBUG:
7264                         authenticated = 0x00;
7265                         type = SMP_LTK_P256_DEBUG;
7266                         fallthrough;
7267                 default:
7268                         continue;
7269                 }
7270
7271                 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7272                 if (key->addr.type == BDADDR_BREDR)
7273                         addr_type = BDADDR_BREDR;
7274
7275                 hci_add_ltk(hdev, &key->addr.bdaddr,
7276                             addr_type, type, authenticated,
7277                             key->val, key->enc_size, key->ediv, key->rand);
7278         }
7279
7280         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7281                                 NULL, 0);
7282
7283         hci_dev_unlock(hdev);
7284
7285         return err;
7286 }
7287
7288 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7289 {
7290         struct mgmt_pending_cmd *cmd = data;
7291         struct hci_conn *conn = cmd->user_data;
7292         struct mgmt_cp_get_conn_info *cp = cmd->param;
7293         struct mgmt_rp_get_conn_info rp;
7294         u8 status;
7295
7296         bt_dev_dbg(hdev, "err %d", err);
7297
7298         memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7299
7300         status = mgmt_status(err);
7301         if (status == MGMT_STATUS_SUCCESS) {
7302                 rp.rssi = conn->rssi;
7303                 rp.tx_power = conn->tx_power;
7304                 rp.max_tx_power = conn->max_tx_power;
7305         } else {
7306                 rp.rssi = HCI_RSSI_INVALID;
7307                 rp.tx_power = HCI_TX_POWER_INVALID;
7308                 rp.max_tx_power = HCI_TX_POWER_INVALID;
7309         }
7310
7311         mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7312                           &rp, sizeof(rp));
7313
7314         mgmt_pending_free(cmd);
7315 }
7316
7317 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7318 {
7319         struct mgmt_pending_cmd *cmd = data;
7320         struct mgmt_cp_get_conn_info *cp = cmd->param;
7321         struct hci_conn *conn;
7322         int err;
7323         __le16   handle;
7324
7325         /* Make sure we are still connected */
7326         if (cp->addr.type == BDADDR_BREDR)
7327                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7328                                                &cp->addr.bdaddr);
7329         else
7330                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7331
7332         if (!conn || conn->state != BT_CONNECTED)
7333                 return MGMT_STATUS_NOT_CONNECTED;
7334
7335         cmd->user_data = conn;
7336         handle = cpu_to_le16(conn->handle);
7337
7338         /* Refresh RSSI each time */
7339         err = hci_read_rssi_sync(hdev, handle);
7340
7341         /* For LE links the TX power does not change, so we don't need
7342          * to query for it again once the value is known.
7343          */
7344         if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7345                      conn->tx_power == HCI_TX_POWER_INVALID))
7346                 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7347
7348         /* Max TX power needs to be read only once per connection */
7349         if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7350                 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7351
7352         return err;
7353 }
7354
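/* Get Connection Info command handler: reply with the cached RSSI/TX power
 * values if they are recent enough, otherwise queue get_conn_info_sync to
 * refresh them from the controller first.
 */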
7355 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7356                          u16 len)
7357 {
7358         struct mgmt_cp_get_conn_info *cp = data;
7359         struct mgmt_rp_get_conn_info rp;
7360         struct hci_conn *conn;
7361         unsigned long conn_info_age;
7362         int err = 0;
7363
7364         bt_dev_dbg(hdev, "sock %p", sk);
7365
7366         memset(&rp, 0, sizeof(rp));
7367         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7368         rp.addr.type = cp->addr.type;
7369
7370         if (!bdaddr_type_is_valid(cp->addr.type))
7371                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7372                                          MGMT_STATUS_INVALID_PARAMS,
7373                                          &rp, sizeof(rp));
7374
7375         hci_dev_lock(hdev);
7376
7377         if (!hdev_is_powered(hdev)) {
7378                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7379                                         MGMT_STATUS_NOT_POWERED, &rp,
7380                                         sizeof(rp));
7381                 goto unlock;
7382         }
7383
7384         if (cp->addr.type == BDADDR_BREDR)
7385                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7386                                                &cp->addr.bdaddr);
7387         else
7388                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7389
7390         if (!conn || conn->state != BT_CONNECTED) {
7391                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7392                                         MGMT_STATUS_NOT_CONNECTED, &rp,
7393                                         sizeof(rp));
7394                 goto unlock;
7395         }
7396
7397         /* To avoid the client trying to guess when to poll again, compute
7398          * the conn info age as a random value between hdev's min/max.
7399          */
7400         conn_info_age = hdev->conn_info_min_age +
7401                         prandom_u32_max(hdev->conn_info_max_age -
7402                                         hdev->conn_info_min_age);
7403
7404         /* Query controller to refresh cached values if they are too old or were
7405          * never read.
7406          */
7407         if (time_after(jiffies, conn->conn_info_timestamp +
7408                        msecs_to_jiffies(conn_info_age)) ||
7409             !conn->conn_info_timestamp) {
7410                 struct mgmt_pending_cmd *cmd;
7411
7412                 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7413                                        len);
7414                 if (!cmd) {
7415                         err = -ENOMEM;
7416                 } else {
7417                         err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7418                                                  cmd, get_conn_info_complete);
7419                 }
7420
7421                 if (err < 0) {
7422                         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7423                                           MGMT_STATUS_FAILED, &rp, sizeof(rp));
7424
7425                         if (cmd)
7426                                 mgmt_pending_free(cmd);
7427
7428                         goto unlock;
7429                 }
7430
7431                 conn->conn_info_timestamp = jiffies;
7432         } else {
7433                 /* Cache is valid, just reply with values cached in hci_conn */
7434                 rp.rssi = conn->rssi;
7435                 rp.tx_power = conn->tx_power;
7436                 rp.max_tx_power = conn->max_tx_power;
7437
7438                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7439                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7440         }
7441
7442 unlock:
7443         hci_dev_unlock(hdev);
7444         return err;
7445 }
7446
7447 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7448 {
7449         struct mgmt_pending_cmd *cmd = data;
7450         struct mgmt_cp_get_clock_info *cp = cmd->param;
7451         struct mgmt_rp_get_clock_info rp;
7452         struct hci_conn *conn = cmd->user_data;
7453         u8 status = mgmt_status(err);
7454
7455         bt_dev_dbg(hdev, "err %d", err);
7456
7457         memset(&rp, 0, sizeof(rp));
7458         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7459         rp.addr.type = cp->addr.type;
7460
7461         if (err)
7462                 goto complete;
7463
7464         rp.local_clock = cpu_to_le32(hdev->clock);
7465
7466         if (conn) {
7467                 rp.piconet_clock = cpu_to_le32(conn->clock);
7468                 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7469         }
7470
7471 complete:
7472         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7473                           sizeof(rp));
7474
7475         mgmt_pending_free(cmd);
7476 }
7477
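/* Runs from the hci_cmd_sync queue: read the local clock and then, if the
 * ACL connection still exists, the piconet clock for its handle.
 */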
7478 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7479 {
7480         struct mgmt_pending_cmd *cmd = data;
7481         struct mgmt_cp_get_clock_info *cp = cmd->param;
7482         struct hci_cp_read_clock hci_cp;
7483         struct hci_conn *conn;
7484
7485         memset(&hci_cp, 0, sizeof(hci_cp));
7486         hci_read_clock_sync(hdev, &hci_cp);
7487
7488         /* Make sure connection still exists */
7489         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7490         if (!conn || conn->state != BT_CONNECTED)
7491                 return MGMT_STATUS_NOT_CONNECTED;
7492
7493         cmd->user_data = conn;
7494         hci_cp.handle = cpu_to_le16(conn->handle);
7495         hci_cp.which = 0x01; /* Piconet clock */
7496
7497         return hci_read_clock_sync(hdev, &hci_cp);
7498 }
7499
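/* Get Clock Info command handler: only valid for BR/EDR addresses; queues
 * get_clock_info_sync to read the clock values from the controller.
 */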
7500 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7501                                                                 u16 len)
7502 {
7503         struct mgmt_cp_get_clock_info *cp = data;
7504         struct mgmt_rp_get_clock_info rp;
7505         struct mgmt_pending_cmd *cmd;
7506         struct hci_conn *conn;
7507         int err;
7508
7509         bt_dev_dbg(hdev, "sock %p", sk);
7510
7511         memset(&rp, 0, sizeof(rp));
7512         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7513         rp.addr.type = cp->addr.type;
7514
7515         if (cp->addr.type != BDADDR_BREDR)
7516                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7517                                          MGMT_STATUS_INVALID_PARAMS,
7518                                          &rp, sizeof(rp));
7519
7520         hci_dev_lock(hdev);
7521
7522         if (!hdev_is_powered(hdev)) {
7523                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7524                                         MGMT_STATUS_NOT_POWERED, &rp,
7525                                         sizeof(rp));
7526                 goto unlock;
7527         }
7528
7529         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7530                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7531                                                &cp->addr.bdaddr);
7532                 if (!conn || conn->state != BT_CONNECTED) {
7533                         err = mgmt_cmd_complete(sk, hdev->id,
7534                                                 MGMT_OP_GET_CLOCK_INFO,
7535                                                 MGMT_STATUS_NOT_CONNECTED,
7536                                                 &rp, sizeof(rp));
7537                         goto unlock;
7538                 }
7539         } else {
7540                 conn = NULL;
7541         }
7542
7543         cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7544         if (!cmd)
7545                 err = -ENOMEM;
7546         else
7547                 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7548                                          get_clock_info_complete);
7549
7550         if (err < 0) {
7551                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7552                                         MGMT_STATUS_FAILED, &rp, sizeof(rp));
7553
7554                 if (cmd)
7555                         mgmt_pending_free(cmd);
7556         }
7557
7558
7559 unlock:
7560         hci_dev_unlock(hdev);
7561         return err;
7562 }
7563
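/* Check whether an LE connection to the given address and address type
 * exists and is in the BT_CONNECTED state.
 */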
7564 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7565 {
7566         struct hci_conn *conn;
7567
7568         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7569         if (!conn)
7570                 return false;
7571
7572         if (conn->dst_type != type)
7573                 return false;
7574
7575         if (conn->state != BT_CONNECTED)
7576                 return false;
7577
7578         return true;
7579 }
7580
7581 /* This function requires the caller holds hdev->lock */
7582 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7583                                u8 addr_type, u8 auto_connect)
7584 {
7585         struct hci_conn_params *params;
7586
7587         params = hci_conn_params_add(hdev, addr, addr_type);
7588         if (!params)
7589                 return -EIO;
7590
7591         if (params->auto_connect == auto_connect)
7592                 return 0;
7593
7594         hci_pend_le_list_del_init(params);
7595
7596         switch (auto_connect) {
7597         case HCI_AUTO_CONN_DISABLED:
7598         case HCI_AUTO_CONN_LINK_LOSS:
7599                 /* If auto connect is being disabled while we're trying to
7600                  * connect to the device, keep connecting.
7601                  */
7602                 if (params->explicit_connect)
7603                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
7604                 break;
7605         case HCI_AUTO_CONN_REPORT:
7606                 if (params->explicit_connect)
7607                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
7608                 else
7609                         hci_pend_le_list_add(params, &hdev->pend_le_reports);
7610                 break;
7611         case HCI_AUTO_CONN_DIRECT:
7612         case HCI_AUTO_CONN_ALWAYS:
7613                 if (!is_connected(hdev, addr, addr_type))
7614                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
7615                 break;
7616         }
7617
7618         params->auto_connect = auto_connect;
7619
7620         bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7621                    addr, addr_type, auto_connect);
7622
7623         return 0;
7624 }
7625
7626 static void device_added(struct sock *sk, struct hci_dev *hdev,
7627                          bdaddr_t *bdaddr, u8 type, u8 action)
7628 {
7629         struct mgmt_ev_device_added ev;
7630
7631         bacpy(&ev.addr.bdaddr, bdaddr);
7632         ev.addr.type = type;
7633         ev.action = action;
7634
7635         mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7636 }
7637
7638 static int add_device_sync(struct hci_dev *hdev, void *data)
7639 {
7640         return hci_update_passive_scan_sync(hdev);
7641 }
7642
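/* Add Device command handler: BR/EDR addresses are added to the accept list,
 * while LE identity addresses get connection parameters with an auto-connect
 * policy derived from the requested action.
 */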
7643 static int add_device(struct sock *sk, struct hci_dev *hdev,
7644                       void *data, u16 len)
7645 {
7646         struct mgmt_cp_add_device *cp = data;
7647         u8 auto_conn, addr_type;
7648         struct hci_conn_params *params;
7649         int err;
7650         u32 current_flags = 0;
7651         u32 supported_flags;
7652
7653         bt_dev_dbg(hdev, "sock %p", sk);
7654
7655         if (!bdaddr_type_is_valid(cp->addr.type) ||
7656             !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7657                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7658                                          MGMT_STATUS_INVALID_PARAMS,
7659                                          &cp->addr, sizeof(cp->addr));
7660
7661         if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7662                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7663                                          MGMT_STATUS_INVALID_PARAMS,
7664                                          &cp->addr, sizeof(cp->addr));
7665
7666         hci_dev_lock(hdev);
7667
7668         if (cp->addr.type == BDADDR_BREDR) {
7669                 /* Only the incoming connections action is supported for now */
7670                 if (cp->action != 0x01) {
7671                         err = mgmt_cmd_complete(sk, hdev->id,
7672                                                 MGMT_OP_ADD_DEVICE,
7673                                                 MGMT_STATUS_INVALID_PARAMS,
7674                                                 &cp->addr, sizeof(cp->addr));
7675                         goto unlock;
7676                 }
7677
7678                 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7679                                                      &cp->addr.bdaddr,
7680                                                      cp->addr.type, 0);
7681                 if (err)
7682                         goto unlock;
7683
7684                 hci_update_scan(hdev);
7685
7686                 goto added;
7687         }
7688
7689         addr_type = le_addr_type(cp->addr.type);
7690
7691         if (cp->action == 0x02)
7692                 auto_conn = HCI_AUTO_CONN_ALWAYS;
7693         else if (cp->action == 0x01)
7694                 auto_conn = HCI_AUTO_CONN_DIRECT;
7695         else
7696                 auto_conn = HCI_AUTO_CONN_REPORT;
7697
7698         /* The kernel internally uses conn_params with a resolvable
7699          * private address, but Add Device allows only identity addresses.
7700          * Make sure this is enforced before calling
7701          * hci_conn_params_lookup.
7702          */
7703         if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7704                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7705                                         MGMT_STATUS_INVALID_PARAMS,
7706                                         &cp->addr, sizeof(cp->addr));
7707                 goto unlock;
7708         }
7709
7710         /* If the connection parameters don't exist for this device,
7711          * they will be created and configured with defaults.
7712          */
7713         if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7714                                 auto_conn) < 0) {
7715                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7716                                         MGMT_STATUS_FAILED, &cp->addr,
7717                                         sizeof(cp->addr));
7718                 goto unlock;
7719         } else {
7720                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7721                                                 addr_type);
7722                 if (params)
7723                         current_flags = params->flags;
7724         }
7725
7726         err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7727         if (err < 0)
7728                 goto unlock;
7729
7730 added:
7731         device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7732         supported_flags = hdev->conn_flags;
7733         device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7734                              supported_flags, current_flags);
7735
7736         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7737                                 MGMT_STATUS_SUCCESS, &cp->addr,
7738                                 sizeof(cp->addr));
7739
7740 unlock:
7741         hci_dev_unlock(hdev);
7742         return err;
7743 }
7744
7745 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7746                            bdaddr_t *bdaddr, u8 type)
7747 {
7748         struct mgmt_ev_device_removed ev;
7749
7750         bacpy(&ev.addr.bdaddr, bdaddr);
7751         ev.addr.type = type;
7752
7753         mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7754 }
7755
7756 static int remove_device_sync(struct hci_dev *hdev, void *data)
7757 {
7758         return hci_update_passive_scan_sync(hdev);
7759 }
7760
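/* Remove Device command handler: a BR/EDR address is removed from the accept
 * list, an LE identity address has its connection parameters freed, and
 * BDADDR_ANY flushes the accept list together with every LE parameter entry
 * that is neither disabled nor tied to an explicit connect.
 */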
7761 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7762                          void *data, u16 len)
7763 {
7764         struct mgmt_cp_remove_device *cp = data;
7765         int err;
7766
7767         bt_dev_dbg(hdev, "sock %p", sk);
7768
7769         hci_dev_lock(hdev);
7770
7771         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7772                 struct hci_conn_params *params;
7773                 u8 addr_type;
7774
7775                 if (!bdaddr_type_is_valid(cp->addr.type)) {
7776                         err = mgmt_cmd_complete(sk, hdev->id,
7777                                                 MGMT_OP_REMOVE_DEVICE,
7778                                                 MGMT_STATUS_INVALID_PARAMS,
7779                                                 &cp->addr, sizeof(cp->addr));
7780                         goto unlock;
7781                 }
7782
7783                 if (cp->addr.type == BDADDR_BREDR) {
7784                         err = hci_bdaddr_list_del(&hdev->accept_list,
7785                                                   &cp->addr.bdaddr,
7786                                                   cp->addr.type);
7787                         if (err) {
7788                                 err = mgmt_cmd_complete(sk, hdev->id,
7789                                                         MGMT_OP_REMOVE_DEVICE,
7790                                                         MGMT_STATUS_INVALID_PARAMS,
7791                                                         &cp->addr,
7792                                                         sizeof(cp->addr));
7793                                 goto unlock;
7794                         }
7795
7796                         hci_update_scan(hdev);
7797
7798                         device_removed(sk, hdev, &cp->addr.bdaddr,
7799                                        cp->addr.type);
7800                         goto complete;
7801                 }
7802
7803                 addr_type = le_addr_type(cp->addr.type);
7804
7805                 /* The kernel internally uses conn_params with a resolvable
7806                  * private address, but Remove Device allows only identity
7807                  * addresses. Make sure this is enforced before calling
7808                  * hci_conn_params_lookup.
7809                  */
7810                 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7811                         err = mgmt_cmd_complete(sk, hdev->id,
7812                                                 MGMT_OP_REMOVE_DEVICE,
7813                                                 MGMT_STATUS_INVALID_PARAMS,
7814                                                 &cp->addr, sizeof(cp->addr));
7815                         goto unlock;
7816                 }
7817
7818                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7819                                                 addr_type);
7820                 if (!params) {
7821                         err = mgmt_cmd_complete(sk, hdev->id,
7822                                                 MGMT_OP_REMOVE_DEVICE,
7823                                                 MGMT_STATUS_INVALID_PARAMS,
7824                                                 &cp->addr, sizeof(cp->addr));
7825                         goto unlock;
7826                 }
7827
7828                 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7829                     params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7830                         err = mgmt_cmd_complete(sk, hdev->id,
7831                                                 MGMT_OP_REMOVE_DEVICE,
7832                                                 MGMT_STATUS_INVALID_PARAMS,
7833                                                 &cp->addr, sizeof(cp->addr));
7834                         goto unlock;
7835                 }
7836
7837                 hci_conn_params_free(params);
7838
7839                 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7840         } else {
7841                 struct hci_conn_params *p, *tmp;
7842                 struct bdaddr_list *b, *btmp;
7843
7844                 if (cp->addr.type) {
7845                         err = mgmt_cmd_complete(sk, hdev->id,
7846                                                 MGMT_OP_REMOVE_DEVICE,
7847                                                 MGMT_STATUS_INVALID_PARAMS,
7848                                                 &cp->addr, sizeof(cp->addr));
7849                         goto unlock;
7850                 }
7851
7852                 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7853                         device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7854                         list_del(&b->list);
7855                         kfree(b);
7856                 }
7857
7858                 hci_update_scan(hdev);
7859
7860                 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7861                         if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7862                                 continue;
7863                         device_removed(sk, hdev, &p->addr, p->addr_type);
7864                         if (p->explicit_connect) {
7865                                 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7866                                 continue;
7867                         }
7868                         hci_conn_params_free(p);
7869                 }
7870
7871                 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7872         }
7873
7874         hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7875
7876 complete:
7877         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7878                                 MGMT_STATUS_SUCCESS, &cp->addr,
7879                                 sizeof(cp->addr));
7880 unlock:
7881         hci_dev_unlock(hdev);
7882         return err;
7883 }
7884
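/* Load Connection Parameters command handler: validate the parameter count
 * against the command length, clear previously disabled entries and store
 * each valid min/max interval, latency and timeout in hci_conn_params.
 */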
7885 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7886                            u16 len)
7887 {
7888         struct mgmt_cp_load_conn_param *cp = data;
7889         const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7890                                      sizeof(struct mgmt_conn_param));
7891         u16 param_count, expected_len;
7892         int i;
7893
7894         if (!lmp_le_capable(hdev))
7895                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7896                                        MGMT_STATUS_NOT_SUPPORTED);
7897
7898         param_count = __le16_to_cpu(cp->param_count);
7899         if (param_count > max_param_count) {
7900                 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7901                            param_count);
7902                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7903                                        MGMT_STATUS_INVALID_PARAMS);
7904         }
7905
7906         expected_len = struct_size(cp, params, param_count);
7907         if (expected_len != len) {
7908                 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7909                            expected_len, len);
7910                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7911                                        MGMT_STATUS_INVALID_PARAMS);
7912         }
7913
7914         bt_dev_dbg(hdev, "param_count %u", param_count);
7915
7916         hci_dev_lock(hdev);
7917
7918         hci_conn_params_clear_disabled(hdev);
7919
7920         for (i = 0; i < param_count; i++) {
7921                 struct mgmt_conn_param *param = &cp->params[i];
7922                 struct hci_conn_params *hci_param;
7923                 u16 min, max, latency, timeout;
7924                 u8 addr_type;
7925
7926                 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7927                            param->addr.type);
7928
7929                 if (param->addr.type == BDADDR_LE_PUBLIC) {
7930                         addr_type = ADDR_LE_DEV_PUBLIC;
7931                 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7932                         addr_type = ADDR_LE_DEV_RANDOM;
7933                 } else {
7934                         bt_dev_err(hdev, "ignoring invalid connection parameters");
7935                         continue;
7936                 }
7937
7938                 min = le16_to_cpu(param->min_interval);
7939                 max = le16_to_cpu(param->max_interval);
7940                 latency = le16_to_cpu(param->latency);
7941                 timeout = le16_to_cpu(param->timeout);
7942
7943                 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7944                            min, max, latency, timeout);
7945
7946                 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7947                         bt_dev_err(hdev, "ignoring invalid connection parameters");
7948                         continue;
7949                 }
7950
7951                 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7952                                                 addr_type);
7953                 if (!hci_param) {
7954                         bt_dev_err(hdev, "failed to add connection parameters");
7955                         continue;
7956                 }
7957
7958                 hci_param->conn_min_interval = min;
7959                 hci_param->conn_max_interval = max;
7960                 hci_param->conn_latency = latency;
7961                 hci_param->supervision_timeout = timeout;
7962         }
7963
7964         hci_dev_unlock(hdev);
7965
7966         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7967                                  NULL, 0);
7968 }
7969
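/* Set External Configuration command handler: only allowed while powered off
 * and when the controller sets HCI_QUIRK_EXTERNAL_CONFIG; toggling the option
 * may move the controller between the configured and unconfigured index
 * lists.
 */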
7970 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7971                                void *data, u16 len)
7972 {
7973         struct mgmt_cp_set_external_config *cp = data;
7974         bool changed;
7975         int err;
7976
7977         bt_dev_dbg(hdev, "sock %p", sk);
7978
7979         if (hdev_is_powered(hdev))
7980                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7981                                        MGMT_STATUS_REJECTED);
7982
7983         if (cp->config != 0x00 && cp->config != 0x01)
7984                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7985                                          MGMT_STATUS_INVALID_PARAMS);
7986
7987         if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7988                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7989                                        MGMT_STATUS_NOT_SUPPORTED);
7990
7991         hci_dev_lock(hdev);
7992
7993         if (cp->config)
7994                 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7995         else
7996                 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7997
7998         err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7999         if (err < 0)
8000                 goto unlock;
8001
8002         if (!changed)
8003                 goto unlock;
8004
8005         err = new_options(hdev, sk);
8006
8007         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8008                 mgmt_index_removed(hdev);
8009
8010                 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8011                         hci_dev_set_flag(hdev, HCI_CONFIG);
8012                         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8013
8014                         queue_work(hdev->req_workqueue, &hdev->power_on);
8015                 } else {
8016                         set_bit(HCI_RAW, &hdev->flags);
8017                         mgmt_index_added(hdev);
8018                 }
8019         }
8020
8021 unlock:
8022         hci_dev_unlock(hdev);
8023         return err;
8024 }
8025
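/* Set Public Address command handler: only allowed while powered off and when
 * the driver provides a set_bdaddr callback; changing the address of an
 * unconfigured controller may complete its configuration and trigger a power
 * on with the HCI_CONFIG flag set.
 */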
8026 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8027                               void *data, u16 len)
8028 {
8029         struct mgmt_cp_set_public_address *cp = data;
8030         bool changed;
8031         int err;
8032
8033         bt_dev_dbg(hdev, "sock %p", sk);
8034
8035         if (hdev_is_powered(hdev))
8036                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8037                                        MGMT_STATUS_REJECTED);
8038
8039         if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8040                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8041                                        MGMT_STATUS_INVALID_PARAMS);
8042
8043         if (!hdev->set_bdaddr)
8044                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8045                                        MGMT_STATUS_NOT_SUPPORTED);
8046
8047         hci_dev_lock(hdev);
8048
8049         changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8050         bacpy(&hdev->public_addr, &cp->bdaddr);
8051
8052         err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8053         if (err < 0)
8054                 goto unlock;
8055
8056         if (!changed)
8057                 goto unlock;
8058
8059         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8060                 err = new_options(hdev, sk);
8061
8062         if (is_configured(hdev)) {
8063                 mgmt_index_removed(hdev);
8064
8065                 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8066
8067                 hci_dev_set_flag(hdev, HCI_CONFIG);
8068                 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8069
8070                 queue_work(hdev->req_workqueue, &hdev->power_on);
8071         }
8072
8073 unlock:
8074         hci_dev_unlock(hdev);
8075         return err;
8076 }
8077
8078 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8079                                              int err)
8080 {
8081         const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8082         struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8083         u8 *h192, *r192, *h256, *r256;
8084         struct mgmt_pending_cmd *cmd = data;
8085         struct sk_buff *skb = cmd->skb;
8086         u8 status = mgmt_status(err);
8087         u16 eir_len;
8088
8089         if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8090                 return;
8091
8092         if (!status) {
8093                 if (!skb)
8094                         status = MGMT_STATUS_FAILED;
8095                 else if (IS_ERR(skb))
8096                         status = mgmt_status(PTR_ERR(skb));
8097                 else
8098                         status = mgmt_status(skb->data[0]);
8099         }
8100
8101         bt_dev_dbg(hdev, "status %u", status);
8102
8103         mgmt_cp = cmd->param;
8104
8105         if (status) {
8106                 status = mgmt_status(status);
8107                 eir_len = 0;
8108
8109                 h192 = NULL;
8110                 r192 = NULL;
8111                 h256 = NULL;
8112                 r256 = NULL;
8113         } else if (!bredr_sc_enabled(hdev)) {
8114                 struct hci_rp_read_local_oob_data *rp;
8115
8116                 if (skb->len != sizeof(*rp)) {
8117                         status = MGMT_STATUS_FAILED;
8118                         eir_len = 0;
8119                 } else {
8120                         status = MGMT_STATUS_SUCCESS;
8121                         rp = (void *)skb->data;
8122
8123                         eir_len = 5 + 18 + 18;
8124                         h192 = rp->hash;
8125                         r192 = rp->rand;
8126                         h256 = NULL;
8127                         r256 = NULL;
8128                 }
8129         } else {
8130                 struct hci_rp_read_local_oob_ext_data *rp;
8131
8132                 if (skb->len != sizeof(*rp)) {
8133                         status = MGMT_STATUS_FAILED;
8134                         eir_len = 0;
8135                 } else {
8136                         status = MGMT_STATUS_SUCCESS;
8137                         rp = (void *)skb->data;
8138
8139                         if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8140                                 eir_len = 5 + 18 + 18;
8141                                 h192 = NULL;
8142                                 r192 = NULL;
8143                         } else {
8144                                 eir_len = 5 + 18 + 18 + 18 + 18;
8145                                 h192 = rp->hash192;
8146                                 r192 = rp->rand192;
8147                         }
8148
8149                         h256 = rp->hash256;
8150                         r256 = rp->rand256;
8151                 }
8152         }
8153
8154         mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8155         if (!mgmt_rp)
8156                 goto done;
8157
8158         if (eir_len == 0)
8159                 goto send_rsp;
8160
8161         eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8162                                   hdev->dev_class, 3);
8163
8164         if (h192 && r192) {
8165                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8166                                           EIR_SSP_HASH_C192, h192, 16);
8167                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8168                                           EIR_SSP_RAND_R192, r192, 16);
8169         }
8170
8171         if (h256 && r256) {
8172                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8173                                           EIR_SSP_HASH_C256, h256, 16);
8174                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8175                                           EIR_SSP_RAND_R256, r256, 16);
8176         }
8177
8178 send_rsp:
8179         mgmt_rp->type = mgmt_cp->type;
8180         mgmt_rp->eir_len = cpu_to_le16(eir_len);
8181
8182         err = mgmt_cmd_complete(cmd->sk, hdev->id,
8183                                 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8184                                 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8185         if (err < 0 || status)
8186                 goto done;
8187
8188         hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8189
8190         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8191                                  mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8192                                  HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8193 done:
8194         if (skb && !IS_ERR(skb))
8195                 kfree_skb(skb);
8196
8197         kfree(mgmt_rp);
8198         mgmt_pending_remove(cmd);
8199 }
8200
8201 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8202                                   struct mgmt_cp_read_local_oob_ext_data *cp)
8203 {
8204         struct mgmt_pending_cmd *cmd;
8205         int err;
8206
8207         cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8208                                cp, sizeof(*cp));
8209         if (!cmd)
8210                 return -ENOMEM;
8211
8212         err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8213                                  read_local_oob_ext_data_complete);
8214
8215         if (err < 0) {
8216                 mgmt_pending_remove(cmd);
8217                 return err;
8218         }
8219
8220         return 0;
8221 }
8222
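/* Read Local OOB Extended Data command handler: for BR/EDR the SSP hash and
 * randomizer are read from the controller (when SSP is enabled), for LE the
 * address, role, flags and optional Secure Connections values are assembled
 * locally into the EIR encoded response.
 */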
8223 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8224                                    void *data, u16 data_len)
8225 {
8226         struct mgmt_cp_read_local_oob_ext_data *cp = data;
8227         struct mgmt_rp_read_local_oob_ext_data *rp;
8228         size_t rp_len;
8229         u16 eir_len;
8230         u8 status, flags, role, addr[7], hash[16], rand[16];
8231         int err;
8232
8233         bt_dev_dbg(hdev, "sock %p", sk);
8234
8235         if (hdev_is_powered(hdev)) {
8236                 switch (cp->type) {
8237                 case BIT(BDADDR_BREDR):
8238                         status = mgmt_bredr_support(hdev);
8239                         if (status)
8240                                 eir_len = 0;
8241                         else
8242                                 eir_len = 5;
8243                         break;
8244                 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8245                         status = mgmt_le_support(hdev);
8246                         if (status)
8247                                 eir_len = 0;
8248                         else
8249                                 eir_len = 9 + 3 + 18 + 18 + 3;
8250                         break;
8251                 default:
8252                         status = MGMT_STATUS_INVALID_PARAMS;
8253                         eir_len = 0;
8254                         break;
8255                 }
8256         } else {
8257                 status = MGMT_STATUS_NOT_POWERED;
8258                 eir_len = 0;
8259         }
8260
8261         rp_len = sizeof(*rp) + eir_len;
8262         rp = kmalloc(rp_len, GFP_ATOMIC);
8263         if (!rp)
8264                 return -ENOMEM;
8265
8266         if (!status && !lmp_ssp_capable(hdev)) {
8267                 status = MGMT_STATUS_NOT_SUPPORTED;
8268                 eir_len = 0;
8269         }
8270
8271         if (status)
8272                 goto complete;
8273
8274         hci_dev_lock(hdev);
8275
8276         eir_len = 0;
8277         switch (cp->type) {
8278         case BIT(BDADDR_BREDR):
8279                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8280                         err = read_local_ssp_oob_req(hdev, sk, cp);
8281                         hci_dev_unlock(hdev);
8282                         if (!err)
8283                                 goto done;
8284
8285                         status = MGMT_STATUS_FAILED;
8286                         goto complete;
8287                 } else {
8288                         eir_len = eir_append_data(rp->eir, eir_len,
8289                                                   EIR_CLASS_OF_DEV,
8290                                                   hdev->dev_class, 3);
8291                 }
8292                 break;
8293         case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8294                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8295                     smp_generate_oob(hdev, hash, rand) < 0) {
8296                         hci_dev_unlock(hdev);
8297                         status = MGMT_STATUS_FAILED;
8298                         goto complete;
8299                 }
8300
8301                 /* This should return the active RPA, but since the RPA
8302                  * is only programmed on demand, it is really hard to fill
8303                  * this in at the moment. For now disallow retrieving
8304                  * local out-of-band data when privacy is in use.
8305                  *
8306                  * Returning the identity address will not help here since
8307                  * pairing happens before the identity resolving key is
8308                  * known and thus the connection establishment happens
8309                  * based on the RPA and not the identity address.
8310                  */
8311                 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8312                         hci_dev_unlock(hdev);
8313                         status = MGMT_STATUS_REJECTED;
8314                         goto complete;
8315                 }
8316
8317                 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8318                    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8319                    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8320                     bacmp(&hdev->static_addr, BDADDR_ANY))) {
8321                         memcpy(addr, &hdev->static_addr, 6);
8322                         addr[6] = 0x01;
8323                 } else {
8324                         memcpy(addr, &hdev->bdaddr, 6);
8325                         addr[6] = 0x00;
8326                 }
8327
8328                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8329                                           addr, sizeof(addr));
8330
8331                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8332                         role = 0x02;
8333                 else
8334                         role = 0x01;
8335
8336                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8337                                           &role, sizeof(role));
8338
8339                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8340                         eir_len = eir_append_data(rp->eir, eir_len,
8341                                                   EIR_LE_SC_CONFIRM,
8342                                                   hash, sizeof(hash));
8343
8344                         eir_len = eir_append_data(rp->eir, eir_len,
8345                                                   EIR_LE_SC_RANDOM,
8346                                                   rand, sizeof(rand));
8347                 }
8348
8349                 flags = mgmt_get_adv_discov_flags(hdev);
8350
8351                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8352                         flags |= LE_AD_NO_BREDR;
8353
8354                 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8355                                           &flags, sizeof(flags));
8356                 break;
8357         }
8358
8359         hci_dev_unlock(hdev);
8360
8361         hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8362
8363         status = MGMT_STATUS_SUCCESS;
8364
8365 complete:
8366         rp->type = cp->type;
8367         rp->eir_len = cpu_to_le16(eir_len);
8368
8369         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8370                                 status, rp, sizeof(*rp) + eir_len);
8371         if (err < 0 || status)
8372                 goto done;
8373
8374         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8375                                  rp, sizeof(*rp) + eir_len,
8376                                  HCI_MGMT_OOB_DATA_EVENTS, sk);
8377
8378 done:
8379         kfree(rp);
8380
8381         return err;
8382 }
8383
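/* Build the bitmask of advertising flags this controller supports, including
 * the secondary PHY and TX power bits when extended advertising is available.
 */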
8384 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8385 {
8386         u32 flags = 0;
8387
8388         flags |= MGMT_ADV_FLAG_CONNECTABLE;
8389         flags |= MGMT_ADV_FLAG_DISCOV;
8390         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8391         flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8392         flags |= MGMT_ADV_FLAG_APPEARANCE;
8393         flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8394         flags |= MGMT_ADV_PARAM_DURATION;
8395         flags |= MGMT_ADV_PARAM_TIMEOUT;
8396         flags |= MGMT_ADV_PARAM_INTERVALS;
8397         flags |= MGMT_ADV_PARAM_TX_POWER;
8398         flags |= MGMT_ADV_PARAM_SCAN_RSP;
8399
8400         /* With extended advertising the TX_POWER returned from Set Adv
8401          * Param will always be valid.
8402          */
8403         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8404                 flags |= MGMT_ADV_FLAG_TX_POWER;
8405
8406         if (ext_adv_capable(hdev)) {
8407                 flags |= MGMT_ADV_FLAG_SEC_1M;
8408                 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8409                 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8410
8411                 if (hdev->le_features[1] & HCI_LE_PHY_2M)
8412                         flags |= MGMT_ADV_FLAG_SEC_2M;
8413
8414                 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8415                         flags |= MGMT_ADV_FLAG_SEC_CODED;
8416         }
8417
8418         return flags;
8419 }
8420
8421 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8422                              void *data, u16 data_len)
8423 {
8424         struct mgmt_rp_read_adv_features *rp;
8425         size_t rp_len;
8426         int err;
8427         struct adv_info *adv_instance;
8428         u32 supported_flags;
8429         u8 *instance;
8430
8431         bt_dev_dbg(hdev, "sock %p", sk);
8432
8433         if (!lmp_le_capable(hdev))
8434                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8435                                        MGMT_STATUS_REJECTED);
8436
8437         hci_dev_lock(hdev);
8438
8439         rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8440         rp = kmalloc(rp_len, GFP_ATOMIC);
8441         if (!rp) {
8442                 hci_dev_unlock(hdev);
8443                 return -ENOMEM;
8444         }
8445
8446         supported_flags = get_supported_adv_flags(hdev);
8447
8448         rp->supported_flags = cpu_to_le32(supported_flags);
8449         rp->max_adv_data_len = max_adv_len(hdev);
8450         rp->max_scan_rsp_len = max_adv_len(hdev);
8451         rp->max_instances = hdev->le_num_of_adv_sets;
8452         rp->num_instances = hdev->adv_instance_cnt;
8453
8454         instance = rp->instance;
8455         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8456                 /* Only instances 1-le_num_of_adv_sets are externally visible */
8457                 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8458                         *instance = adv_instance->instance;
8459                         instance++;
8460                 } else {
8461                         rp->num_instances--;
8462                         rp_len--;
8463                 }
8464         }
8465
8466         hci_dev_unlock(hdev);
8467
8468         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8469                                 MGMT_STATUS_SUCCESS, rp, rp_len);
8470
8471         kfree(rp);
8472
8473         return err;
8474 }
8475
8476 static u8 calculate_name_len(struct hci_dev *hdev)
8477 {
8478         u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8479
8480         return eir_append_local_name(hdev, buf, 0);
8481 }
8482
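/* Calculate how many bytes of advertising or scan response data remain
 * available once the kernel managed fields (flags, TX power, local name,
 * appearance) selected by adv_flags have been accounted for.
 */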
8483 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8484                            bool is_adv_data)
8485 {
8486         u8 max_len = max_adv_len(hdev);
8487
8488         if (is_adv_data) {
8489                 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8490                                  MGMT_ADV_FLAG_LIMITED_DISCOV |
8491                                  MGMT_ADV_FLAG_MANAGED_FLAGS))
8492                         max_len -= 3;
8493
8494                 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8495                         max_len -= 3;
8496         } else {
8497                 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8498                         max_len -= calculate_name_len(hdev);
8499
8500                 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8501                         max_len -= 4;
8502         }
8503
8504         return max_len;
8505 }
8506
8507 static bool flags_managed(u32 adv_flags)
8508 {
8509         return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8510                             MGMT_ADV_FLAG_LIMITED_DISCOV |
8511                             MGMT_ADV_FLAG_MANAGED_FLAGS);
8512 }
8513
8514 static bool tx_power_managed(u32 adv_flags)
8515 {
8516         return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8517 }
8518
8519 static bool name_managed(u32 adv_flags)
8520 {
8521         return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8522 }
8523
8524 static bool appearance_managed(u32 adv_flags)
8525 {
8526         return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8527 }
8528
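/* Validate user supplied advertising or scan response TLV data: it must fit
 * within the available length and must not contain fields that are already
 * managed by the kernel for the given flags.
 */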
8529 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8530                               u8 len, bool is_adv_data)
8531 {
8532         int i, cur_len;
8533         u8 max_len;
8534
8535         max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8536
8537         if (len > max_len)
8538                 return false;
8539
8540         /* Make sure that the data is correctly formatted. */
8541         for (i = 0; i < len; i += (cur_len + 1)) {
8542                 cur_len = data[i];
8543
8544                 if (!cur_len)
8545                         continue;
8546
8547                 if (data[i + 1] == EIR_FLAGS &&
8548                     (!is_adv_data || flags_managed(adv_flags)))
8549                         return false;
8550
8551                 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8552                         return false;
8553
8554                 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8555                         return false;
8556
8557                 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8558                         return false;
8559
8560                 if (data[i + 1] == EIR_APPEARANCE &&
8561                     appearance_managed(adv_flags))
8562                         return false;
8563
8564                 /* If the current field length would exceed the total data
8565                  * length, then it's invalid.
8566                  */
8567                 if (i + cur_len >= len)
8568                         return false;
8569         }
8570
8571         return true;
8572 }
8573
8574 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8575 {
8576         u32 supported_flags, phy_flags;
8577
8578         /* The current implementation only supports a subset of the specified
8579          * flags. Also check the mutual exclusiveness of the sec flags.
8580          */
8581         supported_flags = get_supported_adv_flags(hdev);
8582         phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8583         if (adv_flags & ~supported_flags ||
8584             ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8585                 return false;
8586
8587         return true;
8588 }
8589
8590 static bool adv_busy(struct hci_dev *hdev)
8591 {
8592         return pending_find(MGMT_OP_SET_LE, hdev);
8593 }
8594
8595 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8596                              int err)
8597 {
8598         struct adv_info *adv, *n;
8599
8600         bt_dev_dbg(hdev, "err %d", err);
8601
8602         hci_dev_lock(hdev);
8603
8604         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8605                 u8 instance;
8606
8607                 if (!adv->pending)
8608                         continue;
8609
8610                 if (!err) {
8611                         adv->pending = false;
8612                         continue;
8613                 }
8614
8615                 instance = adv->instance;
8616
8617                 if (hdev->cur_adv_instance == instance)
8618                         cancel_adv_timeout(hdev);
8619
8620                 hci_remove_adv_instance(hdev, instance);
8621                 mgmt_advertising_removed(sk, hdev, instance);
8622         }
8623
8624         hci_dev_unlock(hdev);
8625 }
8626
8627 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8628 {
8629         struct mgmt_pending_cmd *cmd = data;
8630         struct mgmt_cp_add_advertising *cp = cmd->param;
8631         struct mgmt_rp_add_advertising rp;
8632
8633         memset(&rp, 0, sizeof(rp));
8634
8635         rp.instance = cp->instance;
8636
8637         if (err)
8638                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8639                                 mgmt_status(err));
8640         else
8641                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8642                                   mgmt_status(err), &rp, sizeof(rp));
8643
8644         add_adv_complete(hdev, cmd->sk, cp->instance, err);
8645
8646         mgmt_pending_free(cmd);
8647 }
8648
8649 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8650 {
8651         struct mgmt_pending_cmd *cmd = data;
8652         struct mgmt_cp_add_advertising *cp = cmd->param;
8653
8654         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8655 }
8656
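/* Add Advertising command handler: register or update the advertising
 * instance and, unless the legacy HCI_ADVERTISING mode is active or the
 * device is powered off, queue add_advertising_sync to schedule it on the
 * controller.
 */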
8657 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8658                            void *data, u16 data_len)
8659 {
8660         struct mgmt_cp_add_advertising *cp = data;
8661         struct mgmt_rp_add_advertising rp;
8662         u32 flags;
8663         u8 status;
8664         u16 timeout, duration;
8665         unsigned int prev_instance_cnt;
8666         u8 schedule_instance = 0;
8667         struct adv_info *adv, *next_instance;
8668         int err;
8669         struct mgmt_pending_cmd *cmd;
8670
8671         bt_dev_dbg(hdev, "sock %p", sk);
8672
8673         status = mgmt_le_support(hdev);
8674         if (status)
8675                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8676                                        status);
8677
8678         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8679                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8680                                        MGMT_STATUS_INVALID_PARAMS);
8681
8682         if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8683                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8684                                        MGMT_STATUS_INVALID_PARAMS);
8685
8686         flags = __le32_to_cpu(cp->flags);
8687         timeout = __le16_to_cpu(cp->timeout);
8688         duration = __le16_to_cpu(cp->duration);
8689
8690         if (!requested_adv_flags_are_valid(hdev, flags))
8691                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8692                                        MGMT_STATUS_INVALID_PARAMS);
8693
8694         hci_dev_lock(hdev);
8695
8696         if (timeout && !hdev_is_powered(hdev)) {
8697                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8698                                       MGMT_STATUS_REJECTED);
8699                 goto unlock;
8700         }
8701
8702         if (adv_busy(hdev)) {
8703                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8704                                       MGMT_STATUS_BUSY);
8705                 goto unlock;
8706         }
8707
8708         if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8709             !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8710                                cp->scan_rsp_len, false)) {
8711                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8712                                       MGMT_STATUS_INVALID_PARAMS);
8713                 goto unlock;
8714         }
8715
8716         prev_instance_cnt = hdev->adv_instance_cnt;
8717
8718         adv = hci_add_adv_instance(hdev, cp->instance, flags,
8719                                    cp->adv_data_len, cp->data,
8720                                    cp->scan_rsp_len,
8721                                    cp->data + cp->adv_data_len,
8722                                    timeout, duration,
8723                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
8724                                    hdev->le_adv_min_interval,
8725                                    hdev->le_adv_max_interval, 0);
8726         if (IS_ERR(adv)) {
8727                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8728                                       MGMT_STATUS_FAILED);
8729                 goto unlock;
8730         }
8731
8732         /* Only trigger an advertising added event if a new instance was
8733          * actually added.
8734          */
8735         if (hdev->adv_instance_cnt > prev_instance_cnt)
8736                 mgmt_advertising_added(sk, hdev, cp->instance);
8737
8738         if (hdev->cur_adv_instance == cp->instance) {
8739                 /* If the currently advertised instance is being changed then
8740                  * cancel the current advertising and schedule the next
8741                  * instance. If there is only one instance then the overridden
8742                  * advertising data will be visible right away.
8743                  */
8744                 cancel_adv_timeout(hdev);
8745
8746                 next_instance = hci_get_next_instance(hdev, cp->instance);
8747                 if (next_instance)
8748                         schedule_instance = next_instance->instance;
8749         } else if (!hdev->adv_instance_timeout) {
8750                 /* Immediately advertise the new instance if no other
8751                  * instance is currently being advertised.
8752                  */
8753                 schedule_instance = cp->instance;
8754         }
8755
8756         /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8757          * there is no instance to be advertised then we have no HCI
8758          * communication to make. Simply return.
8759          */
8760         if (!hdev_is_powered(hdev) ||
8761             hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8762             !schedule_instance) {
8763                 rp.instance = cp->instance;
8764                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8765                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8766                 goto unlock;
8767         }
8768
8769         /* We're good to go, update advertising data, parameters, and start
8770          * advertising.
8771          */
8772         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8773                                data_len);
8774         if (!cmd) {
8775                 err = -ENOMEM;
8776                 goto unlock;
8777         }
8778
8779         cp->instance = schedule_instance;
8780
8781         err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8782                                  add_advertising_complete);
8783         if (err < 0)
8784                 mgmt_pending_free(cmd);
8785
8786 unlock:
8787         hci_dev_unlock(hdev);
8788
8789         return err;
8790 }
8791
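/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS: on success report the
 * instance, the TX power that was selected and the space left for advertising
 * and scan response data; on failure tear the instance down again and return
 * an error status to the caller.
 */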
8792 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8793                                         int err)
8794 {
8795         struct mgmt_pending_cmd *cmd = data;
8796         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8797         struct mgmt_rp_add_ext_adv_params rp;
8798         struct adv_info *adv;
8799         u32 flags;
8800
8801         BT_DBG("%s", hdev->name);
8802
8803         hci_dev_lock(hdev);
8804
8805         adv = hci_find_adv_instance(hdev, cp->instance);
8806         if (!adv)
8807                 goto unlock;
8808
8809         rp.instance = cp->instance;
8810         rp.tx_power = adv->tx_power;
8811
8812         /* While we're at it, inform userspace of the available space for this
8813          * advertisement, given the flags that will be used.
8814          */
8815         flags = __le32_to_cpu(cp->flags);
8816         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8817         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8818
8819         if (err) {
8820                 /* If this instance was previously advertising and we failed
8821                  * to update it, signal that it has been removed and delete
8822                  * its structure.
8823                  */
8824                 if (!adv->pending)
8825                         mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8826
8827                 hci_remove_adv_instance(hdev, cp->instance);
8828
8829                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8830                                 mgmt_status(err));
8831         } else {
8832                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8833                                   mgmt_status(err), &rp, sizeof(rp));
8834         }
8835
8836 unlock:
8837         if (cmd)
8838                 mgmt_pending_free(cmd);
8839
8840         hci_dev_unlock(hdev);
8841 }
8842
8843 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8844 {
8845         struct mgmt_pending_cmd *cmd = data;
8846         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8847
8848         return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8849 }
8850
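/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS. Registering an extended advertising
 * instance is split across two commands; an illustrative sketch of the
 * sequence a management client follows (field names as used by the mgmt_cp_*
 * structures handled below, not an authoritative API description):
 *
 *   1. MGMT_OP_ADD_EXT_ADV_PARAMS { instance, flags, duration, timeout,
 *                                   min_interval, max_interval, tx_power }
 *      -> reply carries tx_power, max_adv_data_len and max_scan_rsp_len
 *         so the caller can size its TLVs
 *   2. MGMT_OP_ADD_EXT_ADV_DATA   { instance, adv_data_len, scan_rsp_len,
 *                                   data[] }
 *
 * The instance only starts advertising once the data command completes.
 */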
8851 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8852                               void *data, u16 data_len)
8853 {
8854         struct mgmt_cp_add_ext_adv_params *cp = data;
8855         struct mgmt_rp_add_ext_adv_params rp;
8856         struct mgmt_pending_cmd *cmd = NULL;
8857         struct adv_info *adv;
8858         u32 flags, min_interval, max_interval;
8859         u16 timeout, duration;
8860         u8 status;
8861         s8 tx_power;
8862         int err;
8863
8864         BT_DBG("%s", hdev->name);
8865
8866         status = mgmt_le_support(hdev);
8867         if (status)
8868                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8869                                        status);
8870
8871         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8872                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8873                                        MGMT_STATUS_INVALID_PARAMS);
8874
8875         /* The purpose of breaking add_advertising into two separate MGMT calls
8876          * for params and data is to allow more parameters to be added to this
8877          * structure in the future. For this reason, we only verify that the
8878          * request contains the fields defined when the interface was introduced.
8879          * Any extra parameters we don't know about will be ignored.
8880          */
8881         if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8882                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8883                                        MGMT_STATUS_INVALID_PARAMS);
8884
8885         flags = __le32_to_cpu(cp->flags);
8886
8887         if (!requested_adv_flags_are_valid(hdev, flags))
8888                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8889                                        MGMT_STATUS_INVALID_PARAMS);
8890
8891         hci_dev_lock(hdev);
8892
8893         /* In the new interface we require the controller to be powered to register */
8894         if (!hdev_is_powered(hdev)) {
8895                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8896                                       MGMT_STATUS_REJECTED);
8897                 goto unlock;
8898         }
8899
8900         if (adv_busy(hdev)) {
8901                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8902                                       MGMT_STATUS_BUSY);
8903                 goto unlock;
8904         }
8905
8906         /* Parse defined parameters from request, use defaults otherwise */
8907         timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8908                   __le16_to_cpu(cp->timeout) : 0;
8909
8910         duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8911                    __le16_to_cpu(cp->duration) :
8912                    hdev->def_multi_adv_rotation_duration;
8913
8914         min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8915                        __le32_to_cpu(cp->min_interval) :
8916                        hdev->le_adv_min_interval;
8917
8918         max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8919                        __le32_to_cpu(cp->max_interval) :
8920                        hdev->le_adv_max_interval;
8921
8922         tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8923                    cp->tx_power :
8924                    HCI_ADV_TX_POWER_NO_PREFERENCE;
8925
8926         /* Create an advertising instance with no advertising or scan response data */
8927         adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8928                                    timeout, duration, tx_power, min_interval,
8929                                    max_interval, 0);
8930
8931         if (IS_ERR(adv)) {
8932                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8933                                       MGMT_STATUS_FAILED);
8934                 goto unlock;
8935         }
8936
8937         /* Submit request for advertising params if ext adv available */
8938         if (ext_adv_capable(hdev)) {
8939                 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8940                                        data, data_len);
8941                 if (!cmd) {
8942                         err = -ENOMEM;
8943                         hci_remove_adv_instance(hdev, cp->instance);
8944                         goto unlock;
8945                 }
8946
8947                 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8948                                          add_ext_adv_params_complete);
8949                 if (err < 0)
8950                         mgmt_pending_free(cmd);
8951         } else {
8952                 rp.instance = cp->instance;
8953                 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8954                 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8955                 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8956                 err = mgmt_cmd_complete(sk, hdev->id,
8957                                         MGMT_OP_ADD_EXT_ADV_PARAMS,
8958                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8959         }
8960
8961 unlock:
8962         hci_dev_unlock(hdev);
8963
8964         return err;
8965 }
8966
8967 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8968 {
8969         struct mgmt_pending_cmd *cmd = data;
8970         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8971         struct mgmt_rp_add_advertising rp;
8972
8973         add_adv_complete(hdev, cmd->sk, cp->instance, err);
8974
8975         memset(&rp, 0, sizeof(rp));
8976
8977         rp.instance = cp->instance;
8978
8979         if (err)
8980                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8981                                 mgmt_status(err));
8982         else
8983                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8984                                   mgmt_status(err), &rp, sizeof(rp));
8985
8986         mgmt_pending_free(cmd);
8987 }
8988
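/* Sync work for MGMT_OP_ADD_EXT_ADV_DATA: on controllers with extended
 * advertising push the advertising and scan response data and enable the
 * instance; otherwise fall back to scheduling the instance through the
 * software rotation used for legacy advertising.
 */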
8989 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8990 {
8991         struct mgmt_pending_cmd *cmd = data;
8992         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8993         int err;
8994
8995         if (ext_adv_capable(hdev)) {
8996                 err = hci_update_adv_data_sync(hdev, cp->instance);
8997                 if (err)
8998                         return err;
8999
9000                 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9001                 if (err)
9002                         return err;
9003
9004                 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9005         }
9006
9007         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9008 }
9009
9010 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9011                             u16 data_len)
9012 {
9013         struct mgmt_cp_add_ext_adv_data *cp = data;
9014         struct mgmt_rp_add_ext_adv_data rp;
9015         u8 schedule_instance = 0;
9016         struct adv_info *next_instance;
9017         struct adv_info *adv_instance;
9018         int err = 0;
9019         struct mgmt_pending_cmd *cmd;
9020
9021         BT_DBG("%s", hdev->name);
9022
9023         hci_dev_lock(hdev);
9024
9025         adv_instance = hci_find_adv_instance(hdev, cp->instance);
9026
9027         if (!adv_instance) {
9028                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9029                                       MGMT_STATUS_INVALID_PARAMS);
9030                 goto unlock;
9031         }
9032
9033         /* In the new interface we require the controller to be powered to register */
9034         if (!hdev_is_powered(hdev)) {
9035                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9036                                       MGMT_STATUS_REJECTED);
9037                 goto clear_new_instance;
9038         }
9039
9040         if (adv_busy(hdev)) {
9041                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9042                                       MGMT_STATUS_BUSY);
9043                 goto clear_new_instance;
9044         }
9045
9046         /* Validate new data */
9047         if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9048                                cp->adv_data_len, true) ||
9049             !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9050                                cp->adv_data_len, cp->scan_rsp_len, false)) {
9051                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9052                                       MGMT_STATUS_INVALID_PARAMS);
9053                 goto clear_new_instance;
9054         }
9055
9056         /* Set the data in the advertising instance */
9057         hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9058                                   cp->data, cp->scan_rsp_len,
9059                                   cp->data + cp->adv_data_len);
9060
9061         /* If using software rotation, determine next instance to use */
9062         if (hdev->cur_adv_instance == cp->instance) {
9063                 /* If the currently advertised instance is being changed
9064                  * then cancel the current advertising and schedule the
9065                  * next instance. If there is only one instance then the
9066                  * overridden advertising data will be visible right
9067                  * away.
9068                  */
9069                 cancel_adv_timeout(hdev);
9070
9071                 next_instance = hci_get_next_instance(hdev, cp->instance);
9072                 if (next_instance)
9073                         schedule_instance = next_instance->instance;
9074         } else if (!hdev->adv_instance_timeout) {
9075                 /* Immediately advertise the new instance if no other
9076                  * instance is currently being advertised.
9077                  */
9078                 schedule_instance = cp->instance;
9079         }
9080
9081         /* If the HCI_ADVERTISING flag is set or there is no instance to
9082          * be advertised then we have no HCI communication to make.
9083          * Simply return.
9084          */
9085         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9086                 if (adv_instance->pending) {
9087                         mgmt_advertising_added(sk, hdev, cp->instance);
9088                         adv_instance->pending = false;
9089                 }
9090                 rp.instance = cp->instance;
9091                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9092                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9093                 goto unlock;
9094         }
9095
9096         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9097                                data_len);
9098         if (!cmd) {
9099                 err = -ENOMEM;
9100                 goto clear_new_instance;
9101         }
9102
9103         err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9104                                  add_ext_adv_data_complete);
9105         if (err < 0) {
9106                 mgmt_pending_free(cmd);
9107                 goto clear_new_instance;
9108         }
9109
9110         /* We were successful in updating the data, so trigger an
9111          * advertising_added event if this instance wasn't previously
9112          * advertising. If a failure occurs in the requests we initiated,
9113          * the instance will be removed again in add_advertising_complete.
9114          */
9115         if (adv_instance->pending)
9116                 mgmt_advertising_added(sk, hdev, cp->instance);
9117
9118         goto unlock;
9119
9120 clear_new_instance:
9121         hci_remove_adv_instance(hdev, cp->instance);
9122
9123 unlock:
9124         hci_dev_unlock(hdev);
9125
9126         return err;
9127 }
9128
9129 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9130                                         int err)
9131 {
9132         struct mgmt_pending_cmd *cmd = data;
9133         struct mgmt_cp_remove_advertising *cp = cmd->param;
9134         struct mgmt_rp_remove_advertising rp;
9135
9136         bt_dev_dbg(hdev, "err %d", err);
9137
9138         memset(&rp, 0, sizeof(rp));
9139         rp.instance = cp->instance;
9140
9141         if (err)
9142                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9143                                 mgmt_status(err));
9144         else
9145                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9146                                   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9147
9148         mgmt_pending_free(cmd);
9149 }
9150
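/* Sync work for MGMT_OP_REMOVE_ADVERTISING: remove the requested instance
 * (an instance of 0 is accepted by the handler below and, per the management
 * API, removes all instances) and disable advertising entirely once no
 * instances remain.
 */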
9151 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9152 {
9153         struct mgmt_pending_cmd *cmd = data;
9154         struct mgmt_cp_remove_advertising *cp = cmd->param;
9155         int err;
9156
9157         err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9158         if (err)
9159                 return err;
9160
9161         if (list_empty(&hdev->adv_instances))
9162                 err = hci_disable_advertising_sync(hdev);
9163
9164         return err;
9165 }
9166
9167 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9168                               void *data, u16 data_len)
9169 {
9170         struct mgmt_cp_remove_advertising *cp = data;
9171         struct mgmt_pending_cmd *cmd;
9172         int err;
9173
9174         bt_dev_dbg(hdev, "sock %p", sk);
9175
9176         hci_dev_lock(hdev);
9177
9178         if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9179                 err = mgmt_cmd_status(sk, hdev->id,
9180                                       MGMT_OP_REMOVE_ADVERTISING,
9181                                       MGMT_STATUS_INVALID_PARAMS);
9182                 goto unlock;
9183         }
9184
9185         if (pending_find(MGMT_OP_SET_LE, hdev)) {
9186                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9187                                       MGMT_STATUS_BUSY);
9188                 goto unlock;
9189         }
9190
9191         if (list_empty(&hdev->adv_instances)) {
9192                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9193                                       MGMT_STATUS_INVALID_PARAMS);
9194                 goto unlock;
9195         }
9196
9197         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9198                                data_len);
9199         if (!cmd) {
9200                 err = -ENOMEM;
9201                 goto unlock;
9202         }
9203
9204         err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9205                                  remove_advertising_complete);
9206         if (err < 0)
9207                 mgmt_pending_free(cmd);
9208
9209 unlock:
9210         hci_dev_unlock(hdev);
9211
9212         return err;
9213 }
9214
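/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: report how much advertising and scan
 * response data would fit for the given instance and flags, without touching
 * any controller state.
 */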
9215 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9216                              void *data, u16 data_len)
9217 {
9218         struct mgmt_cp_get_adv_size_info *cp = data;
9219         struct mgmt_rp_get_adv_size_info rp;
9220         u32 flags, supported_flags;
9221
9222         bt_dev_dbg(hdev, "sock %p", sk);
9223
9224         if (!lmp_le_capable(hdev))
9225                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9226                                        MGMT_STATUS_REJECTED);
9227
9228         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9229                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9230                                        MGMT_STATUS_INVALID_PARAMS);
9231
9232         flags = __le32_to_cpu(cp->flags);
9233
9234         /* The current implementation only supports a subset of the specified
9235          * flags.
9236          */
9237         supported_flags = get_supported_adv_flags(hdev);
9238         if (flags & ~supported_flags)
9239                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9240                                        MGMT_STATUS_INVALID_PARAMS);
9241
9242         rp.instance = cp->instance;
9243         rp.flags = cp->flags;
9244         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9245         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9246
9247         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9248                                  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9249 }
9250
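/* Table of command handlers, indexed by management opcode. Each entry gives
 * the handler, the expected parameter size (a minimum when HCI_MGMT_VAR_LEN
 * is set) and optional flags such as HCI_MGMT_NO_HDEV, HCI_MGMT_UNTRUSTED or
 * HCI_MGMT_UNCONFIGURED.
 */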
9251 static const struct hci_mgmt_handler mgmt_handlers[] = {
9252         { NULL }, /* 0x0000 (no command) */
9253         { read_version,            MGMT_READ_VERSION_SIZE,
9254                                                 HCI_MGMT_NO_HDEV |
9255                                                 HCI_MGMT_UNTRUSTED },
9256         { read_commands,           MGMT_READ_COMMANDS_SIZE,
9257                                                 HCI_MGMT_NO_HDEV |
9258                                                 HCI_MGMT_UNTRUSTED },
9259         { read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
9260                                                 HCI_MGMT_NO_HDEV |
9261                                                 HCI_MGMT_UNTRUSTED },
9262         { read_controller_info,    MGMT_READ_INFO_SIZE,
9263                                                 HCI_MGMT_UNTRUSTED },
9264         { set_powered,             MGMT_SETTING_SIZE },
9265         { set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
9266         { set_connectable,         MGMT_SETTING_SIZE },
9267         { set_fast_connectable,    MGMT_SETTING_SIZE },
9268         { set_bondable,            MGMT_SETTING_SIZE },
9269         { set_link_security,       MGMT_SETTING_SIZE },
9270         { set_ssp,                 MGMT_SETTING_SIZE },
9271         { set_hs,                  MGMT_SETTING_SIZE },
9272         { set_le,                  MGMT_SETTING_SIZE },
9273         { set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
9274         { set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
9275         { add_uuid,                MGMT_ADD_UUID_SIZE },
9276         { remove_uuid,             MGMT_REMOVE_UUID_SIZE },
9277         { load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
9278                                                 HCI_MGMT_VAR_LEN },
9279         { load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9280                                                 HCI_MGMT_VAR_LEN },
9281         { disconnect,              MGMT_DISCONNECT_SIZE },
9282         { get_connections,         MGMT_GET_CONNECTIONS_SIZE },
9283         { pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
9284         { pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
9285         { set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
9286         { pair_device,             MGMT_PAIR_DEVICE_SIZE },
9287         { cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
9288         { unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
9289         { user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
9290         { user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9291         { user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
9292         { user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9293         { read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
9294         { add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9295                                                 HCI_MGMT_VAR_LEN },
9296         { remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9297         { start_discovery,         MGMT_START_DISCOVERY_SIZE },
9298         { stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
9299         { confirm_name,            MGMT_CONFIRM_NAME_SIZE },
9300         { block_device,            MGMT_BLOCK_DEVICE_SIZE },
9301         { unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
9302         { set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
9303         { set_advertising,         MGMT_SETTING_SIZE },
9304         { set_bredr,               MGMT_SETTING_SIZE },
9305         { set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
9306         { set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
9307         { set_secure_conn,         MGMT_SETTING_SIZE },
9308         { set_debug_keys,          MGMT_SETTING_SIZE },
9309         { set_privacy,             MGMT_SET_PRIVACY_SIZE },
9310         { load_irks,               MGMT_LOAD_IRKS_SIZE,
9311                                                 HCI_MGMT_VAR_LEN },
9312         { get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
9313         { get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
9314         { add_device,              MGMT_ADD_DEVICE_SIZE },
9315         { remove_device,           MGMT_REMOVE_DEVICE_SIZE },
9316         { load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
9317                                                 HCI_MGMT_VAR_LEN },
9318         { read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9319                                                 HCI_MGMT_NO_HDEV |
9320                                                 HCI_MGMT_UNTRUSTED },
9321         { read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
9322                                                 HCI_MGMT_UNCONFIGURED |
9323                                                 HCI_MGMT_UNTRUSTED },
9324         { set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
9325                                                 HCI_MGMT_UNCONFIGURED },
9326         { set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
9327                                                 HCI_MGMT_UNCONFIGURED },
9328         { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9329                                                 HCI_MGMT_VAR_LEN },
9330         { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9331         { read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
9332                                                 HCI_MGMT_NO_HDEV |
9333                                                 HCI_MGMT_UNTRUSTED },
9334         { read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
9335         { add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
9336                                                 HCI_MGMT_VAR_LEN },
9337         { remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
9338         { get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
9339         { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9340         { read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
9341                                                 HCI_MGMT_UNTRUSTED },
9342         { set_appearance,          MGMT_SET_APPEARANCE_SIZE },
9343         { get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
9344         { set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
9345         { set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9346                                                 HCI_MGMT_VAR_LEN },
9347         { set_wideband_speech,     MGMT_SETTING_SIZE },
9348         { read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
9349                                                 HCI_MGMT_UNTRUSTED },
9350         { read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
9351                                                 HCI_MGMT_UNTRUSTED |
9352                                                 HCI_MGMT_HDEV_OPTIONAL },
9353         { set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
9354                                                 HCI_MGMT_VAR_LEN |
9355                                                 HCI_MGMT_HDEV_OPTIONAL },
9356         { read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9357                                                 HCI_MGMT_UNTRUSTED },
9358         { set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9359                                                 HCI_MGMT_VAR_LEN },
9360         { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9361                                                 HCI_MGMT_UNTRUSTED },
9362         { set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9363                                                 HCI_MGMT_VAR_LEN },
9364         { get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
9365         { set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
9366         { read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9367         { add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9368                                                 HCI_MGMT_VAR_LEN },
9369         { remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
9370         { add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9371                                                 HCI_MGMT_VAR_LEN },
9372         { add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
9373                                                 HCI_MGMT_VAR_LEN },
9374         { add_adv_patterns_monitor_rssi,
9375                                    MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9376                                                 HCI_MGMT_VAR_LEN },
9377         { set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
9378                                                 HCI_MGMT_VAR_LEN },
9379         { mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
9380         { mesh_send,               MGMT_MESH_SEND_SIZE,
9381                                                 HCI_MGMT_VAR_LEN },
9382         { mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
9383 };
9384
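/* Notify management sockets that a controller index has been added, using the
 * configured or unconfigured index event for primary controllers and the
 * extended index event (which also covers AMP controllers) for all types.
 */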
9385 void mgmt_index_added(struct hci_dev *hdev)
9386 {
9387         struct mgmt_ev_ext_index ev;
9388
9389         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9390                 return;
9391
9392         switch (hdev->dev_type) {
9393         case HCI_PRIMARY:
9394                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9395                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9396                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9397                         ev.type = 0x01;
9398                 } else {
9399                         mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9400                                          HCI_MGMT_INDEX_EVENTS);
9401                         ev.type = 0x00;
9402                 }
9403                 break;
9404         case HCI_AMP:
9405                 ev.type = 0x02;
9406                 break;
9407         default:
9408                 return;
9409         }
9410
9411         ev.bus = hdev->bus;
9412
9413         mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9414                          HCI_MGMT_EXT_INDEX_EVENTS);
9415 }
9416
9417 void mgmt_index_removed(struct hci_dev *hdev)
9418 {
9419         struct mgmt_ev_ext_index ev;
9420         u8 status = MGMT_STATUS_INVALID_INDEX;
9421
9422         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9423                 return;
9424
9425         switch (hdev->dev_type) {
9426         case HCI_PRIMARY:
9427                 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9428
9429                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9430                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9431                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9432                         ev.type = 0x01;
9433                 } else {
9434                         mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9435                                          HCI_MGMT_INDEX_EVENTS);
9436                         ev.type = 0x00;
9437                 }
9438                 break;
9439         case HCI_AMP:
9440                 ev.type = 0x02;
9441                 break;
9442         default:
9443                 return;
9444         }
9445
9446         ev.bus = hdev->bus;
9447
9448         mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9449                          HCI_MGMT_EXT_INDEX_EVENTS);
9450
9451         /* Cancel any remaining timed work */
9452         if (!hci_dev_test_flag(hdev, HCI_MGMT))
9453                 return;
9454         cancel_delayed_work_sync(&hdev->discov_off);
9455         cancel_delayed_work_sync(&hdev->service_cache);
9456         cancel_delayed_work_sync(&hdev->rpa_expired);
9457 }
9458
9459 void mgmt_power_on(struct hci_dev *hdev, int err)
9460 {
9461         struct cmd_lookup match = { NULL, hdev };
9462
9463         bt_dev_dbg(hdev, "err %d", err);
9464
9465         hci_dev_lock(hdev);
9466
9467         if (!err) {
9468                 restart_le_actions(hdev);
9469                 hci_update_passive_scan(hdev);
9470         }
9471
9472         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9473
9474         new_settings(hdev, match.sk);
9475
9476         if (match.sk)
9477                 sock_put(match.sk);
9478
9479         hci_dev_unlock(hdev);
9480 }
9481
9482 void __mgmt_power_off(struct hci_dev *hdev)
9483 {
9484         struct cmd_lookup match = { NULL, hdev };
9485         u8 status, zero_cod[] = { 0, 0, 0 };
9486
9487         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9488
9489         /* If the power off is because of hdev unregistration, use
9490          * the appropriate INVALID_INDEX status. Otherwise use
9491          * NOT_POWERED. We cover both scenarios here since later in
9492          * mgmt_index_removed() any hci_conn callbacks will have already
9493          * been triggered, potentially causing misleading DISCONNECTED
9494          * status responses.
9495          */
9496         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9497                 status = MGMT_STATUS_INVALID_INDEX;
9498         else
9499                 status = MGMT_STATUS_NOT_POWERED;
9500
9501         mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9502
9503         if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9504                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9505                                    zero_cod, sizeof(zero_cod),
9506                                    HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9507                 ext_info_changed(hdev, NULL);
9508         }
9509
9510         new_settings(hdev, match.sk);
9511
9512         if (match.sk)
9513                 sock_put(match.sk);
9514 }
9515
9516 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9517 {
9518         struct mgmt_pending_cmd *cmd;
9519         u8 status;
9520
9521         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9522         if (!cmd)
9523                 return;
9524
9525         if (err == -ERFKILL)
9526                 status = MGMT_STATUS_RFKILLED;
9527         else
9528                 status = MGMT_STATUS_FAILED;
9529
9530         mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9531
9532         mgmt_pending_remove(cmd);
9533 }
9534
9535 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9536                        bool persistent)
9537 {
9538         struct mgmt_ev_new_link_key ev;
9539
9540         memset(&ev, 0, sizeof(ev));
9541
9542         ev.store_hint = persistent;
9543         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9544         ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9545         ev.key.type = key->type;
9546         memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9547         ev.key.pin_len = key->pin_len;
9548
9549         mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9550 }
9551
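/* Map an SMP long term key to the corresponding MGMT key type, distinguishing
 * authenticated from unauthenticated keys and P-256 (Secure Connections) from
 * legacy pairing.
 */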
9552 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9553 {
9554         switch (ltk->type) {
9555         case SMP_LTK:
9556         case SMP_LTK_RESPONDER:
9557                 if (ltk->authenticated)
9558                         return MGMT_LTK_AUTHENTICATED;
9559                 return MGMT_LTK_UNAUTHENTICATED;
9560         case SMP_LTK_P256:
9561                 if (ltk->authenticated)
9562                         return MGMT_LTK_P256_AUTH;
9563                 return MGMT_LTK_P256_UNAUTH;
9564         case SMP_LTK_P256_DEBUG:
9565                 return MGMT_LTK_P256_DEBUG;
9566         }
9567
9568         return MGMT_LTK_UNAUTHENTICATED;
9569 }
9570
9571 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9572 {
9573         struct mgmt_ev_new_long_term_key ev;
9574
9575         memset(&ev, 0, sizeof(ev));
9576
9577         /* Devices using resolvable or non-resolvable random addresses
9578          * without providing an identity resolving key don't need their
9579          * long term keys to be stored. Their addresses will change the
9580          * next time around.
9581          *
9582          * Only when a remote device provides an identity address do we
9583          * make sure the long term key is stored. If the remote
9584          * identity is known, the long term keys are internally
9585          * mapped to the identity address. So allow static random
9586          * and public addresses here.
9587          */
9588         if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9589             (key->bdaddr.b[5] & 0xc0) != 0xc0)
9590                 ev.store_hint = 0x00;
9591         else
9592                 ev.store_hint = persistent;
9593
9594         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9595         ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9596         ev.key.type = mgmt_ltk_type(key);
9597         ev.key.enc_size = key->enc_size;
9598         ev.key.ediv = key->ediv;
9599         ev.key.rand = key->rand;
9600
9601         if (key->type == SMP_LTK)
9602                 ev.key.initiator = 1;
9603
9604         /* Make sure we copy only the significant bytes based on the
9605          * encryption key size, and set the rest of the value to zeroes.
9606          */
9607         memcpy(ev.key.val, key->val, key->enc_size);
9608         memset(ev.key.val + key->enc_size, 0,
9609                sizeof(ev.key.val) - key->enc_size);
9610
9611         mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9612 }
9613
9614 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9615 {
9616         struct mgmt_ev_new_irk ev;
9617
9618         memset(&ev, 0, sizeof(ev));
9619
9620         ev.store_hint = persistent;
9621
9622         bacpy(&ev.rpa, &irk->rpa);
9623         bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9624         ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
9625         memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9626
9627         mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9628 }
9629
9630 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9631                    bool persistent)
9632 {
9633         struct mgmt_ev_new_csrk ev;
9634
9635         memset(&ev, 0, sizeof(ev));
9636
9637         /* Devices using resolvable or non-resolvable random addresses
9638          * without providing an identity resolving key don't need their
9639          * signature resolving keys to be stored. Their addresses will
9640          * change the next time around.
9641          *
9642          * Only when a remote device provides an identity address do we
9643          * make sure the signature resolving key is stored. So allow
9644          * static random and public addresses here.
9645          */
9646         if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9647             (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9648                 ev.store_hint = 0x00;
9649         else
9650                 ev.store_hint = persistent;
9651
9652         bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9653         ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9654         ev.key.type = csrk->type;
9655         memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9656
9657         mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9658 }
9659
9660 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9661                          u8 bdaddr_type, u8 store_hint, u16 min_interval,
9662                          u16 max_interval, u16 latency, u16 timeout)
9663 {
9664         struct mgmt_ev_new_conn_param ev;
9665
9666         if (!hci_is_identity_address(bdaddr, bdaddr_type))
9667                 return;
9668
9669         memset(&ev, 0, sizeof(ev));
9670         bacpy(&ev.addr.bdaddr, bdaddr);
9671         ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9672         ev.store_hint = store_hint;
9673         ev.min_interval = cpu_to_le16(min_interval);
9674         ev.max_interval = cpu_to_le16(max_interval);
9675         ev.latency = cpu_to_le16(latency);
9676         ev.timeout = cpu_to_le16(timeout);
9677
9678         mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9679 }
9680
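/* Emit MGMT_EV_DEVICE_CONNECTED. For LE connections the event carries the
 * advertising data received from the remote device; for BR/EDR it instead
 * carries the remote name and class of device encoded as EIR fields.
 */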
9681 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9682                            u8 *name, u8 name_len)
9683 {
9684         struct sk_buff *skb;
9685         struct mgmt_ev_device_connected *ev;
9686         u16 eir_len = 0;
9687         u32 flags = 0;
9688
9689         /* Allocate a buffer for either the LE advertising data or the BR/EDR EIR fields */
9690         if (conn->le_adv_data_len > 0)
9691                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9692                                      sizeof(*ev) + conn->le_adv_data_len);
9693         else
9694                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9695                                      sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9696                                      eir_precalc_len(sizeof(conn->dev_class)));
9697
9698         ev = skb_put(skb, sizeof(*ev));
9699         bacpy(&ev->addr.bdaddr, &conn->dst);
9700         ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9701
9702         if (conn->out)
9703                 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9704
9705         ev->flags = __cpu_to_le32(flags);
9706
9707         /* We must ensure that the EIR Data fields are ordered and
9708          * unique. Keep it simple for now and avoid the problem by not
9709          * adding any BR/EDR data to the LE adv.
9710          */
9711         if (conn->le_adv_data_len > 0) {
9712                 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9713                 eir_len = conn->le_adv_data_len;
9714         } else {
9715                 if (name)
9716                         eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9717
9718                 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9719                         eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9720                                                     conn->dev_class, sizeof(conn->dev_class));
9721         }
9722
9723         ev->eir_len = cpu_to_le16(eir_len);
9724
9725         mgmt_event_skb(skb, NULL);
9726 }
9727
9728 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9729 {
9730         struct sock **sk = data;
9731
9732         cmd->cmd_complete(cmd, 0);
9733
9734         *sk = cmd->sk;
9735         sock_hold(*sk);
9736
9737         mgmt_pending_remove(cmd);
9738 }
9739
9740 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9741 {
9742         struct hci_dev *hdev = data;
9743         struct mgmt_cp_unpair_device *cp = cmd->param;
9744
9745         device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9746
9747         cmd->cmd_complete(cmd, 0);
9748         mgmt_pending_remove(cmd);
9749 }
9750
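/* Return true if a Set Powered (off) command is currently pending for this
 * controller, i.e. userspace has requested power-down but the operation has
 * not completed yet.
 */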
9751 bool mgmt_powering_down(struct hci_dev *hdev)
9752 {
9753         struct mgmt_pending_cmd *cmd;
9754         struct mgmt_mode *cp;
9755
9756         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9757         if (!cmd)
9758                 return false;
9759
9760         cp = cmd->param;
9761         if (!cp->val)
9762                 return true;
9763
9764         return false;
9765 }
9766
9767 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9768                               u8 link_type, u8 addr_type, u8 reason,
9769                               bool mgmt_connected)
9770 {
9771         struct mgmt_ev_device_disconnected ev;
9772         struct sock *sk = NULL;
9773
9774         if (!mgmt_connected)
9775                 return;
9776
9777         if (link_type != ACL_LINK && link_type != LE_LINK)
9778                 return;
9779
9780         mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9781
9782         bacpy(&ev.addr.bdaddr, bdaddr);
9783         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9784         ev.reason = reason;
9785
9786         /* Report disconnects due to suspend */
9787         if (hdev->suspended)
9788                 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9789
9790         mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9791
9792         if (sk)
9793                 sock_put(sk);
9794
9795         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9796                              hdev);
9797 }
9798
9799 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9800                             u8 link_type, u8 addr_type, u8 status)
9801 {
9802         u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9803         struct mgmt_cp_disconnect *cp;
9804         struct mgmt_pending_cmd *cmd;
9805
9806         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9807                              hdev);
9808
9809         cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9810         if (!cmd)
9811                 return;
9812
9813         cp = cmd->param;
9814
9815         if (bacmp(bdaddr, &cp->addr.bdaddr))
9816                 return;
9817
9818         if (cp->addr.type != bdaddr_type)
9819                 return;
9820
9821         cmd->cmd_complete(cmd, mgmt_status(status));
9822         mgmt_pending_remove(cmd);
9823 }
9824
9825 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9826                          u8 addr_type, u8 status)
9827 {
9828         struct mgmt_ev_connect_failed ev;
9829
9830         bacpy(&ev.addr.bdaddr, bdaddr);
9831         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9832         ev.status = mgmt_status(status);
9833
9834         mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9835 }
9836
9837 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9838 {
9839         struct mgmt_ev_pin_code_request ev;
9840
9841         bacpy(&ev.addr.bdaddr, bdaddr);
9842         ev.addr.type = BDADDR_BREDR;
9843         ev.secure = secure;
9844
9845         mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9846 }
9847
9848 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9849                                   u8 status)
9850 {
9851         struct mgmt_pending_cmd *cmd;
9852
9853         cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9854         if (!cmd)
9855                 return;
9856
9857         cmd->cmd_complete(cmd, mgmt_status(status));
9858         mgmt_pending_remove(cmd);
9859 }
9860
9861 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9862                                       u8 status)
9863 {
9864         struct mgmt_pending_cmd *cmd;
9865
9866         cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9867         if (!cmd)
9868                 return;
9869
9870         cmd->cmd_complete(cmd, mgmt_status(status));
9871         mgmt_pending_remove(cmd);
9872 }
9873
9874 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9875                               u8 link_type, u8 addr_type, u32 value,
9876                               u8 confirm_hint)
9877 {
9878         struct mgmt_ev_user_confirm_request ev;
9879
9880         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9881
9882         bacpy(&ev.addr.bdaddr, bdaddr);
9883         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9884         ev.confirm_hint = confirm_hint;
9885         ev.value = cpu_to_le32(value);
9886
9887         return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9888                           NULL);
9889 }
9890
9891 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9892                               u8 link_type, u8 addr_type)
9893 {
9894         struct mgmt_ev_user_passkey_request ev;
9895
9896         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9897
9898         bacpy(&ev.addr.bdaddr, bdaddr);
9899         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9900
9901         return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9902                           NULL);
9903 }
9904
9905 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9906                                       u8 link_type, u8 addr_type, u8 status,
9907                                       u8 opcode)
9908 {
9909         struct mgmt_pending_cmd *cmd;
9910
9911         cmd = pending_find(opcode, hdev);
9912         if (!cmd)
9913                 return -ENOENT;
9914
9915         cmd->cmd_complete(cmd, mgmt_status(status));
9916         mgmt_pending_remove(cmd);
9917
9918         return 0;
9919 }
9920
9921 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9922                                      u8 link_type, u8 addr_type, u8 status)
9923 {
9924         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9925                                           status, MGMT_OP_USER_CONFIRM_REPLY);
9926 }
9927
9928 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9929                                          u8 link_type, u8 addr_type, u8 status)
9930 {
9931         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9932                                           status,
9933                                           MGMT_OP_USER_CONFIRM_NEG_REPLY);
9934 }
9935
9936 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9937                                      u8 link_type, u8 addr_type, u8 status)
9938 {
9939         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9940                                           status, MGMT_OP_USER_PASSKEY_REPLY);
9941 }
9942
9943 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9944                                          u8 link_type, u8 addr_type, u8 status)
9945 {
9946         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9947                                           status,
9948                                           MGMT_OP_USER_PASSKEY_NEG_REPLY);
9949 }
9950
9951 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9952                              u8 link_type, u8 addr_type, u32 passkey,
9953                              u8 entered)
9954 {
9955         struct mgmt_ev_passkey_notify ev;
9956
9957         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9958
9959         bacpy(&ev.addr.bdaddr, bdaddr);
9960         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9961         ev.passkey = __cpu_to_le32(passkey);
9962         ev.entered = entered;
9963
9964         return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9965 }
9966
9967 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9968 {
9969         struct mgmt_ev_auth_failed ev;
9970         struct mgmt_pending_cmd *cmd;
9971         u8 status = mgmt_status(hci_status);
9972
9973         bacpy(&ev.addr.bdaddr, &conn->dst);
9974         ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9975         ev.status = status;
9976
9977         cmd = find_pairing(conn);
9978
9979         mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9980                     cmd ? cmd->sk : NULL);
9981
9982         if (cmd) {
9983                 cmd->cmd_complete(cmd, status);
9984                 mgmt_pending_remove(cmd);
9985         }
9986 }
9987
9988 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9989 {
9990         struct cmd_lookup match = { NULL, hdev };
9991         bool changed;
9992
9993         if (status) {
9994                 u8 mgmt_err = mgmt_status(status);
9995                 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9996                                      cmd_status_rsp, &mgmt_err);
9997                 return;
9998         }
9999
10000         if (test_bit(HCI_AUTH, &hdev->flags))
10001                 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10002         else
10003                 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10004
10005         mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10006                              &match);
10007
10008         if (changed)
10009                 new_settings(hdev, match.sk);
10010
10011         if (match.sk)
10012                 sock_put(match.sk);
10013 }
10014
10015 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10016 {
10017         struct cmd_lookup *match = data;
10018
10019         if (match->sk == NULL) {
10020                 match->sk = cmd->sk;
10021                 sock_hold(match->sk);
10022         }
10023 }
10024
10025 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10026                                     u8 status)
10027 {
10028         struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10029
10030         mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10031         mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10032         mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10033
10034         if (!status) {
10035                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10036                                    3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10037                 ext_info_changed(hdev, NULL);
10038         }
10039
10040         if (match.sk)
10041                 sock_put(match.sk);
10042 }
10043
10044 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10045 {
10046         struct mgmt_cp_set_local_name ev;
10047         struct mgmt_pending_cmd *cmd;
10048
10049         if (status)
10050                 return;
10051
10052         memset(&ev, 0, sizeof(ev));
10053         memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10054         memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10055
10056         cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10057         if (!cmd) {
10058                 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10059
10060                 /* If this is an HCI command related to powering on the
10061                  * HCI dev, don't send any mgmt signals.
10062                  */
10063                 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10064                         return;
10065         }
10066
10067         mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10068                            HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10069         ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10070 }
10071
10072 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10073 {
10074         int i;
10075
10076         for (i = 0; i < uuid_count; i++) {
10077                 if (!memcmp(uuid, uuids[i], 16))
10078                         return true;
10079         }
10080
10081         return false;
10082 }
10083
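/* Walk the EIR/advertising data TLVs, expand 16-bit and 32-bit service UUIDs
 * to their 128-bit form using the Bluetooth base UUID (e.g. 0x110b becomes
 * 0000110b-0000-1000-8000-00805f9b34fb) and check whether any of them matches
 * the discovery filter list.
 */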
10084 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10085 {
10086         u16 parsed = 0;
10087
10088         while (parsed < eir_len) {
10089                 u8 field_len = eir[0];
10090                 u8 uuid[16];
10091                 int i;
10092
10093                 if (field_len == 0)
10094                         break;
10095
10096                 if (eir_len - parsed < field_len + 1)
10097                         break;
10098
10099                 switch (eir[1]) {
10100                 case EIR_UUID16_ALL:
10101                 case EIR_UUID16_SOME:
10102                         for (i = 0; i + 3 <= field_len; i += 2) {
10103                                 memcpy(uuid, bluetooth_base_uuid, 16);
10104                                 uuid[13] = eir[i + 3];
10105                                 uuid[12] = eir[i + 2];
10106                                 if (has_uuid(uuid, uuid_count, uuids))
10107                                         return true;
10108                         }
10109                         break;
10110                 case EIR_UUID32_ALL:
10111                 case EIR_UUID32_SOME:
10112                         for (i = 0; i + 5 <= field_len; i += 4) {
10113                                 memcpy(uuid, bluetooth_base_uuid, 16);
10114                                 uuid[15] = eir[i + 5];
10115                                 uuid[14] = eir[i + 4];
10116                                 uuid[13] = eir[i + 3];
10117                                 uuid[12] = eir[i + 2];
10118                                 if (has_uuid(uuid, uuid_count, uuids))
10119                                         return true;
10120                         }
10121                         break;
10122                 case EIR_UUID128_ALL:
10123                 case EIR_UUID128_SOME:
10124                         for (i = 0; i + 17 <= field_len; i += 16) {
10125                                 memcpy(uuid, eir + i + 2, 16);
10126                                 if (has_uuid(uuid, uuid_count, uuids))
10127                                         return true;
10128                         }
10129                         break;
10130                 }
10131
10132                 parsed += field_len + 1;
10133                 eir += field_len + 1;
10134         }
10135
10136         return false;
10137 }
10138
10139 static void restart_le_scan(struct hci_dev *hdev)
10140 {
10141         /* If the controller is not scanning, we are done. */
10142         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
10143                 return;
10144
10145         if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
10146                        hdev->discovery.scan_start +
10147                        hdev->discovery.scan_duration))
10148                 return;
10149
10150         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
10151                            DISCOV_LE_RESTART_DELAY);
10152 }
10153
10154 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10155                             u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10156 {
10157         /* If an RSSI threshold has been specified, and
10158          * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10159          * an RSSI smaller than the RSSI threshold will be dropped. If the quirk
10160          * is set, let such results through for further processing, as we might
10161          * need to restart the scan.
10162          *
10163          * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10164          * the results are also dropped.
10165          */
10166         if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10167             (rssi == HCI_RSSI_INVALID ||
10168             (rssi < hdev->discovery.rssi &&
10169              !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10170                 return false;
10171
10172         if (hdev->discovery.uuid_count != 0) {
10173                 /* If a list of UUIDs is provided in the filter, results with no
10174                  * matching UUID should be dropped.
10175                  */
10176                 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10177                                    hdev->discovery.uuids) &&
10178                     !eir_has_uuids(scan_rsp, scan_rsp_len,
10179                                    hdev->discovery.uuid_count,
10180                                    hdev->discovery.uuids))
10181                         return false;
10182         }
10183
10184         /* If duplicate filtering does not report RSSI changes, then restart
10185          * scanning to ensure updated results with current RSSI values.
10186          */
10187         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10188                 restart_le_scan(hdev);
10189
10190                 /* Validate RSSI value against the RSSI threshold once more. */
10191                 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10192                     rssi < hdev->discovery.rssi)
10193                         return false;
10194         }
10195
10196         return true;
10197 }
10198
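      /* Sent when a device previously reported for an Advertisement Monitor
       * is no longer being tracked (e.g. by the controller's monitor
       * offload): MGMT_EV_ADV_MONITOR_DEVICE_LOST carries the monitor
       * handle and the address of the lost device.
       */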
10199 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10200                                   bdaddr_t *bdaddr, u8 addr_type)
10201 {
10202         struct mgmt_ev_adv_monitor_device_lost ev;
10203
10204         ev.monitor_handle = cpu_to_le16(handle);
10205         bacpy(&ev.addr.bdaddr, bdaddr);
10206         ev.addr.type = addr_type;
10207
10208         mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10209                    NULL);
10210 }
10211
10212 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10213                                                struct sk_buff *skb,
10214                                                struct sock *skip_sk,
10215                                                u16 handle)
10216 {
10217         struct sk_buff *advmon_skb;
10218         size_t advmon_skb_len;
10219         __le16 *monitor_handle;
10220
10221         if (!skb)
10222                 return;
10223
10224         advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10225                           sizeof(struct mgmt_ev_device_found)) + skb->len;
10226         advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10227                                     advmon_skb_len);
10228         if (!advmon_skb)
10229                 return;
10230
10231         /* ADV_MONITOR_DEVICE_FOUND is similar to the DEVICE_FOUND event except
10232          * that it also carries a 'monitor_handle'. Make a copy of DEVICE_FOUND
10233          * and prepend the monitor_handle of the matched monitor.
10234          */
10235         monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10236         *monitor_handle = cpu_to_le16(handle);
10237         skb_put_data(advmon_skb, skb->data, skb->len);
10238
10239         mgmt_event_skb(advmon_skb, skip_sk);
10240 }
10241
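      /* Route a DEVICE_FOUND skb according to the active Advertisement
       * Monitors.  This function takes ownership of @skb: it is either
       * handed to the MGMT sockets via mgmt_event_skb() or freed here.
       */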
10242 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10243                                           bdaddr_t *bdaddr, bool report_device,
10244                                           struct sk_buff *skb,
10245                                           struct sock *skip_sk)
10246 {
10247         struct monitored_device *dev, *tmp;
10248         bool matched = false;
10249         bool notified = false;
10250
10251         /* We have received the Advertisement Report because:
10252          * 1. the kernel has initiated active discovery
10253          * 2. if not, we have pend_le_reports > 0 in which case we are doing
10254          *    passive scanning
10255          * 3. if none of the above is true, we have one or more active
10256          *    Advertisement Monitors
10257          *
10258          * For cases 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10259          * and report ONLY one advertisement per device for the matched Monitor
10260          * via the MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10261          *
10262          * For case 3, since we are not actively scanning and all advertisements
10263          * received are due to a matched Advertisement Monitor, report all
10264          * advertisements ONLY via the MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10265          */
10266         if (report_device && !hdev->advmon_pend_notify) {
10267                 mgmt_event_skb(skb, skip_sk);
10268                 return;
10269         }
10270
10271         hdev->advmon_pend_notify = false;
10272
10273         list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10274                 if (!bacmp(&dev->bdaddr, bdaddr)) {
10275                         matched = true;
10276
10277                         if (!dev->notified) {
10278                                 mgmt_send_adv_monitor_device_found(hdev, skb,
10279                                                                    skip_sk,
10280                                                                    dev->handle);
10281                                 notified = true;
10282                                 dev->notified = true;
10283                         }
10284                 }
10285
10286                 if (!dev->notified)
10287                         hdev->advmon_pend_notify = true;
10288         }
10289
10290         if (!report_device &&
10291             ((matched && !notified) || !msft_monitor_supported(hdev))) {
10292                 /* Handle 0 indicates that we are not actively scanning and that
10293                  * this is either a subsequent advertisement report for an already
10294                  * matched Advertisement Monitor, or that controller offloading
10295                  * support is not available.
10296                  */
10297                 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10298         }
10299
10300         if (report_device)
10301                 mgmt_event_skb(skb, skip_sk);
10302         else
10303                 kfree_skb(skb);
10304 }
10305
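      /* Forward a scan result to mesh listeners via MGMT_EV_MESH_DEVICE_FOUND.
       * Results are only accepted if they contain at least one AD type from
       * hdev->mesh_ad_types (a zero entry terminates that list; an empty
       * list accepts everything).  Advertising data is a sequence of length
       * prefixed structures, so eir[i] is a structure length and eir[i + 1]
       * its AD type; for example the bytes 02 01 06 parse as length 0x02,
       * AD type 0x01 (Flags) and the single data byte 0x06.
       */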
10306 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10307                               u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10308                               u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10309                               u64 instant)
10310 {
10311         struct sk_buff *skb;
10312         struct mgmt_ev_mesh_device_found *ev;
10313         int i, j;
10314
10315         if (!hdev->mesh_ad_types[0])
10316                 goto accepted;
10317
10318         /* Scan for requested AD types */
10319         if (eir_len > 0) {
10320                 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10321                         for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10322                                 if (!hdev->mesh_ad_types[j])
10323                                         break;
10324
10325                                 if (hdev->mesh_ad_types[j] == eir[i + 1])
10326                                         goto accepted;
10327                         }
10328                 }
10329         }
10330
10331         if (scan_rsp_len > 0) {
10332                 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10333                         for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10334                                 if (!hdev->mesh_ad_types[j])
10335                                         break;
10336
10337                                 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10338                                         goto accepted;
10339                         }
10340                 }
10341         }
10342
10343         return;
10344
10345 accepted:
10346         skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10347                              sizeof(*ev) + eir_len + scan_rsp_len);
10348         if (!skb)
10349                 return;
10350
10351         ev = skb_put(skb, sizeof(*ev));
10352
10353         bacpy(&ev->addr.bdaddr, bdaddr);
10354         ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10355         ev->rssi = rssi;
10356         ev->flags = cpu_to_le32(flags);
10357         ev->instant = cpu_to_le64(instant);
10358
10359         if (eir_len > 0)
10360                 /* Copy EIR or advertising data into event */
10361                 skb_put_data(skb, eir, eir_len);
10362
10363         if (scan_rsp_len > 0)
10364                 /* Append scan response data to event */
10365                 skb_put_data(skb, scan_rsp, scan_rsp_len);
10366
10367         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10368
10369         mgmt_event_skb(skb, NULL);
10370 }
10371
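      /* Entry point for inquiry results and advertising reports.  LE reports
       * are mirrored to mesh listeners when HCI_MESH is set; reports are
       * dropped when neither discovery, passive scanning nor an
       * Advertisement Monitor needs them, filtered against any service
       * discovery parameters and then packed into an MGMT_EV_DEVICE_FOUND
       * skb (advertising data, an optional synthesized Class of Device
       * field and the scan response back to back), which is routed via
       * mgmt_adv_monitor_device_found().
       */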
10372 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10373                        u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10374                        u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10375                        u64 instant)
10376 {
10377         struct sk_buff *skb;
10378         struct mgmt_ev_device_found *ev;
10379         bool report_device = hci_discovery_active(hdev);
10380
10381         if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10382                 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10383                                   eir, eir_len, scan_rsp, scan_rsp_len,
10384                                   instant);
10385
10386         /* Don't send events for a non-kernel initiated discovery. With LE
10387          * the exceptions are passive scanning (pend_le_reports > 0) and an
10388          * active Advertisement Monitor, both of which need these reports.
10389          */
10390         if (!hci_discovery_active(hdev)) {
10391                 if (link_type == ACL_LINK)
10392                         return;
10393                 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10394                         report_device = true;
10395                 else if (!hci_is_adv_monitoring(hdev))
10396                         return;
10397         }
10398
10399         if (hdev->discovery.result_filtering) {
10400                 /* We are using service discovery */
10401                 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10402                                      scan_rsp_len))
10403                         return;
10404         }
10405
10406         if (hdev->discovery.limited) {
10407                 /* Check for limited discoverable bit */
10408                 if (dev_class) {
10409                         if (!(dev_class[1] & 0x20))
10410                                 return;
10411                 } else {
10412                         u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10413                         if (!flags || !(flags[0] & LE_AD_LIMITED))
10414                                 return;
10415                 }
10416         }
10417
10418         /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10419         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10420                              sizeof(*ev) + eir_len + scan_rsp_len + 5);
10421         if (!skb)
10422                 return;
10423
10424         ev = skb_put(skb, sizeof(*ev));
10425
10426         /* In case of device discovery with BR/EDR devices (pre 1.2), the
10427          * RSSI value was reported as 0 when not available. This behavior
10428          * is kept when using device discovery. This is required for full
10429          * backwards compatibility with the API.
10430          *
10431          * However when using service discovery, the value 127 will be
10432          * returned when the RSSI is not available.
10433          */
10434         if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10435             link_type == ACL_LINK)
10436                 rssi = 0;
10437
10438         bacpy(&ev->addr.bdaddr, bdaddr);
10439         ev->addr.type = link_to_bdaddr(link_type, addr_type);
10440         ev->rssi = rssi;
10441         ev->flags = cpu_to_le32(flags);
10442
10443         if (eir_len > 0)
10444                 /* Copy EIR or advertising data into event */
10445                 skb_put_data(skb, eir, eir_len);
10446
10447         if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10448                 u8 eir_cod[5];
10449
10450                 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10451                                            dev_class, 3);
10452                 skb_put_data(skb, eir_cod, sizeof(eir_cod));
10453         }
10454
10455         if (scan_rsp_len > 0)
10456                 /* Append scan response data to event */
10457                 skb_put_data(skb, scan_rsp, scan_rsp_len);
10458
10459         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10460
10461         mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10462 }
10463
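      /* Report the outcome of a remote name request as a DEVICE_FOUND event
       * carrying just an EIR_NAME_COMPLETE field; when the request failed
       * (@name is NULL) the event is sent with the
       * MGMT_DEV_FOUND_NAME_REQUEST_FAILED flag instead.
       */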
10464 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10465                       u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10466 {
10467         struct sk_buff *skb;
10468         struct mgmt_ev_device_found *ev;
10469         u16 eir_len = 0;
10470         u32 flags = 0;
10471
10472         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10473                              sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
              if (!skb)
                      return;
10474
10475         ev = skb_put(skb, sizeof(*ev));
10476         bacpy(&ev->addr.bdaddr, bdaddr);
10477         ev->addr.type = link_to_bdaddr(link_type, addr_type);
10478         ev->rssi = rssi;
10479
10480         if (name)
10481                 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10482         else
10483                 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10484
10485         ev->eir_len = cpu_to_le16(eir_len);
10486         ev->flags = cpu_to_le32(flags);
10487
10488         mgmt_event_skb(skb, NULL);
10489 }
10490
10491 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10492 {
10493         struct mgmt_ev_discovering ev;
10494
10495         bt_dev_dbg(hdev, "discovering %u", discovering);
10496
10497         memset(&ev, 0, sizeof(ev));
10498         ev.type = hdev->discovery.type;
10499         ev.discovering = discovering;
10500
10501         mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10502 }
10503
10504 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10505 {
10506         struct mgmt_ev_controller_suspend ev;
10507
10508         ev.suspend_state = state;
10509         mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10510 }
10511
10512 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10513                    u8 addr_type)
10514 {
10515         struct mgmt_ev_controller_resume ev;
10516
10517         ev.wake_reason = reason;
10518         if (bdaddr) {
10519                 bacpy(&ev.addr.bdaddr, bdaddr);
10520                 ev.addr.type = addr_type;
10521         } else {
10522                 memset(&ev.addr, 0, sizeof(ev.addr));
10523         }
10524
10525         mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10526 }
10527
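      /* Registration glue: the management interface is exposed to userspace
       * as the HCI control channel.  mgmt_handlers (defined earlier in this
       * file) maps each MGMT_OP_* opcode to its handler, and the
       * mgmt_init_hdev() callback sets up per-controller management state
       * when needed.
       */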
10528 static struct hci_mgmt_chan chan = {
10529         .channel        = HCI_CHANNEL_CONTROL,
10530         .handler_count  = ARRAY_SIZE(mgmt_handlers),
10531         .handlers       = mgmt_handlers,
10532         .hdev_init      = mgmt_init_hdev,
10533 };
10534
10535 int mgmt_init(void)
10536 {
10537         return hci_mgmt_chan_register(&chan);
10538 }
10539
10540 void mgmt_exit(void)
10541 {
10542         hci_mgmt_chan_unregister(&chan);
10543 }
10544
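      /* Socket cleanup hook: walk every controller and tear down any mesh
       * transmissions still owned by the closing socket so they do not
       * linger after their owner is gone.
       */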
10545 void mgmt_cleanup(struct sock *sk)
10546 {
10547         struct mgmt_mesh_tx *mesh_tx;
10548         struct hci_dev *hdev;
10549
10550         read_lock(&hci_dev_list_lock);
10551
10552         list_for_each_entry(hdev, &hci_dev_list, list) {
10553                 do {
10554                         mesh_tx = mgmt_mesh_next(hdev, sk);
10555
10556                         if (mesh_tx)
10557                                 mesh_send_complete(hdev, mesh_tx, true);
10558                 } while (mesh_tx);
10559         }
10560
10561         read_unlock(&hci_dev_list_lock);
10562 }