/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION    1
#define MGMT_REVISION   22

static const u16 mgmt_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
        MGMT_OP_READ_INFO,
        MGMT_OP_SET_POWERED,
        MGMT_OP_SET_DISCOVERABLE,
        MGMT_OP_SET_CONNECTABLE,
        MGMT_OP_SET_FAST_CONNECTABLE,
        MGMT_OP_SET_BONDABLE,
        MGMT_OP_SET_LINK_SECURITY,
        MGMT_OP_SET_SSP,
        MGMT_OP_SET_HS,
        MGMT_OP_SET_LE,
        MGMT_OP_SET_DEV_CLASS,
        MGMT_OP_SET_LOCAL_NAME,
        MGMT_OP_ADD_UUID,
        MGMT_OP_REMOVE_UUID,
        MGMT_OP_LOAD_LINK_KEYS,
        MGMT_OP_LOAD_LONG_TERM_KEYS,
        MGMT_OP_DISCONNECT,
        MGMT_OP_GET_CONNECTIONS,
        MGMT_OP_PIN_CODE_REPLY,
        MGMT_OP_PIN_CODE_NEG_REPLY,
        MGMT_OP_SET_IO_CAPABILITY,
        MGMT_OP_PAIR_DEVICE,
        MGMT_OP_CANCEL_PAIR_DEVICE,
        MGMT_OP_UNPAIR_DEVICE,
        MGMT_OP_USER_CONFIRM_REPLY,
        MGMT_OP_USER_CONFIRM_NEG_REPLY,
        MGMT_OP_USER_PASSKEY_REPLY,
        MGMT_OP_USER_PASSKEY_NEG_REPLY,
        MGMT_OP_READ_LOCAL_OOB_DATA,
        MGMT_OP_ADD_REMOTE_OOB_DATA,
        MGMT_OP_REMOVE_REMOTE_OOB_DATA,
        MGMT_OP_START_DISCOVERY,
        MGMT_OP_STOP_DISCOVERY,
        MGMT_OP_CONFIRM_NAME,
        MGMT_OP_BLOCK_DEVICE,
        MGMT_OP_UNBLOCK_DEVICE,
        MGMT_OP_SET_DEVICE_ID,
        MGMT_OP_SET_ADVERTISING,
        MGMT_OP_SET_BREDR,
        MGMT_OP_SET_STATIC_ADDRESS,
        MGMT_OP_SET_SCAN_PARAMS,
        MGMT_OP_SET_SECURE_CONN,
        MGMT_OP_SET_DEBUG_KEYS,
        MGMT_OP_SET_PRIVACY,
        MGMT_OP_LOAD_IRKS,
        MGMT_OP_GET_CONN_INFO,
        MGMT_OP_GET_CLOCK_INFO,
        MGMT_OP_ADD_DEVICE,
        MGMT_OP_REMOVE_DEVICE,
        MGMT_OP_LOAD_CONN_PARAM,
        MGMT_OP_READ_UNCONF_INDEX_LIST,
        MGMT_OP_READ_CONFIG_INFO,
        MGMT_OP_SET_EXTERNAL_CONFIG,
        MGMT_OP_SET_PUBLIC_ADDRESS,
        MGMT_OP_START_SERVICE_DISCOVERY,
        MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
        MGMT_OP_READ_EXT_INDEX_LIST,
        MGMT_OP_READ_ADV_FEATURES,
        MGMT_OP_ADD_ADVERTISING,
        MGMT_OP_REMOVE_ADVERTISING,
        MGMT_OP_GET_ADV_SIZE_INFO,
        MGMT_OP_START_LIMITED_DISCOVERY,
        MGMT_OP_READ_EXT_INFO,
        MGMT_OP_SET_APPEARANCE,
        MGMT_OP_GET_PHY_CONFIGURATION,
        MGMT_OP_SET_PHY_CONFIGURATION,
        MGMT_OP_SET_BLOCKED_KEYS,
        MGMT_OP_SET_WIDEBAND_SPEECH,
        MGMT_OP_READ_CONTROLLER_CAP,
        MGMT_OP_READ_EXP_FEATURES_INFO,
        MGMT_OP_SET_EXP_FEATURE,
        MGMT_OP_READ_DEF_SYSTEM_CONFIG,
        MGMT_OP_SET_DEF_SYSTEM_CONFIG,
        MGMT_OP_READ_DEF_RUNTIME_CONFIG,
        MGMT_OP_SET_DEF_RUNTIME_CONFIG,
        MGMT_OP_GET_DEVICE_FLAGS,
        MGMT_OP_SET_DEVICE_FLAGS,
        MGMT_OP_READ_ADV_MONITOR_FEATURES,
        MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
        MGMT_OP_REMOVE_ADV_MONITOR,
        MGMT_OP_ADD_EXT_ADV_PARAMS,
        MGMT_OP_ADD_EXT_ADV_DATA,
        MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
        MGMT_OP_SET_MESH_RECEIVER,
        MGMT_OP_MESH_READ_FEATURES,
        MGMT_OP_MESH_SEND,
        MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
        MGMT_EV_CONTROLLER_ERROR,
        MGMT_EV_INDEX_ADDED,
        MGMT_EV_INDEX_REMOVED,
        MGMT_EV_NEW_SETTINGS,
        MGMT_EV_CLASS_OF_DEV_CHANGED,
        MGMT_EV_LOCAL_NAME_CHANGED,
        MGMT_EV_NEW_LINK_KEY,
        MGMT_EV_NEW_LONG_TERM_KEY,
        MGMT_EV_DEVICE_CONNECTED,
        MGMT_EV_DEVICE_DISCONNECTED,
        MGMT_EV_CONNECT_FAILED,
        MGMT_EV_PIN_CODE_REQUEST,
        MGMT_EV_USER_CONFIRM_REQUEST,
        MGMT_EV_USER_PASSKEY_REQUEST,
        MGMT_EV_AUTH_FAILED,
        MGMT_EV_DEVICE_FOUND,
        MGMT_EV_DISCOVERING,
        MGMT_EV_DEVICE_BLOCKED,
        MGMT_EV_DEVICE_UNBLOCKED,
        MGMT_EV_DEVICE_UNPAIRED,
        MGMT_EV_PASSKEY_NOTIFY,
        MGMT_EV_NEW_IRK,
        MGMT_EV_NEW_CSRK,
        MGMT_EV_DEVICE_ADDED,
        MGMT_EV_DEVICE_REMOVED,
        MGMT_EV_NEW_CONN_PARAM,
        MGMT_EV_UNCONF_INDEX_ADDED,
        MGMT_EV_UNCONF_INDEX_REMOVED,
        MGMT_EV_NEW_CONFIG_OPTIONS,
        MGMT_EV_EXT_INDEX_ADDED,
        MGMT_EV_EXT_INDEX_REMOVED,
        MGMT_EV_LOCAL_OOB_DATA_UPDATED,
        MGMT_EV_ADVERTISING_ADDED,
        MGMT_EV_ADVERTISING_REMOVED,
        MGMT_EV_EXT_INFO_CHANGED,
        MGMT_EV_PHY_CONFIGURATION_CHANGED,
        MGMT_EV_EXP_FEATURE_CHANGED,
        MGMT_EV_DEVICE_FLAGS_CHANGED,
        MGMT_EV_ADV_MONITOR_ADDED,
        MGMT_EV_ADV_MONITOR_REMOVED,
        MGMT_EV_CONTROLLER_SUSPEND,
        MGMT_EV_CONTROLLER_RESUME,
        MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
        MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
        MGMT_OP_READ_INFO,
        MGMT_OP_READ_UNCONF_INDEX_LIST,
        MGMT_OP_READ_CONFIG_INFO,
        MGMT_OP_READ_EXT_INDEX_LIST,
        MGMT_OP_READ_EXT_INFO,
        MGMT_OP_READ_CONTROLLER_CAP,
        MGMT_OP_READ_EXP_FEATURES_INFO,
        MGMT_OP_READ_DEF_SYSTEM_CONFIG,
        MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
        MGMT_EV_INDEX_ADDED,
        MGMT_EV_INDEX_REMOVED,
        MGMT_EV_NEW_SETTINGS,
        MGMT_EV_CLASS_OF_DEV_CHANGED,
        MGMT_EV_LOCAL_NAME_CHANGED,
        MGMT_EV_UNCONF_INDEX_ADDED,
        MGMT_EV_UNCONF_INDEX_REMOVED,
        MGMT_EV_NEW_CONFIG_OPTIONS,
        MGMT_EV_EXT_INDEX_ADDED,
        MGMT_EV_EXT_INDEX_REMOVED,
        MGMT_EV_EXT_INFO_CHANGED,
        MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT   msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
                 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
        MGMT_STATUS_SUCCESS,
        MGMT_STATUS_UNKNOWN_COMMAND,    /* Unknown Command */
        MGMT_STATUS_NOT_CONNECTED,      /* No Connection */
        MGMT_STATUS_FAILED,             /* Hardware Failure */
        MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
        MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
        MGMT_STATUS_AUTH_FAILED,        /* PIN or Key Missing */
        MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
        MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
        MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
        MGMT_STATUS_NO_RESOURCES,       /* Max Number of SCO Connections */
        MGMT_STATUS_ALREADY_CONNECTED,  /* ACL Connection Exists */
        MGMT_STATUS_BUSY,               /* Command Disallowed */
        MGMT_STATUS_NO_RESOURCES,       /* Rejected Limited Resources */
        MGMT_STATUS_REJECTED,           /* Rejected Security */
        MGMT_STATUS_REJECTED,           /* Rejected Personal */
        MGMT_STATUS_TIMEOUT,            /* Host Timeout */
        MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Feature */
        MGMT_STATUS_INVALID_PARAMS,     /* Invalid Parameters */
        MGMT_STATUS_DISCONNECTED,       /* OE User Ended Connection */
        MGMT_STATUS_NO_RESOURCES,       /* OE Low Resources */
        MGMT_STATUS_DISCONNECTED,       /* OE Power Off */
        MGMT_STATUS_DISCONNECTED,       /* Connection Terminated */
        MGMT_STATUS_BUSY,               /* Repeated Attempts */
        MGMT_STATUS_REJECTED,           /* Pairing Not Allowed */
        MGMT_STATUS_FAILED,             /* Unknown LMP PDU */
        MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Remote Feature */
        MGMT_STATUS_REJECTED,           /* SCO Offset Rejected */
        MGMT_STATUS_REJECTED,           /* SCO Interval Rejected */
        MGMT_STATUS_REJECTED,           /* Air Mode Rejected */
        MGMT_STATUS_INVALID_PARAMS,     /* Invalid LMP Parameters */
        MGMT_STATUS_FAILED,             /* Unspecified Error */
        MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported LMP Parameter Value */
        MGMT_STATUS_FAILED,             /* Role Change Not Allowed */
        MGMT_STATUS_TIMEOUT,            /* LMP Response Timeout */
        MGMT_STATUS_FAILED,             /* LMP Error Transaction Collision */
        MGMT_STATUS_FAILED,             /* LMP PDU Not Allowed */
        MGMT_STATUS_REJECTED,           /* Encryption Mode Not Accepted */
        MGMT_STATUS_FAILED,             /* Unit Link Key Used */
        MGMT_STATUS_NOT_SUPPORTED,      /* QoS Not Supported */
        MGMT_STATUS_TIMEOUT,            /* Instant Passed */
        MGMT_STATUS_NOT_SUPPORTED,      /* Pairing Not Supported */
        MGMT_STATUS_FAILED,             /* Transaction Collision */
        MGMT_STATUS_FAILED,             /* Reserved for future use */
        MGMT_STATUS_INVALID_PARAMS,     /* Unacceptable Parameter */
        MGMT_STATUS_REJECTED,           /* QoS Rejected */
        MGMT_STATUS_NOT_SUPPORTED,      /* Classification Not Supported */
        MGMT_STATUS_REJECTED,           /* Insufficient Security */
        MGMT_STATUS_INVALID_PARAMS,     /* Parameter Out Of Range */
        MGMT_STATUS_FAILED,             /* Reserved for future use */
        MGMT_STATUS_BUSY,               /* Role Switch Pending */
        MGMT_STATUS_FAILED,             /* Reserved for future use */
        MGMT_STATUS_FAILED,             /* Slot Violation */
        MGMT_STATUS_FAILED,             /* Role Switch Failed */
        MGMT_STATUS_INVALID_PARAMS,     /* EIR Too Large */
        MGMT_STATUS_NOT_SUPPORTED,      /* Simple Pairing Not Supported */
        MGMT_STATUS_BUSY,               /* Host Busy Pairing */
        MGMT_STATUS_REJECTED,           /* Rejected, No Suitable Channel */
        MGMT_STATUS_BUSY,               /* Controller Busy */
        MGMT_STATUS_INVALID_PARAMS,     /* Unsuitable Connection Interval */
        MGMT_STATUS_TIMEOUT,            /* Directed Advertising Timeout */
        MGMT_STATUS_AUTH_FAILED,        /* Terminated Due to MIC Failure */
        MGMT_STATUS_CONNECT_FAILED,     /* Connection Establishment Failed */
        MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
        switch (err) {
        case 0:
                return MGMT_STATUS_SUCCESS;
        case -EPERM:
                return MGMT_STATUS_REJECTED;
        case -EINVAL:
                return MGMT_STATUS_INVALID_PARAMS;
        case -EOPNOTSUPP:
                return MGMT_STATUS_NOT_SUPPORTED;
        case -EBUSY:
                return MGMT_STATUS_BUSY;
        case -ETIMEDOUT:
                return MGMT_STATUS_AUTH_FAILED;
        case -ENOMEM:
                return MGMT_STATUS_NO_RESOURCES;
        case -EISCONN:
                return MGMT_STATUS_ALREADY_CONNECTED;
        case -ENOTCONN:
                return MGMT_STATUS_DISCONNECTED;
        }

        return MGMT_STATUS_FAILED;
}

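/* Map an HCI status code or a negative errno to a MGMT status. Non-negative
 * values index directly into mgmt_status_table above (e.g. HCI "Page Timeout",
 * 0x04, becomes MGMT_STATUS_CONNECT_FAILED); anything unknown falls back to
 * MGMT_STATUS_FAILED.
 */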
static u8 mgmt_status(int err)
{
        if (err < 0)
                return mgmt_errno_status(err);

        if (err < ARRAY_SIZE(mgmt_status_table))
                return mgmt_status_table[err];

        return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
                            u16 len, int flag)
{
        return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
                               flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
                              u16 len, int flag, struct sock *skip_sk)
{
        return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
                               flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
                      struct sock *skip_sk)
{
        return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
                               HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
        return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
                                   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
        if (mgmt_addr_type == BDADDR_LE_PUBLIC)
                return ADDR_LE_DEV_PUBLIC;
        else
                return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
        struct mgmt_rp_read_version *rp = ver;

        rp->version = MGMT_VERSION;
        rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
                        u16 data_len)
{
        struct mgmt_rp_read_version rp;

        bt_dev_dbg(hdev, "sock %p", sk);

        mgmt_fill_version_info(&rp);

        return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
                                 &rp, sizeof(rp));
}

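/* Report the supported commands and events to userspace. Untrusted sockets
 * only get the read-only subsets listed in mgmt_untrusted_commands[] and
 * mgmt_untrusted_events[].
 */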
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
                         u16 data_len)
{
        struct mgmt_rp_read_commands *rp;
        u16 num_commands, num_events;
        size_t rp_size;
        int i, err;

        bt_dev_dbg(hdev, "sock %p", sk);

        if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
                num_commands = ARRAY_SIZE(mgmt_commands);
                num_events = ARRAY_SIZE(mgmt_events);
        } else {
                num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
                num_events = ARRAY_SIZE(mgmt_untrusted_events);
        }

        rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

        rp = kmalloc(rp_size, GFP_KERNEL);
        if (!rp)
                return -ENOMEM;

        rp->num_commands = cpu_to_le16(num_commands);
        rp->num_events = cpu_to_le16(num_events);

        if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
                __le16 *opcode = rp->opcodes;

                for (i = 0; i < num_commands; i++, opcode++)
                        put_unaligned_le16(mgmt_commands[i], opcode);

                for (i = 0; i < num_events; i++, opcode++)
                        put_unaligned_le16(mgmt_events[i], opcode);
        } else {
                __le16 *opcode = rp->opcodes;

                for (i = 0; i < num_commands; i++, opcode++)
                        put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

                for (i = 0; i < num_events; i++, opcode++)
                        put_unaligned_le16(mgmt_untrusted_events[i], opcode);
        }

        err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
                                rp, rp_size);
        kfree(rp);

        return err;
}

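/* The index list commands below walk hci_dev_list and report controller
 * indexes. Controllers still in SETUP or CONFIG, bound to a user channel,
 * or marked raw-only are never exposed.
 */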
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 data_len)
{
        struct mgmt_rp_read_index_list *rp;
        struct hci_dev *d;
        size_t rp_len;
        u16 count;
        int err;

        bt_dev_dbg(hdev, "sock %p", sk);

        read_lock(&hci_dev_list_lock);

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->dev_type == HCI_PRIMARY &&
                    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
                        count++;
        }

        rp_len = sizeof(*rp) + (2 * count);
        rp = kmalloc(rp_len, GFP_ATOMIC);
        if (!rp) {
                read_unlock(&hci_dev_list_lock);
                return -ENOMEM;
        }

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (hci_dev_test_flag(d, HCI_SETUP) ||
                    hci_dev_test_flag(d, HCI_CONFIG) ||
                    hci_dev_test_flag(d, HCI_USER_CHANNEL))
                        continue;

                /* Devices marked as raw-only are neither configured
                 * nor unconfigured controllers.
                 */
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
                        continue;

                if (d->dev_type == HCI_PRIMARY &&
                    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
                        rp->index[count++] = cpu_to_le16(d->id);
                        bt_dev_dbg(hdev, "Added hci%u", d->id);
                }
        }

        rp->num_controllers = cpu_to_le16(count);
        rp_len = sizeof(*rp) + (2 * count);

        read_unlock(&hci_dev_list_lock);

        err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
                                0, rp, rp_len);

        kfree(rp);

        return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
                                  void *data, u16 data_len)
{
        struct mgmt_rp_read_unconf_index_list *rp;
        struct hci_dev *d;
        size_t rp_len;
        u16 count;
        int err;

        bt_dev_dbg(hdev, "sock %p", sk);

        read_lock(&hci_dev_list_lock);

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->dev_type == HCI_PRIMARY &&
                    hci_dev_test_flag(d, HCI_UNCONFIGURED))
                        count++;
        }

        rp_len = sizeof(*rp) + (2 * count);
        rp = kmalloc(rp_len, GFP_ATOMIC);
        if (!rp) {
                read_unlock(&hci_dev_list_lock);
                return -ENOMEM;
        }

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (hci_dev_test_flag(d, HCI_SETUP) ||
                    hci_dev_test_flag(d, HCI_CONFIG) ||
                    hci_dev_test_flag(d, HCI_USER_CHANNEL))
                        continue;

                /* Devices marked as raw-only are neither configured
                 * nor unconfigured controllers.
                 */
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
                        continue;

                if (d->dev_type == HCI_PRIMARY &&
                    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
                        rp->index[count++] = cpu_to_le16(d->id);
                        bt_dev_dbg(hdev, "Added hci%u", d->id);
                }
        }

        rp->num_controllers = cpu_to_le16(count);
        rp_len = sizeof(*rp) + (2 * count);

        read_unlock(&hci_dev_list_lock);

        err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
                                MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

        kfree(rp);

        return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
                               void *data, u16 data_len)
{
        struct mgmt_rp_read_ext_index_list *rp;
        struct hci_dev *d;
        u16 count;
        int err;

        bt_dev_dbg(hdev, "sock %p", sk);

        read_lock(&hci_dev_list_lock);

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
                        count++;
        }

        rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
        if (!rp) {
                read_unlock(&hci_dev_list_lock);
                return -ENOMEM;
        }

        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (hci_dev_test_flag(d, HCI_SETUP) ||
                    hci_dev_test_flag(d, HCI_CONFIG) ||
                    hci_dev_test_flag(d, HCI_USER_CHANNEL))
                        continue;

                /* Devices marked as raw-only are neither configured
                 * nor unconfigured controllers.
                 */
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
                        continue;

                if (d->dev_type == HCI_PRIMARY) {
                        if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
                                rp->entry[count].type = 0x01;
                        else
                                rp->entry[count].type = 0x00;
                } else if (d->dev_type == HCI_AMP) {
                        rp->entry[count].type = 0x02;
                } else {
                        continue;
                }

                rp->entry[count].bus = d->bus;
                rp->entry[count++].index = cpu_to_le16(d->id);
                bt_dev_dbg(hdev, "Added hci%u", d->id);
        }

        rp->num_controllers = cpu_to_le16(count);

        read_unlock(&hci_dev_list_lock);

        /* If this command is called at least once, then all the
         * default index and unconfigured index events are disabled
         * and from now on only extended index events are used.
         */
        hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
        hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
        hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

        err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
                                MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
                                struct_size(rp, entry, count));

        kfree(rp);

        return err;
}

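/* A controller counts as configured once any required external configuration
 * has completed and, if its default address is unusable, a public address has
 * been provided.
 */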
static bool is_configured(struct hci_dev *hdev)
{
        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
            !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
                return false;

        if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
             test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
            !bacmp(&hdev->public_addr, BDADDR_ANY))
                return false;

        return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
        u32 options = 0;

        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
            !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
                options |= MGMT_OPTION_EXTERNAL_CONFIG;

        if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
             test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
            !bacmp(&hdev->public_addr, BDADDR_ANY))
                options |= MGMT_OPTION_PUBLIC_ADDRESS;

        return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
        __le32 options = get_missing_options(hdev);

        return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
                                  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
        __le32 options = get_missing_options(hdev);

        return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
                                 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
                            void *data, u16 data_len)
{
        struct mgmt_rp_read_config_info rp;
        u32 options = 0;

        bt_dev_dbg(hdev, "sock %p", sk);

        hci_dev_lock(hdev);

        memset(&rp, 0, sizeof(rp));
        rp.manufacturer = cpu_to_le16(hdev->manufacturer);

        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
                options |= MGMT_OPTION_EXTERNAL_CONFIG;

        if (hdev->set_bdaddr)
                options |= MGMT_OPTION_PUBLIC_ADDRESS;

        rp.supported_options = cpu_to_le32(options);
        rp.missing_options = get_missing_options(hdev);

        hci_dev_unlock(hdev);

        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
                                 &rp, sizeof(rp));
}

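/* Report which BR/EDR packet types and LE PHYs the controller supports;
 * get_selected_phys() below reports which of them are currently in use. Note
 * that the EDR bits in hdev->pkt_type are exclusion bits, hence the negated
 * tests there.
 */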
static u32 get_supported_phys(struct hci_dev *hdev)
{
        u32 supported_phys = 0;

        if (lmp_bredr_capable(hdev)) {
                supported_phys |= MGMT_PHY_BR_1M_1SLOT;

                if (hdev->features[0][0] & LMP_3SLOT)
                        supported_phys |= MGMT_PHY_BR_1M_3SLOT;

                if (hdev->features[0][0] & LMP_5SLOT)
                        supported_phys |= MGMT_PHY_BR_1M_5SLOT;

                if (lmp_edr_2m_capable(hdev)) {
                        supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

                        if (lmp_edr_3slot_capable(hdev))
                                supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

                        if (lmp_edr_5slot_capable(hdev))
                                supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

                        if (lmp_edr_3m_capable(hdev)) {
                                supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

                                if (lmp_edr_3slot_capable(hdev))
                                        supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

                                if (lmp_edr_5slot_capable(hdev))
                                        supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
                        }
                }
        }

        if (lmp_le_capable(hdev)) {
                supported_phys |= MGMT_PHY_LE_1M_TX;
                supported_phys |= MGMT_PHY_LE_1M_RX;

                if (hdev->le_features[1] & HCI_LE_PHY_2M) {
                        supported_phys |= MGMT_PHY_LE_2M_TX;
                        supported_phys |= MGMT_PHY_LE_2M_RX;
                }

                if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
                        supported_phys |= MGMT_PHY_LE_CODED_TX;
                        supported_phys |= MGMT_PHY_LE_CODED_RX;
                }
        }

        return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
        u32 selected_phys = 0;

        if (lmp_bredr_capable(hdev)) {
                selected_phys |= MGMT_PHY_BR_1M_1SLOT;

                if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
                        selected_phys |= MGMT_PHY_BR_1M_3SLOT;

                if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
                        selected_phys |= MGMT_PHY_BR_1M_5SLOT;

                if (lmp_edr_2m_capable(hdev)) {
                        if (!(hdev->pkt_type & HCI_2DH1))
                                selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

                        if (lmp_edr_3slot_capable(hdev) &&
                            !(hdev->pkt_type & HCI_2DH3))
                                selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

                        if (lmp_edr_5slot_capable(hdev) &&
                            !(hdev->pkt_type & HCI_2DH5))
                                selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

                        if (lmp_edr_3m_capable(hdev)) {
                                if (!(hdev->pkt_type & HCI_3DH1))
                                        selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

                                if (lmp_edr_3slot_capable(hdev) &&
                                    !(hdev->pkt_type & HCI_3DH3))
                                        selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

                                if (lmp_edr_5slot_capable(hdev) &&
                                    !(hdev->pkt_type & HCI_3DH5))
                                        selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
                        }
                }
        }

        if (lmp_le_capable(hdev)) {
                if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
                        selected_phys |= MGMT_PHY_LE_1M_TX;

                if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
                        selected_phys |= MGMT_PHY_LE_1M_RX;

                if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
                        selected_phys |= MGMT_PHY_LE_2M_TX;

                if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
                        selected_phys |= MGMT_PHY_LE_2M_RX;

                if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
                        selected_phys |= MGMT_PHY_LE_CODED_TX;

                if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
                        selected_phys |= MGMT_PHY_LE_CODED_RX;
        }

        return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
        return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
                ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

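/* Build the bitmask of settings this controller could support, based on its
 * BR/EDR and LE feature bits; get_current_settings() below reports which of
 * them are currently enabled.
 */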
static u32 get_supported_settings(struct hci_dev *hdev)
{
        u32 settings = 0;

        settings |= MGMT_SETTING_POWERED;
        settings |= MGMT_SETTING_BONDABLE;
        settings |= MGMT_SETTING_DEBUG_KEYS;
        settings |= MGMT_SETTING_CONNECTABLE;
        settings |= MGMT_SETTING_DISCOVERABLE;

        if (lmp_bredr_capable(hdev)) {
                if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
                        settings |= MGMT_SETTING_FAST_CONNECTABLE;
                settings |= MGMT_SETTING_BREDR;
                settings |= MGMT_SETTING_LINK_SECURITY;

                if (lmp_ssp_capable(hdev)) {
                        settings |= MGMT_SETTING_SSP;
                }

                if (lmp_sc_capable(hdev))
                        settings |= MGMT_SETTING_SECURE_CONN;

                if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
                             &hdev->quirks))
                        settings |= MGMT_SETTING_WIDEBAND_SPEECH;
        }

        if (lmp_le_capable(hdev)) {
                settings |= MGMT_SETTING_LE;
                settings |= MGMT_SETTING_SECURE_CONN;
                settings |= MGMT_SETTING_PRIVACY;
                settings |= MGMT_SETTING_STATIC_ADDRESS;
                settings |= MGMT_SETTING_ADVERTISING;
        }

        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
            hdev->set_bdaddr)
                settings |= MGMT_SETTING_CONFIGURATION;

        if (cis_central_capable(hdev))
                settings |= MGMT_SETTING_CIS_CENTRAL;

        if (cis_peripheral_capable(hdev))
                settings |= MGMT_SETTING_CIS_PERIPHERAL;

        settings |= MGMT_SETTING_PHY_CONFIGURATION;

        return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
        u32 settings = 0;

        if (hdev_is_powered(hdev))
                settings |= MGMT_SETTING_POWERED;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
                settings |= MGMT_SETTING_CONNECTABLE;

        if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
                settings |= MGMT_SETTING_FAST_CONNECTABLE;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                settings |= MGMT_SETTING_DISCOVERABLE;

        if (hci_dev_test_flag(hdev, HCI_BONDABLE))
                settings |= MGMT_SETTING_BONDABLE;

        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                settings |= MGMT_SETTING_BREDR;

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                settings |= MGMT_SETTING_LE;

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
                settings |= MGMT_SETTING_LINK_SECURITY;

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                settings |= MGMT_SETTING_SSP;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                settings |= MGMT_SETTING_ADVERTISING;

        if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
                settings |= MGMT_SETTING_SECURE_CONN;

        if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
                settings |= MGMT_SETTING_DEBUG_KEYS;

        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                settings |= MGMT_SETTING_PRIVACY;

        /* The current setting for static address has two purposes. The
         * first is to indicate if the static address will be used and
         * the second is to indicate if it is actually set.
         *
         * This means if the static address is not configured, this flag
         * will never be set. If the address is configured, then if the
         * address is actually used decides if the flag is set or not.
         *
         * For single mode LE only controllers and dual-mode controllers
         * with BR/EDR disabled, the existence of the static address will
         * be evaluated.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
                if (bacmp(&hdev->static_addr, BDADDR_ANY))
                        settings |= MGMT_SETTING_STATIC_ADDRESS;
        }

        if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
                settings |= MGMT_SETTING_WIDEBAND_SPEECH;

        if (cis_central_capable(hdev))
                settings |= MGMT_SETTING_CIS_CENTRAL;

        if (cis_peripheral_capable(hdev))
                settings |= MGMT_SETTING_CIS_PERIPHERAL;

        if (bis_capable(hdev))
                settings |= MGMT_SETTING_ISO_BROADCASTER;

        if (sync_recv_capable(hdev))
                settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

        return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
        return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
        struct mgmt_pending_cmd *cmd;

        /* If there's a pending mgmt command the flags will not yet have
         * their final values, so check for this first.
         */
        cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
        if (cmd) {
                struct mgmt_mode *cp = cmd->param;
                if (cp->val == 0x01)
                        return LE_AD_GENERAL;
                else if (cp->val == 0x02)
                        return LE_AD_LIMITED;
        } else {
                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        return LE_AD_LIMITED;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        return LE_AD_GENERAL;
        }

        return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
        struct mgmt_pending_cmd *cmd;

        /* If there's a pending mgmt command the flag will not yet have
         * its final value, so check for this first.
         */
        cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
        if (cmd) {
                struct mgmt_mode *cp = cmd->param;

                return cp->val;
        }

        return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
        hci_update_eir_sync(hdev);
        hci_update_class_sync(hdev);

        return 0;
}

static void service_cache_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            service_cache.work);

        if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
                return;

        hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
        /* The generation of a new RPA and programming it into the
         * controller happens in the hci_req_enable_advertising()
         * function.
         */
        if (ext_adv_capable(hdev))
                return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
        else
                return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            rpa_expired.work);

        bt_dev_dbg(hdev, "");

        hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_off.work);

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        /* When discoverable timeout triggers, then just make sure
         * the limited discoverable flag is cleared. Even in the case
         * of a timeout triggered from general discoverable, it is
         * safe to unconditionally clear the flag.
         */
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hdev->discov_timeout = 0;

        hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

        mgmt_new_settings(hdev);

        hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
                               struct mgmt_mesh_tx *mesh_tx, bool silent)
{
        u8 handle = mesh_tx->handle;

        if (!silent)
                mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
                           sizeof(handle), NULL);

        mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
        struct mgmt_mesh_tx *mesh_tx;

        hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
        hci_disable_advertising_sync(hdev);
        mesh_tx = mgmt_mesh_next(hdev, NULL);

        if (mesh_tx)
                mesh_send_complete(hdev, mesh_tx, false);

        return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
        struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

        if (!mesh_tx)
                return;

        err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
                                 mesh_send_start_complete);

        if (err < 0)
                mesh_send_complete(hdev, mesh_tx, false);
        else
                hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            mesh_send_done.work);

        if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
                return;

        hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
        if (hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
        INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
        INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
        INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

        /* Non-mgmt controlled devices get this bit set
         * implicitly so that pairing works for them, however
         * for mgmt we require user-space to explicitly enable
         * it
         */
        hci_dev_clear_flag(hdev, HCI_BONDABLE);

        hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
                                void *data, u16 data_len)
{
        struct mgmt_rp_read_info rp;

        bt_dev_dbg(hdev, "sock %p", sk);

        hci_dev_lock(hdev);

        memset(&rp, 0, sizeof(rp));

        bacpy(&rp.bdaddr, &hdev->bdaddr);

        rp.version = hdev->hci_ver;
        rp.manufacturer = cpu_to_le16(hdev->manufacturer);

        rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
        rp.current_settings = cpu_to_le32(get_current_settings(hdev));

        memcpy(rp.dev_class, hdev->dev_class, 3);

        memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
        memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

        hci_dev_unlock(hdev);

        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
                                 sizeof(rp));
}

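/* Build the EIR-formatted blob (class of device, appearance, complete and
 * short name) shared by Read Extended Controller Information and the
 * Extended Controller Information Changed event.
 */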
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
        u16 eir_len = 0;
        size_t name_len;

        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
                                          hdev->dev_class, 3);

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
                                          hdev->appearance);

        name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
        eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
                                  hdev->dev_name, name_len);

        name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
        eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
                                  hdev->short_name, name_len);

        return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
                                    void *data, u16 data_len)
{
        char buf[512];
        struct mgmt_rp_read_ext_info *rp = (void *)buf;
        u16 eir_len;

        bt_dev_dbg(hdev, "sock %p", sk);

        memset(&buf, 0, sizeof(buf));

        hci_dev_lock(hdev);

        bacpy(&rp->bdaddr, &hdev->bdaddr);

        rp->version = hdev->hci_ver;
        rp->manufacturer = cpu_to_le16(hdev->manufacturer);

        rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
        rp->current_settings = cpu_to_le32(get_current_settings(hdev));

        eir_len = append_eir_data_to_buf(hdev, rp->eir);
        rp->eir_len = cpu_to_le16(eir_len);

        hci_dev_unlock(hdev);

        /* If this command is called at least once, then the events
         * for class of device and local name changes are disabled
         * and only the new extended controller information event
         * is used.
         */
        hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
        hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
        hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
                                 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
        char buf[512];
        struct mgmt_ev_ext_info_changed *ev = (void *)buf;
        u16 eir_len;

        memset(buf, 0, sizeof(buf));

        eir_len = append_eir_data_to_buf(hdev, ev->eir);
        ev->eir_len = cpu_to_le16(eir_len);

        return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
                                  sizeof(*ev) + eir_len,
                                  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
        __le32 settings = cpu_to_le32(get_current_settings(hdev));

        return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
                                 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
        struct mgmt_ev_advertising_added ev;

        ev.instance = instance;

        mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
                              u8 instance)
{
        struct mgmt_ev_advertising_removed ev;

        ev.instance = instance;

        mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
        struct hci_conn_params *p;

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                /* Needed for AUTO_OFF case where might not "really"
                 * have been powered off.
                 */
                hci_pend_le_list_del_init(p);

                switch (p->auto_connect) {
                case HCI_AUTO_CONN_DIRECT:
                case HCI_AUTO_CONN_ALWAYS:
                        hci_pend_le_list_add(p, &hdev->pend_le_conns);
                        break;
                case HCI_AUTO_CONN_REPORT:
                        hci_pend_le_list_add(p, &hdev->pend_le_reports);
                        break;
                default:
                        break;
                }
        }
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
        __le32 ev = cpu_to_le32(get_current_settings(hdev));

        return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
                                  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

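/* Set Powered is handled asynchronously: set_powered() queues
 * set_powered_sync() on the command sync machinery and the final response is
 * sent from mgmt_set_powered_complete() once the controller state change has
 * finished.
 */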
1327 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1328 {
1329         struct mgmt_pending_cmd *cmd = data;
1330         struct mgmt_mode *cp;
1331
1332         /* Make sure cmd still outstanding. */
1333         if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1334                 return;
1335
1336         cp = cmd->param;
1337
1338         bt_dev_dbg(hdev, "err %d", err);
1339
1340         if (!err) {
1341                 if (cp->val) {
1342                         hci_dev_lock(hdev);
1343                         restart_le_actions(hdev);
1344                         hci_update_passive_scan(hdev);
1345                         hci_dev_unlock(hdev);
1346                 }
1347
1348                 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1349
1350                 /* Only call new_setting for power on as power off is deferred
1351                  * to hdev->power_off work which does call hci_dev_do_close.
1352                  */
1353                 if (cp->val)
1354                         new_settings(hdev, cmd->sk);
1355         } else {
1356                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1357                                 mgmt_status(err));
1358         }
1359
1360         mgmt_pending_remove(cmd);
1361 }
1362
1363 static int set_powered_sync(struct hci_dev *hdev, void *data)
1364 {
1365         struct mgmt_pending_cmd *cmd = data;
1366         struct mgmt_mode *cp = cmd->param;
1367
1368         BT_DBG("%s", hdev->name);
1369
1370         return hci_set_powered_sync(hdev, cp->val);
1371 }
1372
1373 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1374                        u16 len)
1375 {
1376         struct mgmt_mode *cp = data;
1377         struct mgmt_pending_cmd *cmd;
1378         int err;
1379
1380         bt_dev_dbg(hdev, "sock %p", sk);
1381
1382         if (cp->val != 0x00 && cp->val != 0x01)
1383                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1384                                        MGMT_STATUS_INVALID_PARAMS);
1385
1386         hci_dev_lock(hdev);
1387
1388         if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1389                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1390                                       MGMT_STATUS_BUSY);
1391                 goto failed;
1392         }
1393
1394         if (!!cp->val == hdev_is_powered(hdev)) {
1395                 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1396                 goto failed;
1397         }
1398
1399         cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1400         if (!cmd) {
1401                 err = -ENOMEM;
1402                 goto failed;
1403         }
1404
1405         /* Cancel potentially blocking sync operation before power off */
1406         if (cp->val == 0x00) {
1407                 hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
1408                 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1409                                          mgmt_set_powered_complete);
1410         } else {
1411                 /* Use hci_cmd_sync_submit since hdev might not be running */
1412                 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1413                                           mgmt_set_powered_complete);
1414         }
1415
1416         if (err < 0)
1417                 mgmt_pending_remove(cmd);
1418
1419 failed:
1420         hci_dev_unlock(hdev);
1421         return err;
1422 }
1423
1424 int mgmt_new_settings(struct hci_dev *hdev)
1425 {
1426         return new_settings(hdev, NULL);
1427 }
1428
1429 struct cmd_lookup {
1430         struct sock *sk;
1431         struct hci_dev *hdev;
1432         u8 mgmt_status;
1433 };
1434
1435 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1436 {
1437         struct cmd_lookup *match = data;
1438
1439         send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1440
1441         list_del(&cmd->list);
1442
1443         if (match->sk == NULL) {
1444                 match->sk = cmd->sk;
1445                 sock_hold(match->sk);
1446         }
1447
1448         mgmt_pending_free(cmd);
1449 }
1450
1451 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1452 {
1453         u8 *status = data;
1454
1455         mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1456         mgmt_pending_remove(cmd);
1457 }
1458
1459 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1460 {
1461         if (cmd->cmd_complete) {
1462                 u8 *status = data;
1463
1464                 cmd->cmd_complete(cmd, *status);
1465                 mgmt_pending_remove(cmd);
1466
1467                 return;
1468         }
1469
1470         cmd_status_rsp(cmd, data);
1471 }
1472
1473 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1474 {
1475         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1476                                  cmd->param, cmd->param_len);
1477 }
1478
1479 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1480 {
1481         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1482                                  cmd->param, sizeof(struct mgmt_addr_info));
1483 }
1484
1485 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1486 {
1487         if (!lmp_bredr_capable(hdev))
1488                 return MGMT_STATUS_NOT_SUPPORTED;
1489         else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1490                 return MGMT_STATUS_REJECTED;
1491         else
1492                 return MGMT_STATUS_SUCCESS;
1493 }
1494
1495 static u8 mgmt_le_support(struct hci_dev *hdev)
1496 {
1497         if (!lmp_le_capable(hdev))
1498                 return MGMT_STATUS_NOT_SUPPORTED;
1499         else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1500                 return MGMT_STATUS_REJECTED;
1501         else
1502                 return MGMT_STATUS_SUCCESS;
1503 }
1504
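     /* Completion callback for Set Discoverable: on failure report the
      * status and drop HCI_LIMITED_DISCOVERABLE; on success (re)arm the
      * discoverable timeout, if one is configured, and send the settings
      * response followed by a New Settings event.
      */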
1505 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1506                                            int err)
1507 {
1508         struct mgmt_pending_cmd *cmd = data;
1509
1510         bt_dev_dbg(hdev, "err %d", err);
1511
1512         /* Make sure the cmd is still outstanding. */
1513         if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1514                 return;
1515
1516         hci_dev_lock(hdev);
1517
1518         if (err) {
1519                 u8 mgmt_err = mgmt_status(err);
1520                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1521                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1522                 goto done;
1523         }
1524
1525         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1526             hdev->discov_timeout > 0) {
1527                 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1528                 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1529         }
1530
1531         send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1532         new_settings(hdev, cmd->sk);
1533
1534 done:
1535         mgmt_pending_remove(cmd);
1536         hci_dev_unlock(hdev);
1537 }
1538
1539 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1540 {
1541         BT_DBG("%s", hdev->name);
1542
1543         return hci_update_discoverable_sync(hdev);
1544 }
1545
1546 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1547                             u16 len)
1548 {
1549         struct mgmt_cp_set_discoverable *cp = data;
1550         struct mgmt_pending_cmd *cmd;
1551         u16 timeout;
1552         int err;
1553
1554         bt_dev_dbg(hdev, "sock %p", sk);
1555
1556         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1557             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1558                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1559                                        MGMT_STATUS_REJECTED);
1560
1561         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1562                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1563                                        MGMT_STATUS_INVALID_PARAMS);
1564
1565         timeout = __le16_to_cpu(cp->timeout);
1566
1567         /* Disabling discoverable requires that no timeout is set,
1568          * and enabling limited discoverable requires a timeout.
1569          */
1570         if ((cp->val == 0x00 && timeout > 0) ||
1571             (cp->val == 0x02 && timeout == 0))
1572                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1573                                        MGMT_STATUS_INVALID_PARAMS);
1574
1575         hci_dev_lock(hdev);
1576
1577         if (!hdev_is_powered(hdev) && timeout > 0) {
1578                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1579                                       MGMT_STATUS_NOT_POWERED);
1580                 goto failed;
1581         }
1582
1583         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1584             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1585                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1586                                       MGMT_STATUS_BUSY);
1587                 goto failed;
1588         }
1589
1590         if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1591                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1592                                       MGMT_STATUS_REJECTED);
1593                 goto failed;
1594         }
1595
1596         if (hdev->advertising_paused) {
1597                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1598                                       MGMT_STATUS_BUSY);
1599                 goto failed;
1600         }
1601
1602         if (!hdev_is_powered(hdev)) {
1603                 bool changed = false;
1604
1605                 /* Setting limited discoverable when powered off is
1606                  * not a valid operation since it requires a timeout,
1607                  * so there is no need to check HCI_LIMITED_DISCOVERABLE.
1608                  */
1609                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1610                         hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1611                         changed = true;
1612                 }
1613
1614                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1615                 if (err < 0)
1616                         goto failed;
1617
1618                 if (changed)
1619                         err = new_settings(hdev, sk);
1620
1621                 goto failed;
1622         }
1623
1624         /* If the current mode is the same, then just update the timeout
1625          * value with the new value. If only the timeout gets updated,
1626          * then there is no need for any HCI transactions.
1627          */
1628         if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1629             (cp->val == 0x02) == hci_dev_test_flag(hdev,
1630                                                    HCI_LIMITED_DISCOVERABLE)) {
1631                 cancel_delayed_work(&hdev->discov_off);
1632                 hdev->discov_timeout = timeout;
1633
1634                 if (cp->val && hdev->discov_timeout > 0) {
1635                         int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1636                         queue_delayed_work(hdev->req_workqueue,
1637                                            &hdev->discov_off, to);
1638                 }
1639
1640                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1641                 goto failed;
1642         }
1643
1644         cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1645         if (!cmd) {
1646                 err = -ENOMEM;
1647                 goto failed;
1648         }
1649
1650         /* Cancel any potential discoverable timeout that might still be
1651          * active and store the new timeout value. The arming of the
1652          * timeout happens in the complete handler.
1653          */
1654         cancel_delayed_work(&hdev->discov_off);
1655         hdev->discov_timeout = timeout;
1656
1657         if (cp->val)
1658                 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1659         else
1660                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1661
1662         /* Limited discoverable mode */
1663         if (cp->val == 0x02)
1664                 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1665         else
1666                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1667
1668         err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1669                                  mgmt_set_discoverable_complete);
1670
1671         if (err < 0)
1672                 mgmt_pending_remove(cmd);
1673
1674 failed:
1675         hci_dev_unlock(hdev);
1676         return err;
1677 }
1678
1679 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1680                                           int err)
1681 {
1682         struct mgmt_pending_cmd *cmd = data;
1683
1684         bt_dev_dbg(hdev, "err %d", err);
1685
1686         /* Make sure the cmd is still outstanding. */
1687         if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1688                 return;
1689
1690         hci_dev_lock(hdev);
1691
1692         if (err) {
1693                 u8 mgmt_err = mgmt_status(err);
1694                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1695                 goto done;
1696         }
1697
1698         send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1699         new_settings(hdev, cmd->sk);
1700
1701 done:
1702         if (cmd)
1703                 mgmt_pending_remove(cmd);
1704
1705         hci_dev_unlock(hdev);
1706 }
1707
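     /* Powered-off path for Set Connectable: only the HCI_CONNECTABLE flag
      * (and HCI_DISCOVERABLE when disabling) is updated, the settings
      * response is sent and, on an actual change, the scan state is
      * refreshed and New Settings is emitted.
      */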
1708 static int set_connectable_update_settings(struct hci_dev *hdev,
1709                                            struct sock *sk, u8 val)
1710 {
1711         bool changed = false;
1712         int err;
1713
1714         if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1715                 changed = true;
1716
1717         if (val) {
1718                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1719         } else {
1720                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1721                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1722         }
1723
1724         err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1725         if (err < 0)
1726                 return err;
1727
1728         if (changed) {
1729                 hci_update_scan(hdev);
1730                 hci_update_passive_scan(hdev);
1731                 return new_settings(hdev, sk);
1732         }
1733
1734         return 0;
1735 }
1736
1737 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1738 {
1739         BT_DBG("%s", hdev->name);
1740
1741         return hci_update_connectable_sync(hdev);
1742 }
1743
1744 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1745                            u16 len)
1746 {
1747         struct mgmt_mode *cp = data;
1748         struct mgmt_pending_cmd *cmd;
1749         int err;
1750
1751         bt_dev_dbg(hdev, "sock %p", sk);
1752
1753         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1754             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1755                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1756                                        MGMT_STATUS_REJECTED);
1757
1758         if (cp->val != 0x00 && cp->val != 0x01)
1759                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1760                                        MGMT_STATUS_INVALID_PARAMS);
1761
1762         hci_dev_lock(hdev);
1763
1764         if (!hdev_is_powered(hdev)) {
1765                 err = set_connectable_update_settings(hdev, sk, cp->val);
1766                 goto failed;
1767         }
1768
1769         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1770             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1771                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1772                                       MGMT_STATUS_BUSY);
1773                 goto failed;
1774         }
1775
1776         cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1777         if (!cmd) {
1778                 err = -ENOMEM;
1779                 goto failed;
1780         }
1781
1782         if (cp->val) {
1783                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1784         } else {
1785                 if (hdev->discov_timeout > 0)
1786                         cancel_delayed_work(&hdev->discov_off);
1787
1788                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1789                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1790                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1791         }
1792
1793         err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1794                                  mgmt_set_connectable_complete);
1795
1796         if (err < 0)
1797                 mgmt_pending_remove(cmd);
1798
1799 failed:
1800         hci_dev_unlock(hdev);
1801         return err;
1802 }
1803
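     /* Set Bondable is a pure flag change and needs no HCI command: toggle
      * HCI_BONDABLE, reply with the current settings and, if the value
      * actually changed, refresh the discoverable state and emit New
      * Settings.
      */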
1804 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1805                         u16 len)
1806 {
1807         struct mgmt_mode *cp = data;
1808         bool changed;
1809         int err;
1810
1811         bt_dev_dbg(hdev, "sock %p", sk);
1812
1813         if (cp->val != 0x00 && cp->val != 0x01)
1814                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1815                                        MGMT_STATUS_INVALID_PARAMS);
1816
1817         hci_dev_lock(hdev);
1818
1819         if (cp->val)
1820                 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1821         else
1822                 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1823
1824         err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1825         if (err < 0)
1826                 goto unlock;
1827
1828         if (changed) {
1829                 /* In limited privacy mode the change of bondable mode
1830                  * may affect the local advertising address.
1831                  */
1832                 hci_update_discoverable(hdev);
1833
1834                 err = new_settings(hdev, sk);
1835         }
1836
1837 unlock:
1838         hci_dev_unlock(hdev);
1839         return err;
1840 }
1841
1842 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1843                              u16 len)
1844 {
1845         struct mgmt_mode *cp = data;
1846         struct mgmt_pending_cmd *cmd;
1847         u8 val, status;
1848         int err;
1849
1850         bt_dev_dbg(hdev, "sock %p", sk);
1851
1852         status = mgmt_bredr_support(hdev);
1853         if (status)
1854                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1855                                        status);
1856
1857         if (cp->val != 0x00 && cp->val != 0x01)
1858                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1859                                        MGMT_STATUS_INVALID_PARAMS);
1860
1861         hci_dev_lock(hdev);
1862
1863         if (!hdev_is_powered(hdev)) {
1864                 bool changed = false;
1865
1866                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1867                         hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1868                         changed = true;
1869                 }
1870
1871                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1872                 if (err < 0)
1873                         goto failed;
1874
1875                 if (changed)
1876                         err = new_settings(hdev, sk);
1877
1878                 goto failed;
1879         }
1880
1881         if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1882                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1883                                       MGMT_STATUS_BUSY);
1884                 goto failed;
1885         }
1886
1887         val = !!cp->val;
1888
1889         if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1890                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1891                 goto failed;
1892         }
1893
1894         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1895         if (!cmd) {
1896                 err = -ENOMEM;
1897                 goto failed;
1898         }
1899
1900         err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1901         if (err < 0) {
1902                 mgmt_pending_remove(cmd);
1903                 goto failed;
1904         }
1905
1906 failed:
1907         hci_dev_unlock(hdev);
1908         return err;
1909 }
1910
1911 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1912 {
1913         struct cmd_lookup match = { NULL, hdev };
1914         struct mgmt_pending_cmd *cmd = data;
1915         struct mgmt_mode *cp = cmd->param;
1916         u8 enable = cp->val;
1917         bool changed;
1918
1919         /* Make sure the cmd is still outstanding. */
1920         if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1921                 return;
1922
1923         if (err) {
1924                 u8 mgmt_err = mgmt_status(err);
1925
1926                 if (enable && hci_dev_test_and_clear_flag(hdev,
1927                                                           HCI_SSP_ENABLED)) {
1928                         new_settings(hdev, NULL);
1929                 }
1930
1931                 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1932                                      &mgmt_err);
1933                 return;
1934         }
1935
1936         if (enable) {
1937                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1938         } else {
1939                 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1940         }
1941
1942         mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1943
1944         if (changed)
1945                 new_settings(hdev, match.sk);
1946
1947         if (match.sk)
1948                 sock_put(match.sk);
1949
1950         hci_update_eir_sync(hdev);
1951 }
1952
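     /* Sync handler for Set SSP: tentatively set HCI_SSP_ENABLED, issue the
      * Write Simple Pairing Mode command and, on success, clear the flag
      * again; the final flag transition and New Settings notification are
      * handled by set_ssp_complete() above.
      */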
1953 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1954 {
1955         struct mgmt_pending_cmd *cmd = data;
1956         struct mgmt_mode *cp = cmd->param;
1957         bool changed = false;
1958         int err;
1959
1960         if (cp->val)
1961                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1962
1963         err = hci_write_ssp_mode_sync(hdev, cp->val);
1964
1965         if (!err && changed)
1966                 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1967
1968         return err;
1969 }
1970
1971 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1972 {
1973         struct mgmt_mode *cp = data;
1974         struct mgmt_pending_cmd *cmd;
1975         u8 status;
1976         int err;
1977
1978         bt_dev_dbg(hdev, "sock %p", sk);
1979
1980         status = mgmt_bredr_support(hdev);
1981         if (status)
1982                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1983
1984         if (!lmp_ssp_capable(hdev))
1985                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1986                                        MGMT_STATUS_NOT_SUPPORTED);
1987
1988         if (cp->val != 0x00 && cp->val != 0x01)
1989                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1990                                        MGMT_STATUS_INVALID_PARAMS);
1991
1992         hci_dev_lock(hdev);
1993
1994         if (!hdev_is_powered(hdev)) {
1995                 bool changed;
1996
1997                 if (cp->val) {
1998                         changed = !hci_dev_test_and_set_flag(hdev,
1999                                                              HCI_SSP_ENABLED);
2000                 } else {
2001                         changed = hci_dev_test_and_clear_flag(hdev,
2002                                                               HCI_SSP_ENABLED);
2003                 }
2004
2005                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2006                 if (err < 0)
2007                         goto failed;
2008
2009                 if (changed)
2010                         err = new_settings(hdev, sk);
2011
2012                 goto failed;
2013         }
2014
2015         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2016                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2017                                       MGMT_STATUS_BUSY);
2018                 goto failed;
2019         }
2020
2021         if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2022                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2023                 goto failed;
2024         }
2025
2026         cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2027         if (!cmd)
2028                 err = -ENOMEM;
2029         else
2030                 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2031                                          set_ssp_complete);
2032
2033         if (err < 0) {
2034                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2035                                       MGMT_STATUS_FAILED);
2036
2037                 if (cmd)
2038                         mgmt_pending_remove(cmd);
2039         }
2040
2041 failed:
2042         hci_dev_unlock(hdev);
2043         return err;
2044 }
2045
2046 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2047 {
2048         bt_dev_dbg(hdev, "sock %p", sk);
2049
2050         return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2051                                        MGMT_STATUS_NOT_SUPPORTED);
2052 }
2053
2054 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2055 {
2056         struct cmd_lookup match = { NULL, hdev };
2057         u8 status = mgmt_status(err);
2058
2059         bt_dev_dbg(hdev, "err %d", err);
2060
2061         if (status) {
2062                 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2063                                                         &status);
2064                 return;
2065         }
2066
2067         mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2068
2069         new_settings(hdev, match.sk);
2070
2071         if (match.sk)
2072                 sock_put(match.sk);
2073 }
2074
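     /* Sync handler for Set LE: when disabling, tear down any advertising
      * instances first; when enabling, set HCI_LE_ENABLED up front. After
      * writing LE Host Supported, refresh the advertising/scan response
      * data and passive scanning if LE ended up enabled.
      */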
2075 static int set_le_sync(struct hci_dev *hdev, void *data)
2076 {
2077         struct mgmt_pending_cmd *cmd = data;
2078         struct mgmt_mode *cp = cmd->param;
2079         u8 val = !!cp->val;
2080         int err;
2081
2082         if (!val) {
2083                 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2084
2085                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2086                         hci_disable_advertising_sync(hdev);
2087
2088                 if (ext_adv_capable(hdev))
2089                         hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2090         } else {
2091                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2092         }
2093
2094         err = hci_write_le_host_supported_sync(hdev, val, 0);
2095
2096         /* Make sure the controller has a good default for
2097          * advertising data. Restrict the update to when LE
2098          * has actually been enabled. During power on, the
2099          * update in powered_update_hci will take care of it.
2100          */
2101         if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2102                 if (ext_adv_capable(hdev)) {
2103                         int status;
2104
2105                         status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2106                         if (!status)
2107                                 hci_update_scan_rsp_data_sync(hdev, 0x00);
2108                 } else {
2109                         hci_update_adv_data_sync(hdev, 0x00);
2110                         hci_update_scan_rsp_data_sync(hdev, 0x00);
2111                 }
2112
2113                 hci_update_passive_scan(hdev);
2114         }
2115
2116         return err;
2117 }
2118
2119 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2120 {
2121         struct mgmt_pending_cmd *cmd = data;
2122         u8 status = mgmt_status(err);
2123         struct sock *sk = cmd->sk;
2124
2125         if (status) {
2126                 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2127                                      cmd_status_rsp, &status);
2128                 return;
2129         }
2130
2131         mgmt_pending_remove(cmd);
2132         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2133 }
2134
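     /* Sync handler for Set Mesh Receiver: toggle HCI_MESH, copy the list of
      * AD types to accept (the command parameters following the fixed
      * header) into hdev->mesh_ad_types and re-evaluate passive scanning.
      */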
2135 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2136 {
2137         struct mgmt_pending_cmd *cmd = data;
2138         struct mgmt_cp_set_mesh *cp = cmd->param;
2139         size_t len = cmd->param_len;
2140
2141         memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2142
2143         if (cp->enable)
2144                 hci_dev_set_flag(hdev, HCI_MESH);
2145         else
2146                 hci_dev_clear_flag(hdev, HCI_MESH);
2147
2148         len -= sizeof(*cp);
2149
2150         /* If filters don't fit, forward all adv pkts */
2151         if (len <= sizeof(hdev->mesh_ad_types))
2152                 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2153
2154         hci_update_passive_scan_sync(hdev);
2155         return 0;
2156 }
2157
2158 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2159 {
2160         struct mgmt_cp_set_mesh *cp = data;
2161         struct mgmt_pending_cmd *cmd;
2162         int err = 0;
2163
2164         bt_dev_dbg(hdev, "sock %p", sk);
2165
2166         if (!lmp_le_capable(hdev) ||
2167             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2168                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2169                                        MGMT_STATUS_NOT_SUPPORTED);
2170
2171         if (cp->enable != 0x00 && cp->enable != 0x01)
2172                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2173                                        MGMT_STATUS_INVALID_PARAMS);
2174
2175         hci_dev_lock(hdev);
2176
2177         cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2178         if (!cmd)
2179                 err = -ENOMEM;
2180         else
2181                 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2182                                          set_mesh_complete);
2183
2184         if (err < 0) {
2185                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2186                                       MGMT_STATUS_FAILED);
2187
2188                 if (cmd)
2189                         mgmt_pending_remove(cmd);
2190         }
2191
2192         hci_dev_unlock(hdev);
2193         return err;
2194 }
2195
2196 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2197 {
2198         struct mgmt_mesh_tx *mesh_tx = data;
2199         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2200         unsigned long mesh_send_interval;
2201         u8 mgmt_err = mgmt_status(err);
2202
2203         /* Report any errors here, but don't report completion */
2204
2205         if (mgmt_err) {
2206                 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2207                 /* Send Complete Error Code for handle */
2208                 mesh_send_complete(hdev, mesh_tx, false);
2209                 return;
2210         }
2211
2212         mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2213         queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2214                            mesh_send_interval);
2215 }
2216
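     /* Transmit a mesh packet by adding an advertising instance (numbered
      * one past the controller's advertising sets) that carries the packet
      * as advertising data; it is either scheduled directly or left to go
      * out with the normal instance rotation if advertising is already
      * active.
      */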
2217 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2218 {
2219         struct mgmt_mesh_tx *mesh_tx = data;
2220         struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2221         struct adv_info *adv, *next_instance;
2222         u8 instance = hdev->le_num_of_adv_sets + 1;
2223         u16 timeout, duration;
2224         int err = 0;
2225
2226         if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2227                 return MGMT_STATUS_BUSY;
2228
2229         timeout = 1000;
2230         duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2231         adv = hci_add_adv_instance(hdev, instance, 0,
2232                                    send->adv_data_len, send->adv_data,
2233                                    0, NULL,
2234                                    timeout, duration,
2235                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
2236                                    hdev->le_adv_min_interval,
2237                                    hdev->le_adv_max_interval,
2238                                    mesh_tx->handle);
2239
2240         if (!IS_ERR(adv))
2241                 mesh_tx->instance = instance;
2242         else
2243                 err = PTR_ERR(adv);
2244
2245         if (hdev->cur_adv_instance == instance) {
2246                 /* If the currently advertised instance is being changed then
2247                  * cancel the current advertising and schedule the next
2248                  * instance. If there is only one instance then the overridden
2249                  * advertising data will be visible right away.
2250                  */
2251                 cancel_adv_timeout(hdev);
2252
2253                 next_instance = hci_get_next_instance(hdev, instance);
2254                 if (next_instance)
2255                         instance = next_instance->instance;
2256                 else
2257                         instance = 0;
2258         } else if (hdev->adv_instance_timeout) {
2259                 /* Immediately advertise the new instance if no other is active,
2260                  * or let it go out naturally from the queue if ADV is already in progress.
2261                  */
2262                 instance = 0;
2263         }
2264
2265         if (instance)
2266                 return hci_schedule_adv_instance_sync(hdev, instance, true);
2267
2268         return err;
2269 }
2270
2271 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2272 {
2273         struct mgmt_rp_mesh_read_features *rp = data;
2274
2275         if (rp->used_handles >= rp->max_handles)
2276                 return;
2277
2278         rp->handles[rp->used_handles++] = mesh_tx->handle;
2279 }
2280
2281 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2282                          void *data, u16 len)
2283 {
2284         struct mgmt_rp_mesh_read_features rp;
2285
2286         if (!lmp_le_capable(hdev) ||
2287             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2288                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2289                                        MGMT_STATUS_NOT_SUPPORTED);
2290
2291         memset(&rp, 0, sizeof(rp));
2292         rp.index = cpu_to_le16(hdev->id);
2293         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2294                 rp.max_handles = MESH_HANDLES_MAX;
2295
2296         hci_dev_lock(hdev);
2297
2298         if (rp.max_handles)
2299                 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2300
2301         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2302                           rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2303
2304         hci_dev_unlock(hdev);
2305         return 0;
2306 }
2307
2308 static int send_cancel(struct hci_dev *hdev, void *data)
2309 {
2310         struct mgmt_pending_cmd *cmd = data;
2311         struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2312         struct mgmt_mesh_tx *mesh_tx;
2313
2314         if (!cancel->handle) {
2315                 do {
2316                         mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2317
2318                         if (mesh_tx)
2319                                 mesh_send_complete(hdev, mesh_tx, false);
2320                 } while (mesh_tx);
2321         } else {
2322                 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2323
2324                 if (mesh_tx && mesh_tx->sk == cmd->sk)
2325                         mesh_send_complete(hdev, mesh_tx, false);
2326         }
2327
2328         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2329                           0, NULL, 0);
2330         mgmt_pending_free(cmd);
2331
2332         return 0;
2333 }
2334
2335 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2336                             void *data, u16 len)
2337 {
2338         struct mgmt_pending_cmd *cmd;
2339         int err;
2340
2341         if (!lmp_le_capable(hdev) ||
2342             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2343                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2344                                        MGMT_STATUS_NOT_SUPPORTED);
2345
2346         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2347                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2348                                        MGMT_STATUS_REJECTED);
2349
2350         hci_dev_lock(hdev);
2351         cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2352         if (!cmd)
2353                 err = -ENOMEM;
2354         else
2355                 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2356
2357         if (err < 0) {
2358                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2359                                       MGMT_STATUS_FAILED);
2360
2361                 if (cmd)
2362                         mgmt_pending_free(cmd);
2363         }
2364
2365         hci_dev_unlock(hdev);
2366         return err;
2367 }
2368
2369 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2370 {
2371         struct mgmt_mesh_tx *mesh_tx;
2372         struct mgmt_cp_mesh_send *send = data;
2373         struct mgmt_rp_mesh_read_features rp;
2374         bool sending;
2375         int err = 0;
2376
2377         if (!lmp_le_capable(hdev) ||
2378             !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2379                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2380                                        MGMT_STATUS_NOT_SUPPORTED);
2381         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2382             len <= MGMT_MESH_SEND_SIZE ||
2383             len > (MGMT_MESH_SEND_SIZE + 31))
2384                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2385                                        MGMT_STATUS_REJECTED);
2386
2387         hci_dev_lock(hdev);
2388
2389         memset(&rp, 0, sizeof(rp));
2390         rp.max_handles = MESH_HANDLES_MAX;
2391
2392         mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2393
2394         if (rp.max_handles <= rp.used_handles) {
2395                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2396                                       MGMT_STATUS_BUSY);
2397                 goto done;
2398         }
2399
2400         sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2401         mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2402
2403         if (!mesh_tx)
2404                 err = -ENOMEM;
2405         else if (!sending)
2406                 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2407                                          mesh_send_start_complete);
2408
2409         if (err < 0) {
2410                 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2411                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2412                                       MGMT_STATUS_FAILED);
2413
2414                 if (mesh_tx) {
2415                         if (sending)
2416                                 mgmt_mesh_remove(mesh_tx);
2417                 }
2418         } else {
2419                 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2420
2421                 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2422                                   &mesh_tx->handle, 1);
2423         }
2424
2425 done:
2426         hci_dev_unlock(hdev);
2427         return err;
2428 }
2429
2430 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2431 {
2432         struct mgmt_mode *cp = data;
2433         struct mgmt_pending_cmd *cmd;
2434         int err;
2435         u8 val, enabled;
2436
2437         bt_dev_dbg(hdev, "sock %p", sk);
2438
2439         if (!lmp_le_capable(hdev))
2440                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2441                                        MGMT_STATUS_NOT_SUPPORTED);
2442
2443         if (cp->val != 0x00 && cp->val != 0x01)
2444                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2445                                        MGMT_STATUS_INVALID_PARAMS);
2446
2447         /* Bluetooth single-mode LE-only controllers, or dual-mode
2448          * controllers configured as LE-only devices, do not allow
2449          * switching LE off. These either have LE enabled explicitly
2450          * or have had BR/EDR switched off previously.
2451          *
2452          * When trying to enable LE while it is already enabled,
2453          * gracefully send a positive response. Trying to disable it,
2454          * however, results in rejection.
2455          */
2456         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2457                 if (cp->val == 0x01)
2458                         return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2459
2460                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2461                                        MGMT_STATUS_REJECTED);
2462         }
2463
2464         hci_dev_lock(hdev);
2465
2466         val = !!cp->val;
2467         enabled = lmp_host_le_capable(hdev);
2468
2469         if (!hdev_is_powered(hdev) || val == enabled) {
2470                 bool changed = false;
2471
2472                 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2473                         hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2474                         changed = true;
2475                 }
2476
2477                 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2478                         hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2479                         changed = true;
2480                 }
2481
2482                 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2483                 if (err < 0)
2484                         goto unlock;
2485
2486                 if (changed)
2487                         err = new_settings(hdev, sk);
2488
2489                 goto unlock;
2490         }
2491
2492         if (pending_find(MGMT_OP_SET_LE, hdev) ||
2493             pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2494                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2495                                       MGMT_STATUS_BUSY);
2496                 goto unlock;
2497         }
2498
2499         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2500         if (!cmd)
2501                 err = -ENOMEM;
2502         else
2503                 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2504                                          set_le_complete);
2505
2506         if (err < 0) {
2507                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2508                                       MGMT_STATUS_FAILED);
2509
2510                 if (cmd)
2511                         mgmt_pending_remove(cmd);
2512         }
2513
2514 unlock:
2515         hci_dev_unlock(hdev);
2516         return err;
2517 }
2518
2519 /* This is a helper function to test for pending mgmt commands that can
2520  * cause CoD or EIR HCI commands. We can only allow one such pending
2521  * mgmt command at a time since otherwise we cannot easily track what
2522  * the current values are and will be, and based on that calculate whether
2523  * a new HCI command needs to be sent and, if so, with what value.
2524  */
2525 static bool pending_eir_or_class(struct hci_dev *hdev)
2526 {
2527         struct mgmt_pending_cmd *cmd;
2528
2529         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2530                 switch (cmd->opcode) {
2531                 case MGMT_OP_ADD_UUID:
2532                 case MGMT_OP_REMOVE_UUID:
2533                 case MGMT_OP_SET_DEV_CLASS:
2534                 case MGMT_OP_SET_POWERED:
2535                         return true;
2536                 }
2537         }
2538
2539         return false;
2540 }
2541
2542 static const u8 bluetooth_base_uuid[] = {
2543                         0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2544                         0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2545 };
2546
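     /* Return the smallest representation of a 128-bit UUID: 16 or 32 bits
      * if it is an alias of the Bluetooth Base UUID (depending on whether
      * the value fits in 16 bits), otherwise the full 128 bits.
      */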
2547 static u8 get_uuid_size(const u8 *uuid)
2548 {
2549         u32 val;
2550
2551         if (memcmp(uuid, bluetooth_base_uuid, 12))
2552                 return 128;
2553
2554         val = get_unaligned_le32(&uuid[12]);
2555         if (val > 0xffff)
2556                 return 32;
2557
2558         return 16;
2559 }
2560
2561 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2562 {
2563         struct mgmt_pending_cmd *cmd = data;
2564
2565         bt_dev_dbg(hdev, "err %d", err);
2566
2567         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2568                           mgmt_status(err), hdev->dev_class, 3);
2569
2570         mgmt_pending_free(cmd);
2571 }
2572
2573 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2574 {
2575         int err;
2576
2577         err = hci_update_class_sync(hdev);
2578         if (err)
2579                 return err;
2580
2581         return hci_update_eir_sync(hdev);
2582 }
2583
2584 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2585 {
2586         struct mgmt_cp_add_uuid *cp = data;
2587         struct mgmt_pending_cmd *cmd;
2588         struct bt_uuid *uuid;
2589         int err;
2590
2591         bt_dev_dbg(hdev, "sock %p", sk);
2592
2593         hci_dev_lock(hdev);
2594
2595         if (pending_eir_or_class(hdev)) {
2596                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2597                                       MGMT_STATUS_BUSY);
2598                 goto failed;
2599         }
2600
2601         uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2602         if (!uuid) {
2603                 err = -ENOMEM;
2604                 goto failed;
2605         }
2606
2607         memcpy(uuid->uuid, cp->uuid, 16);
2608         uuid->svc_hint = cp->svc_hint;
2609         uuid->size = get_uuid_size(cp->uuid);
2610
2611         list_add_tail(&uuid->list, &hdev->uuids);
2612
2613         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2614         if (!cmd) {
2615                 err = -ENOMEM;
2616                 goto failed;
2617         }
2618
2619         /* MGMT_OP_ADD_UUID doesn't require the adapter to be UP/Running,
2620          * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2621          */
2622         err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2623                                   mgmt_class_complete);
2624         if (err < 0) {
2625                 mgmt_pending_free(cmd);
2626                 goto failed;
2627         }
2628
2629 failed:
2630         hci_dev_unlock(hdev);
2631         return err;
2632 }
2633
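     /* Arm the service cache timer so the class/EIR update can be deferred;
      * returns true only if the adapter is powered and the cache was not
      * already active.
      */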
2634 static bool enable_service_cache(struct hci_dev *hdev)
2635 {
2636         if (!hdev_is_powered(hdev))
2637                 return false;
2638
2639         if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2640                 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2641                                    CACHE_TIMEOUT);
2642                 return true;
2643         }
2644
2645         return false;
2646 }
2647
2648 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2649 {
2650         int err;
2651
2652         err = hci_update_class_sync(hdev);
2653         if (err)
2654                 return err;
2655
2656         return hci_update_eir_sync(hdev);
2657 }
2658
2659 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2660                        u16 len)
2661 {
2662         struct mgmt_cp_remove_uuid *cp = data;
2663         struct mgmt_pending_cmd *cmd;
2664         struct bt_uuid *match, *tmp;
2665         static const u8 bt_uuid_any[] = {
2666                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2667         };
2668         int err, found;
2669
2670         bt_dev_dbg(hdev, "sock %p", sk);
2671
2672         hci_dev_lock(hdev);
2673
2674         if (pending_eir_or_class(hdev)) {
2675                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2676                                       MGMT_STATUS_BUSY);
2677                 goto unlock;
2678         }
2679
2680         if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2681                 hci_uuids_clear(hdev);
2682
2683                 if (enable_service_cache(hdev)) {
2684                         err = mgmt_cmd_complete(sk, hdev->id,
2685                                                 MGMT_OP_REMOVE_UUID,
2686                                                 0, hdev->dev_class, 3);
2687                         goto unlock;
2688                 }
2689
2690                 goto update_class;
2691         }
2692
2693         found = 0;
2694
2695         list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2696                 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2697                         continue;
2698
2699                 list_del(&match->list);
2700                 kfree(match);
2701                 found++;
2702         }
2703
2704         if (found == 0) {
2705                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2706                                       MGMT_STATUS_INVALID_PARAMS);
2707                 goto unlock;
2708         }
2709
2710 update_class:
2711         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2712         if (!cmd) {
2713                 err = -ENOMEM;
2714                 goto unlock;
2715         }
2716
2717         /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be UP/Running,
2718          * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2719          */
2720         err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2721                                   mgmt_class_complete);
2722         if (err < 0)
2723                 mgmt_pending_free(cmd);
2724
2725 unlock:
2726         hci_dev_unlock(hdev);
2727         return err;
2728 }
2729
2730 static int set_class_sync(struct hci_dev *hdev, void *data)
2731 {
2732         int err = 0;
2733
2734         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2735                 cancel_delayed_work_sync(&hdev->service_cache);
2736                 err = hci_update_eir_sync(hdev);
2737         }
2738
2739         if (err)
2740                 return err;
2741
2742         return hci_update_class_sync(hdev);
2743 }
2744
2745 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2746                          u16 len)
2747 {
2748         struct mgmt_cp_set_dev_class *cp = data;
2749         struct mgmt_pending_cmd *cmd;
2750         int err;
2751
2752         bt_dev_dbg(hdev, "sock %p", sk);
2753
2754         if (!lmp_bredr_capable(hdev))
2755                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2756                                        MGMT_STATUS_NOT_SUPPORTED);
2757
2758         hci_dev_lock(hdev);
2759
2760         if (pending_eir_or_class(hdev)) {
2761                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2762                                       MGMT_STATUS_BUSY);
2763                 goto unlock;
2764         }
2765
2766         if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2767                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2768                                       MGMT_STATUS_INVALID_PARAMS);
2769                 goto unlock;
2770         }
2771
2772         hdev->major_class = cp->major;
2773         hdev->minor_class = cp->minor;
2774
2775         if (!hdev_is_powered(hdev)) {
2776                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2777                                         hdev->dev_class, 3);
2778                 goto unlock;
2779         }
2780
2781         cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2782         if (!cmd) {
2783                 err = -ENOMEM;
2784                 goto unlock;
2785         }
2786
2787         /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be UP/Running,
2788          * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2789          */
2790         err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2791                                   mgmt_class_complete);
2792         if (err < 0)
2793                 mgmt_pending_free(cmd);
2794
2795 unlock:
2796         hci_dev_unlock(hdev);
2797         return err;
2798 }
2799
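     /* Load Link Keys replaces the stored BR/EDR link keys with the supplied
      * list and updates HCI_KEEP_DEBUG_KEYS as requested; blocked keys and
      * debug combination keys are skipped rather than stored.
      */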
2800 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2801                           u16 len)
2802 {
2803         struct mgmt_cp_load_link_keys *cp = data;
2804         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2805                                    sizeof(struct mgmt_link_key_info));
2806         u16 key_count, expected_len;
2807         bool changed;
2808         int i;
2809
2810         bt_dev_dbg(hdev, "sock %p", sk);
2811
2812         if (!lmp_bredr_capable(hdev))
2813                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2814                                        MGMT_STATUS_NOT_SUPPORTED);
2815
2816         key_count = __le16_to_cpu(cp->key_count);
2817         if (key_count > max_key_count) {
2818                 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2819                            key_count);
2820                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2821                                        MGMT_STATUS_INVALID_PARAMS);
2822         }
2823
2824         expected_len = struct_size(cp, keys, key_count);
2825         if (expected_len != len) {
2826                 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2827                            expected_len, len);
2828                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2829                                        MGMT_STATUS_INVALID_PARAMS);
2830         }
2831
2832         if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2833                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2834                                        MGMT_STATUS_INVALID_PARAMS);
2835
2836         bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2837                    key_count);
2838
2839         for (i = 0; i < key_count; i++) {
2840                 struct mgmt_link_key_info *key = &cp->keys[i];
2841
2842                 /* With SMP over BR/EDR/LE there is no need to check addr_type */
2843                 if (key->type > 0x08)
2844                         return mgmt_cmd_status(sk, hdev->id,
2845                                                MGMT_OP_LOAD_LINK_KEYS,
2846                                                MGMT_STATUS_INVALID_PARAMS);
2847         }
2848
2849         hci_dev_lock(hdev);
2850
2851         hci_link_keys_clear(hdev);
2852
2853         if (cp->debug_keys)
2854                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2855         else
2856                 changed = hci_dev_test_and_clear_flag(hdev,
2857                                                       HCI_KEEP_DEBUG_KEYS);
2858
2859         if (changed)
2860                 new_settings(hdev, NULL);
2861
2862         for (i = 0; i < key_count; i++) {
2863                 struct mgmt_link_key_info *key = &cp->keys[i];
2864
2865                 if (hci_is_blocked_key(hdev,
2866                                        HCI_BLOCKED_KEY_TYPE_LINKKEY,
2867                                        key->val)) {
2868                         bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2869                                     &key->addr.bdaddr);
2870                         continue;
2871                 }
2872
2873                 /* Always ignore debug keys and require a new pairing if
2874                  * the user wants to use them.
2875                  */
2876                 if (key->type == HCI_LK_DEBUG_COMBINATION)
2877                         continue;
2878
2879                 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2880                                  key->type, key->pin_len, NULL);
2881         }
2882
2883         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2884
2885         hci_dev_unlock(hdev);
2886
2887         return 0;
2888 }
2889
2890 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2891                            u8 addr_type, struct sock *skip_sk)
2892 {
2893         struct mgmt_ev_device_unpaired ev;
2894
2895         bacpy(&ev.addr.bdaddr, bdaddr);
2896         ev.addr.type = addr_type;
2897
2898         return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2899                           skip_sk);
2900 }
2901
2902 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2903 {
2904         struct mgmt_pending_cmd *cmd = data;
2905         struct mgmt_cp_unpair_device *cp = cmd->param;
2906
2907         if (!err)
2908                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2909
2910         cmd->cmd_complete(cmd, err);
2911         mgmt_pending_free(cmd);
2912 }
2913
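     /* Sync handler for Unpair Device: the keys have already been removed in
      * unpair_device(), so all that is left is to abort any remaining
      * baseband link to the peer.
      */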
2914 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2915 {
2916         struct mgmt_pending_cmd *cmd = data;
2917         struct mgmt_cp_unpair_device *cp = cmd->param;
2918         struct hci_conn *conn;
2919
2920         if (cp->addr.type == BDADDR_BREDR)
2921                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2922                                                &cp->addr.bdaddr);
2923         else
2924                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2925                                                le_addr_type(cp->addr.type));
2926
2927         if (!conn)
2928                 return 0;
2929
2930         return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2931 }
2932
2933 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2934                          u16 len)
2935 {
2936         struct mgmt_cp_unpair_device *cp = data;
2937         struct mgmt_rp_unpair_device rp;
2938         struct hci_conn_params *params;
2939         struct mgmt_pending_cmd *cmd;
2940         struct hci_conn *conn;
2941         u8 addr_type;
2942         int err;
2943
2944         memset(&rp, 0, sizeof(rp));
2945         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2946         rp.addr.type = cp->addr.type;
2947
2948         if (!bdaddr_type_is_valid(cp->addr.type))
2949                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2950                                          MGMT_STATUS_INVALID_PARAMS,
2951                                          &rp, sizeof(rp));
2952
2953         if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2954                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2955                                          MGMT_STATUS_INVALID_PARAMS,
2956                                          &rp, sizeof(rp));
2957
2958         hci_dev_lock(hdev);
2959
2960         if (!hdev_is_powered(hdev)) {
2961                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2962                                         MGMT_STATUS_NOT_POWERED, &rp,
2963                                         sizeof(rp));
2964                 goto unlock;
2965         }
2966
2967         if (cp->addr.type == BDADDR_BREDR) {
2968                 /* If disconnection is requested, then look up the
2969                  * connection. If the remote device is connected, it
2970                  * will later be used to terminate the link.
2971                  *
2972                  * Setting it to NULL explicitly means the link will
2973                  * not be terminated.
2974                  */
2975                 if (cp->disconnect)
2976                         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2977                                                        &cp->addr.bdaddr);
2978                 else
2979                         conn = NULL;
2980
2981                 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2982                 if (err < 0) {
2983                         err = mgmt_cmd_complete(sk, hdev->id,
2984                                                 MGMT_OP_UNPAIR_DEVICE,
2985                                                 MGMT_STATUS_NOT_PAIRED, &rp,
2986                                                 sizeof(rp));
2987                         goto unlock;
2988                 }
2989
2990                 goto done;
2991         }
2992
2993         /* LE address type */
2994         addr_type = le_addr_type(cp->addr.type);
2995
2996         /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2997         err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2998         if (err < 0) {
2999                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3000                                         MGMT_STATUS_NOT_PAIRED, &rp,
3001                                         sizeof(rp));
3002                 goto unlock;
3003         }
3004
3005         conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3006         if (!conn) {
3007                 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3008                 goto done;
3009         }
3010
3011
3012         /* Defer clearing the connection parameters until the connection is
3013          * closed, to give a chance of keeping them if re-pairing happens.
3014          */
3015         set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3016
3017         /* Disable auto-connection parameters if present */
3018         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3019         if (params) {
3020                 if (params->explicit_connect)
3021                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3022                 else
3023                         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3024         }
3025
3026         /* If disconnection is not requested, then clear the connection
3027          * variable so that the link is not terminated.
3028          */
3029         if (!cp->disconnect)
3030                 conn = NULL;
3031
3032 done:
        /* If the connection variable is still set, termination of the
         * link was requested; otherwise just complete the command and
         * notify that the device is now unpaired.
         */
3036         if (!conn) {
3037                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3038                                         &rp, sizeof(rp));
3039                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3040                 goto unlock;
3041         }
3042
3043         cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3044                                sizeof(*cp));
3045         if (!cmd) {
3046                 err = -ENOMEM;
3047                 goto unlock;
3048         }
3049
3050         cmd->cmd_complete = addr_cmd_complete;
3051
3052         err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3053                                  unpair_device_complete);
3054         if (err < 0)
3055                 mgmt_pending_free(cmd);
3056
3057 unlock:
3058         hci_dev_unlock(hdev);
3059         return err;
3060 }
3061
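/* MGMT_OP_DISCONNECT handler: looks up the BR/EDR or LE connection for
 * the given address and terminates it with "remote user terminated
 * connection". The reply is deferred via a pending command until the
 * controller reports the disconnection; only one disconnect may be
 * pending at a time.
 */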
3062 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3063                       u16 len)
3064 {
3065         struct mgmt_cp_disconnect *cp = data;
3066         struct mgmt_rp_disconnect rp;
3067         struct mgmt_pending_cmd *cmd;
3068         struct hci_conn *conn;
3069         int err;
3070
3071         bt_dev_dbg(hdev, "sock %p", sk);
3072
3073         memset(&rp, 0, sizeof(rp));
3074         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3075         rp.addr.type = cp->addr.type;
3076
3077         if (!bdaddr_type_is_valid(cp->addr.type))
3078                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3079                                          MGMT_STATUS_INVALID_PARAMS,
3080                                          &rp, sizeof(rp));
3081
3082         hci_dev_lock(hdev);
3083
3084         if (!test_bit(HCI_UP, &hdev->flags)) {
3085                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3086                                         MGMT_STATUS_NOT_POWERED, &rp,
3087                                         sizeof(rp));
3088                 goto failed;
3089         }
3090
3091         if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3092                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3093                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3094                 goto failed;
3095         }
3096
3097         if (cp->addr.type == BDADDR_BREDR)
3098                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3099                                                &cp->addr.bdaddr);
3100         else
3101                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3102                                                le_addr_type(cp->addr.type));
3103
3104         if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3105                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3106                                         MGMT_STATUS_NOT_CONNECTED, &rp,
3107                                         sizeof(rp));
3108                 goto failed;
3109         }
3110
3111         cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3112         if (!cmd) {
3113                 err = -ENOMEM;
3114                 goto failed;
3115         }
3116
3117         cmd->cmd_complete = generic_cmd_complete;
3118
3119         err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3120         if (err < 0)
3121                 mgmt_pending_remove(cmd);
3122
3123 failed:
3124         hci_dev_unlock(hdev);
3125         return err;
3126 }
3127
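/* Map an HCI link type plus HCI address type to the address type
 * exposed over the management interface, e.g. (LE_LINK,
 * ADDR_LE_DEV_PUBLIC) -> BDADDR_LE_PUBLIC. ISO and LE links map to the
 * LE address types, everything else is reported as BR/EDR.
 */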
3128 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3129 {
3130         switch (link_type) {
3131         case ISO_LINK:
3132         case LE_LINK:
3133                 switch (addr_type) {
3134                 case ADDR_LE_DEV_PUBLIC:
3135                         return BDADDR_LE_PUBLIC;
3136
3137                 default:
3138                         /* Fallback to LE Random address type */
3139                         return BDADDR_LE_RANDOM;
3140                 }
3141
3142         default:
3143                 /* Fallback to BR/EDR type */
3144                 return BDADDR_BREDR;
3145         }
3146 }
3147
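/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * connections marked as mgmt-connected. SCO/eSCO links are skipped,
 * which is why the response length is recalculated from the final
 * count before sending.
 */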
3148 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3149                            u16 data_len)
3150 {
3151         struct mgmt_rp_get_connections *rp;
3152         struct hci_conn *c;
3153         int err;
3154         u16 i;
3155
3156         bt_dev_dbg(hdev, "sock %p", sk);
3157
3158         hci_dev_lock(hdev);
3159
3160         if (!hdev_is_powered(hdev)) {
3161                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3162                                       MGMT_STATUS_NOT_POWERED);
3163                 goto unlock;
3164         }
3165
3166         i = 0;
3167         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3168                 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3169                         i++;
3170         }
3171
3172         rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3173         if (!rp) {
3174                 err = -ENOMEM;
3175                 goto unlock;
3176         }
3177
3178         i = 0;
3179         list_for_each_entry(c, &hdev->conn_hash.list, list) {
3180                 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3181                         continue;
3182                 bacpy(&rp->addr[i].bdaddr, &c->dst);
3183                 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3184                 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3185                         continue;
3186                 i++;
3187         }
3188
3189         rp->conn_count = cpu_to_le16(i);
3190
3191         /* Recalculate length in case of filtered SCO connections, etc */
3192         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3193                                 struct_size(rp, addr, i));
3194
3195         kfree(rp);
3196
3197 unlock:
3198         hci_dev_unlock(hdev);
3199         return err;
3200 }
3201
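/* Helper that records a pending PIN Code Negative Reply command and
 * forwards it to the controller as HCI_OP_PIN_CODE_NEG_REPLY.
 */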
3202 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3203                                    struct mgmt_cp_pin_code_neg_reply *cp)
3204 {
3205         struct mgmt_pending_cmd *cmd;
3206         int err;
3207
3208         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3209                                sizeof(*cp));
3210         if (!cmd)
3211                 return -ENOMEM;
3212
3213         cmd->cmd_complete = addr_cmd_complete;
3214
3215         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3216                            sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3217         if (err < 0)
3218                 mgmt_pending_remove(cmd);
3219
3220         return err;
3221 }
3222
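/* MGMT_OP_PIN_CODE_REPLY handler: requires an existing ACL connection.
 * If high security is pending, only a full 16-byte PIN is acceptable;
 * anything shorter is converted into a negative reply.
 */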
3223 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3224                           u16 len)
3225 {
3226         struct hci_conn *conn;
3227         struct mgmt_cp_pin_code_reply *cp = data;
3228         struct hci_cp_pin_code_reply reply;
3229         struct mgmt_pending_cmd *cmd;
3230         int err;
3231
3232         bt_dev_dbg(hdev, "sock %p", sk);
3233
3234         hci_dev_lock(hdev);
3235
3236         if (!hdev_is_powered(hdev)) {
3237                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3238                                       MGMT_STATUS_NOT_POWERED);
3239                 goto failed;
3240         }
3241
3242         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3243         if (!conn) {
3244                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3245                                       MGMT_STATUS_NOT_CONNECTED);
3246                 goto failed;
3247         }
3248
3249         if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3250                 struct mgmt_cp_pin_code_neg_reply ncp;
3251
3252                 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3253
3254                 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3255
3256                 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3257                 if (err >= 0)
3258                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3259                                               MGMT_STATUS_INVALID_PARAMS);
3260
3261                 goto failed;
3262         }
3263
3264         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3265         if (!cmd) {
3266                 err = -ENOMEM;
3267                 goto failed;
3268         }
3269
3270         cmd->cmd_complete = addr_cmd_complete;
3271
3272         bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3273         reply.pin_len = cp->pin_len;
3274         memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3275
3276         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3277         if (err < 0)
3278                 mgmt_pending_remove(cmd);
3279
3280 failed:
3281         hci_dev_unlock(hdev);
3282         return err;
3283 }
3284
3285 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3286                              u16 len)
3287 {
3288         struct mgmt_cp_set_io_capability *cp = data;
3289
3290         bt_dev_dbg(hdev, "sock %p", sk);
3291
3292         if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3293                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3294                                        MGMT_STATUS_INVALID_PARAMS);
3295
3296         hci_dev_lock(hdev);
3297
3298         hdev->io_capability = cp->io_capability;
3299
3300         bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3301
3302         hci_dev_unlock(hdev);
3303
3304         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3305                                  NULL, 0);
3306 }
3307
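/* Return the pending Pair Device command whose user_data points to
 * this connection, or NULL if there is none.
 */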
3308 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3309 {
3310         struct hci_dev *hdev = conn->hdev;
3311         struct mgmt_pending_cmd *cmd;
3312
3313         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3314                 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3315                         continue;
3316
3317                 if (cmd->user_data != conn)
3318                         continue;
3319
3320                 return cmd;
3321         }
3322
3323         return NULL;
3324 }
3325
3326 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3327 {
3328         struct mgmt_rp_pair_device rp;
3329         struct hci_conn *conn = cmd->user_data;
3330         int err;
3331
3332         bacpy(&rp.addr.bdaddr, &conn->dst);
3333         rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3334
3335         err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3336                                 status, &rp, sizeof(rp));
3337
3338         /* So we don't get further callbacks for this connection */
3339         conn->connect_cfm_cb = NULL;
3340         conn->security_cfm_cb = NULL;
3341         conn->disconn_cfm_cb = NULL;
3342
3343         hci_conn_drop(conn);
3344
3345         /* The device is paired so there is no need to remove
3346          * its connection parameters anymore.
3347          */
3348         clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3349
3350         hci_conn_put(conn);
3351
3352         return err;
3353 }
3354
3355 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3356 {
3357         u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3358         struct mgmt_pending_cmd *cmd;
3359
3360         cmd = find_pairing(conn);
3361         if (cmd) {
3362                 cmd->cmd_complete(cmd, status);
3363                 mgmt_pending_remove(cmd);
3364         }
3365 }
3366
3367 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3368 {
3369         struct mgmt_pending_cmd *cmd;
3370
3371         BT_DBG("status %u", status);
3372
3373         cmd = find_pairing(conn);
3374         if (!cmd) {
3375                 BT_DBG("Unable to find a pending command");
3376                 return;
3377         }
3378
3379         cmd->cmd_complete(cmd, mgmt_status(status));
3380         mgmt_pending_remove(cmd);
3381 }
3382
3383 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3384 {
3385         struct mgmt_pending_cmd *cmd;
3386
3387         BT_DBG("status %u", status);
3388
3389         if (!status)
3390                 return;
3391
3392         cmd = find_pairing(conn);
3393         if (!cmd) {
3394                 BT_DBG("Unable to find a pending command");
3395                 return;
3396         }
3397
3398         cmd->cmd_complete(cmd, mgmt_status(status));
3399         mgmt_pending_remove(cmd);
3400 }
3401
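/* MGMT_OP_PAIR_DEVICE handler: creates an ACL connection (BR/EDR) or a
 * connect-by-scan LE connection and hooks the pairing callbacks into
 * it. The command stays pending until pairing_complete() reports the
 * outcome, or completes immediately if the link is already secure.
 */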
3402 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3403                        u16 len)
3404 {
3405         struct mgmt_cp_pair_device *cp = data;
3406         struct mgmt_rp_pair_device rp;
3407         struct mgmt_pending_cmd *cmd;
3408         u8 sec_level, auth_type;
3409         struct hci_conn *conn;
3410         int err;
3411
3412         bt_dev_dbg(hdev, "sock %p", sk);
3413
3414         memset(&rp, 0, sizeof(rp));
3415         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3416         rp.addr.type = cp->addr.type;
3417
3418         if (!bdaddr_type_is_valid(cp->addr.type))
3419                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3420                                          MGMT_STATUS_INVALID_PARAMS,
3421                                          &rp, sizeof(rp));
3422
3423         if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3424                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3425                                          MGMT_STATUS_INVALID_PARAMS,
3426                                          &rp, sizeof(rp));
3427
3428         hci_dev_lock(hdev);
3429
3430         if (!hdev_is_powered(hdev)) {
3431                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3432                                         MGMT_STATUS_NOT_POWERED, &rp,
3433                                         sizeof(rp));
3434                 goto unlock;
3435         }
3436
3437         if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3438                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3439                                         MGMT_STATUS_ALREADY_PAIRED, &rp,
3440                                         sizeof(rp));
3441                 goto unlock;
3442         }
3443
3444         sec_level = BT_SECURITY_MEDIUM;
3445         auth_type = HCI_AT_DEDICATED_BONDING;
3446
3447         if (cp->addr.type == BDADDR_BREDR) {
3448                 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3449                                        auth_type, CONN_REASON_PAIR_DEVICE,
3450                                        HCI_ACL_CONN_TIMEOUT);
3451         } else {
3452                 u8 addr_type = le_addr_type(cp->addr.type);
3453                 struct hci_conn_params *p;
3454
3455                 /* When pairing a new device, it is expected to remember
3456                  * this device for future connections. Adding the connection
3457                  * parameter information ahead of time allows tracking
3458                  * of the peripheral preferred values and will speed up any
3459                  * further connection establishment.
3460                  *
3461                  * If connection parameters already exist, then they
3462                  * will be kept and this function does nothing.
3463                  */
                p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
                if (!p) {
                        /* Allocation of the connection parameters failed */
                        err = -EIO;
                        goto unlock;
                }

                if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
                        p->auto_connect = HCI_AUTO_CONN_DISABLED;
3468
3469                 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3470                                            sec_level, HCI_LE_CONN_TIMEOUT,
3471                                            CONN_REASON_PAIR_DEVICE);
3472         }
3473
3474         if (IS_ERR(conn)) {
3475                 int status;
3476
3477                 if (PTR_ERR(conn) == -EBUSY)
3478                         status = MGMT_STATUS_BUSY;
3479                 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3480                         status = MGMT_STATUS_NOT_SUPPORTED;
3481                 else if (PTR_ERR(conn) == -ECONNREFUSED)
3482                         status = MGMT_STATUS_REJECTED;
3483                 else
3484                         status = MGMT_STATUS_CONNECT_FAILED;
3485
3486                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3487                                         status, &rp, sizeof(rp));
3488                 goto unlock;
3489         }
3490
3491         if (conn->connect_cfm_cb) {
3492                 hci_conn_drop(conn);
3493                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3494                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3495                 goto unlock;
3496         }
3497
3498         cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3499         if (!cmd) {
3500                 err = -ENOMEM;
3501                 hci_conn_drop(conn);
3502                 goto unlock;
3503         }
3504
3505         cmd->cmd_complete = pairing_complete;
3506
3507         /* For LE, just connecting isn't a proof that the pairing finished */
3508         if (cp->addr.type == BDADDR_BREDR) {
3509                 conn->connect_cfm_cb = pairing_complete_cb;
3510                 conn->security_cfm_cb = pairing_complete_cb;
3511                 conn->disconn_cfm_cb = pairing_complete_cb;
3512         } else {
3513                 conn->connect_cfm_cb = le_pairing_complete_cb;
3514                 conn->security_cfm_cb = le_pairing_complete_cb;
3515                 conn->disconn_cfm_cb = le_pairing_complete_cb;
3516         }
3517
3518         conn->io_capability = cp->io_cap;
3519         cmd->user_data = hci_conn_get(conn);
3520
3521         if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3522             hci_conn_security(conn, sec_level, auth_type, true)) {
3523                 cmd->cmd_complete(cmd, 0);
3524                 mgmt_pending_remove(cmd);
3525         }
3526
3527         err = 0;
3528
3529 unlock:
3530         hci_dev_unlock(hdev);
3531         return err;
3532 }
3533
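/* MGMT_OP_CANCEL_PAIR_DEVICE handler: completes the pending Pair
 * Device command with a cancelled status, removes any keys created so
 * far and aborts the connection if it only existed for the sake of
 * pairing.
 */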
3534 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3535                               u16 len)
3536 {
3537         struct mgmt_addr_info *addr = data;
3538         struct mgmt_pending_cmd *cmd;
3539         struct hci_conn *conn;
3540         int err;
3541
3542         bt_dev_dbg(hdev, "sock %p", sk);
3543
3544         hci_dev_lock(hdev);
3545
3546         if (!hdev_is_powered(hdev)) {
3547                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3548                                       MGMT_STATUS_NOT_POWERED);
3549                 goto unlock;
3550         }
3551
3552         cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3553         if (!cmd) {
3554                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3555                                       MGMT_STATUS_INVALID_PARAMS);
3556                 goto unlock;
3557         }
3558
3559         conn = cmd->user_data;
3560
3561         if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3562                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3563                                       MGMT_STATUS_INVALID_PARAMS);
3564                 goto unlock;
3565         }
3566
3567         cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3568         mgmt_pending_remove(cmd);
3569
3570         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3571                                 addr, sizeof(*addr));
3572
        /* Since the user doesn't want to proceed with the connection, abort any
3574          * ongoing pairing and then terminate the link if it was created
3575          * because of the pair device action.
3576          */
3577         if (addr->type == BDADDR_BREDR)
3578                 hci_remove_link_key(hdev, &addr->bdaddr);
3579         else
3580                 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3581                                               le_addr_type(addr->type));
3582
3583         if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3584                 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3585
3586 unlock:
3587         hci_dev_unlock(hdev);
3588         return err;
3589 }
3590
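/* Common backend for the PIN/confirmation/passkey (negative) reply
 * commands: LE responses are handed to SMP directly and answered right
 * away, while BR/EDR responses are queued as the corresponding HCI
 * command with the mgmt reply deferred via a pending command.
 */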
3591 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3592                              struct mgmt_addr_info *addr, u16 mgmt_op,
3593                              u16 hci_op, __le32 passkey)
3594 {
3595         struct mgmt_pending_cmd *cmd;
3596         struct hci_conn *conn;
3597         int err;
3598
3599         hci_dev_lock(hdev);
3600
3601         if (!hdev_is_powered(hdev)) {
3602                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3603                                         MGMT_STATUS_NOT_POWERED, addr,
3604                                         sizeof(*addr));
3605                 goto done;
3606         }
3607
3608         if (addr->type == BDADDR_BREDR)
3609                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3610         else
3611                 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3612                                                le_addr_type(addr->type));
3613
3614         if (!conn) {
3615                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3616                                         MGMT_STATUS_NOT_CONNECTED, addr,
3617                                         sizeof(*addr));
3618                 goto done;
3619         }
3620
3621         if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3622                 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3623                 if (!err)
3624                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3625                                                 MGMT_STATUS_SUCCESS, addr,
3626                                                 sizeof(*addr));
3627                 else
3628                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3629                                                 MGMT_STATUS_FAILED, addr,
3630                                                 sizeof(*addr));
3631
3632                 goto done;
3633         }
3634
3635         cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3636         if (!cmd) {
3637                 err = -ENOMEM;
3638                 goto done;
3639         }
3640
3641         cmd->cmd_complete = addr_cmd_complete;
3642
3643         /* Continue with pairing via HCI */
3644         if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3645                 struct hci_cp_user_passkey_reply cp;
3646
3647                 bacpy(&cp.bdaddr, &addr->bdaddr);
3648                 cp.passkey = passkey;
3649                 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3650         } else
3651                 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3652                                    &addr->bdaddr);
3653
3654         if (err < 0)
3655                 mgmt_pending_remove(cmd);
3656
3657 done:
3658         hci_dev_unlock(hdev);
3659         return err;
3660 }
3661
3662 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3663                               void *data, u16 len)
3664 {
3665         struct mgmt_cp_pin_code_neg_reply *cp = data;
3666
3667         bt_dev_dbg(hdev, "sock %p", sk);
3668
3669         return user_pairing_resp(sk, hdev, &cp->addr,
3670                                 MGMT_OP_PIN_CODE_NEG_REPLY,
3671                                 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3672 }
3673
3674 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3675                               u16 len)
3676 {
3677         struct mgmt_cp_user_confirm_reply *cp = data;
3678
3679         bt_dev_dbg(hdev, "sock %p", sk);
3680
3681         if (len != sizeof(*cp))
3682                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3683                                        MGMT_STATUS_INVALID_PARAMS);
3684
3685         return user_pairing_resp(sk, hdev, &cp->addr,
3686                                  MGMT_OP_USER_CONFIRM_REPLY,
3687                                  HCI_OP_USER_CONFIRM_REPLY, 0);
3688 }
3689
3690 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3691                                   void *data, u16 len)
3692 {
3693         struct mgmt_cp_user_confirm_neg_reply *cp = data;
3694
3695         bt_dev_dbg(hdev, "sock %p", sk);
3696
3697         return user_pairing_resp(sk, hdev, &cp->addr,
3698                                  MGMT_OP_USER_CONFIRM_NEG_REPLY,
3699                                  HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3700 }
3701
3702 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3703                               u16 len)
3704 {
3705         struct mgmt_cp_user_passkey_reply *cp = data;
3706
3707         bt_dev_dbg(hdev, "sock %p", sk);
3708
3709         return user_pairing_resp(sk, hdev, &cp->addr,
3710                                  MGMT_OP_USER_PASSKEY_REPLY,
3711                                  HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3712 }
3713
3714 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3715                                   void *data, u16 len)
3716 {
3717         struct mgmt_cp_user_passkey_neg_reply *cp = data;
3718
3719         bt_dev_dbg(hdev, "sock %p", sk);
3720
3721         return user_pairing_resp(sk, hdev, &cp->addr,
3722                                  MGMT_OP_USER_PASSKEY_NEG_REPLY,
3723                                  HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3724 }
3725
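/* If the current advertising instance carries data covered by @flags
 * (e.g. local name or appearance), expire it early and schedule the
 * next instance so that stale data is not left advertised.
 */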
3726 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3727 {
3728         struct adv_info *adv_instance;
3729
3730         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3731         if (!adv_instance)
3732                 return 0;
3733
3734         /* stop if current instance doesn't need to be changed */
3735         if (!(adv_instance->flags & flags))
3736                 return 0;
3737
3738         cancel_adv_timeout(hdev);
3739
3740         adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3741         if (!adv_instance)
3742                 return 0;
3743
3744         hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3745
3746         return 0;
3747 }
3748
3749 static int name_changed_sync(struct hci_dev *hdev, void *data)
3750 {
3751         return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3752 }
3753
3754 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3755 {
3756         struct mgmt_pending_cmd *cmd = data;
3757         struct mgmt_cp_set_local_name *cp = cmd->param;
3758         u8 status = mgmt_status(err);
3759
3760         bt_dev_dbg(hdev, "err %d", err);
3761
3762         if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3763                 return;
3764
3765         if (status) {
3766                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3767                                 status);
3768         } else {
3769                 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3770                                   cp, sizeof(*cp));
3771
3772                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3773                         hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3774         }
3775
3776         mgmt_pending_remove(cmd);
3777 }
3778
3779 static int set_name_sync(struct hci_dev *hdev, void *data)
3780 {
3781         if (lmp_bredr_capable(hdev)) {
3782                 hci_update_name_sync(hdev);
3783                 hci_update_eir_sync(hdev);
3784         }
3785
        /* The name is stored in the scan response data, so there is no
         * need to update the advertising data here.
         */
3789         if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3790                 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3791
3792         return 0;
3793 }
3794
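/* MGMT_OP_SET_LOCAL_NAME handler: the short name is stored right away.
 * When powered off only the cached name is updated and the change is
 * announced to other mgmt sockets; otherwise the update is queued so
 * set_name_sync() can refresh the EIR and scan response data.
 */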
3795 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3796                           u16 len)
3797 {
3798         struct mgmt_cp_set_local_name *cp = data;
3799         struct mgmt_pending_cmd *cmd;
3800         int err;
3801
3802         bt_dev_dbg(hdev, "sock %p", sk);
3803
3804         hci_dev_lock(hdev);
3805
3806         /* If the old values are the same as the new ones just return a
3807          * direct command complete event.
3808          */
3809         if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3810             !memcmp(hdev->short_name, cp->short_name,
3811                     sizeof(hdev->short_name))) {
3812                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3813                                         data, len);
3814                 goto failed;
3815         }
3816
3817         memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3818
3819         if (!hdev_is_powered(hdev)) {
3820                 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3821
3822                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3823                                         data, len);
3824                 if (err < 0)
3825                         goto failed;
3826
3827                 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3828                                          len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3829                 ext_info_changed(hdev, sk);
3830
3831                 goto failed;
3832         }
3833
3834         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3835         if (!cmd)
3836                 err = -ENOMEM;
3837         else
3838                 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3839                                          set_name_complete);
3840
3841         if (err < 0) {
3842                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3843                                       MGMT_STATUS_FAILED);
3844
3845                 if (cmd)
3846                         mgmt_pending_remove(cmd);
3847
3848                 goto failed;
3849         }
3850
3851         memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3852
3853 failed:
3854         hci_dev_unlock(hdev);
3855         return err;
3856 }
3857
3858 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3859 {
3860         return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3861 }
3862
3863 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3864                           u16 len)
3865 {
3866         struct mgmt_cp_set_appearance *cp = data;
3867         u16 appearance;
3868         int err;
3869
3870         bt_dev_dbg(hdev, "sock %p", sk);
3871
3872         if (!lmp_le_capable(hdev))
3873                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3874                                        MGMT_STATUS_NOT_SUPPORTED);
3875
3876         appearance = le16_to_cpu(cp->appearance);
3877
3878         hci_dev_lock(hdev);
3879
3880         if (hdev->appearance != appearance) {
3881                 hdev->appearance = appearance;
3882
3883                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3884                         hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3885                                            NULL);
3886
3887                 ext_info_changed(hdev, sk);
3888         }
3889
3890         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3891                                 0);
3892
3893         hci_dev_unlock(hdev);
3894
3895         return err;
3896 }
3897
3898 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3899                                  void *data, u16 len)
3900 {
3901         struct mgmt_rp_get_phy_configuration rp;
3902
3903         bt_dev_dbg(hdev, "sock %p", sk);
3904
3905         hci_dev_lock(hdev);
3906
3907         memset(&rp, 0, sizeof(rp));
3908
3909         rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3910         rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3911         rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3912
3913         hci_dev_unlock(hdev);
3914
3915         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3916                                  &rp, sizeof(rp));
3917 }
3918
3919 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3920 {
3921         struct mgmt_ev_phy_configuration_changed ev;
3922
3923         memset(&ev, 0, sizeof(ev));
3924
3925         ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3926
3927         return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3928                           sizeof(ev), skip);
3929 }
3930
3931 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3932 {
3933         struct mgmt_pending_cmd *cmd = data;
3934         struct sk_buff *skb = cmd->skb;
3935         u8 status = mgmt_status(err);
3936
3937         if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3938                 return;
3939
3940         if (!status) {
3941                 if (!skb)
3942                         status = MGMT_STATUS_FAILED;
3943                 else if (IS_ERR(skb))
3944                         status = mgmt_status(PTR_ERR(skb));
3945                 else
3946                         status = mgmt_status(skb->data[0]);
3947         }
3948
3949         bt_dev_dbg(hdev, "status %d", status);
3950
3951         if (status) {
3952                 mgmt_cmd_status(cmd->sk, hdev->id,
3953                                 MGMT_OP_SET_PHY_CONFIGURATION, status);
3954         } else {
3955                 mgmt_cmd_complete(cmd->sk, hdev->id,
3956                                   MGMT_OP_SET_PHY_CONFIGURATION, 0,
3957                                   NULL, 0);
3958
3959                 mgmt_phy_configuration_changed(hdev, cmd->sk);
3960         }
3961
3962         if (skb && !IS_ERR(skb))
3963                 kfree_skb(skb);
3964
3965         mgmt_pending_remove(cmd);
3966 }
3967
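/* Translate the selected MGMT PHY bits into an LE Set Default PHY
 * command. The all_phys bits 0x01/0x02 tell the controller that the
 * host has no TX/RX preference when no corresponding PHYs were
 * selected.
 */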
3968 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3969 {
3970         struct mgmt_pending_cmd *cmd = data;
3971         struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3972         struct hci_cp_le_set_default_phy cp_phy;
3973         u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3974
3975         memset(&cp_phy, 0, sizeof(cp_phy));
3976
3977         if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3978                 cp_phy.all_phys |= 0x01;
3979
3980         if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3981                 cp_phy.all_phys |= 0x02;
3982
3983         if (selected_phys & MGMT_PHY_LE_1M_TX)
3984                 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3985
3986         if (selected_phys & MGMT_PHY_LE_2M_TX)
3987                 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3988
3989         if (selected_phys & MGMT_PHY_LE_CODED_TX)
3990                 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3991
3992         if (selected_phys & MGMT_PHY_LE_1M_RX)
3993                 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3994
3995         if (selected_phys & MGMT_PHY_LE_2M_RX)
3996                 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3997
3998         if (selected_phys & MGMT_PHY_LE_CODED_RX)
3999                 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4000
        cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
                                  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4003
4004         return 0;
4005 }
4006
4007 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4008                                  void *data, u16 len)
4009 {
4010         struct mgmt_cp_set_phy_configuration *cp = data;
4011         struct mgmt_pending_cmd *cmd;
4012         u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4013         u16 pkt_type = (HCI_DH1 | HCI_DM1);
4014         bool changed = false;
4015         int err;
4016
4017         bt_dev_dbg(hdev, "sock %p", sk);
4018
4019         configurable_phys = get_configurable_phys(hdev);
4020         supported_phys = get_supported_phys(hdev);
4021         selected_phys = __le32_to_cpu(cp->selected_phys);
4022
4023         if (selected_phys & ~supported_phys)
4024                 return mgmt_cmd_status(sk, hdev->id,
4025                                        MGMT_OP_SET_PHY_CONFIGURATION,
4026                                        MGMT_STATUS_INVALID_PARAMS);
4027
4028         unconfigure_phys = supported_phys & ~configurable_phys;
4029
4030         if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4031                 return mgmt_cmd_status(sk, hdev->id,
4032                                        MGMT_OP_SET_PHY_CONFIGURATION,
4033                                        MGMT_STATUS_INVALID_PARAMS);
4034
4035         if (selected_phys == get_selected_phys(hdev))
4036                 return mgmt_cmd_complete(sk, hdev->id,
4037                                          MGMT_OP_SET_PHY_CONFIGURATION,
4038                                          0, NULL, 0);
4039
4040         hci_dev_lock(hdev);
4041
4042         if (!hdev_is_powered(hdev)) {
4043                 err = mgmt_cmd_status(sk, hdev->id,
4044                                       MGMT_OP_SET_PHY_CONFIGURATION,
4045                                       MGMT_STATUS_REJECTED);
4046                 goto unlock;
4047         }
4048
4049         if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4050                 err = mgmt_cmd_status(sk, hdev->id,
4051                                       MGMT_OP_SET_PHY_CONFIGURATION,
4052                                       MGMT_STATUS_BUSY);
4053                 goto unlock;
4054         }
4055
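        /* Translate the BR/EDR PHY selection into ACL packet types. Note
         * that the EDR 2M/3M bits in pkt_type are "shall not use" flags,
         * hence the inverted logic compared to the basic rate bits.
         */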
4056         if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4057                 pkt_type |= (HCI_DH3 | HCI_DM3);
4058         else
4059                 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4060
4061         if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4062                 pkt_type |= (HCI_DH5 | HCI_DM5);
4063         else
4064                 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4065
4066         if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4067                 pkt_type &= ~HCI_2DH1;
4068         else
4069                 pkt_type |= HCI_2DH1;
4070
4071         if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4072                 pkt_type &= ~HCI_2DH3;
4073         else
4074                 pkt_type |= HCI_2DH3;
4075
4076         if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4077                 pkt_type &= ~HCI_2DH5;
4078         else
4079                 pkt_type |= HCI_2DH5;
4080
4081         if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4082                 pkt_type &= ~HCI_3DH1;
4083         else
4084                 pkt_type |= HCI_3DH1;
4085
4086         if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4087                 pkt_type &= ~HCI_3DH3;
4088         else
4089                 pkt_type |= HCI_3DH3;
4090
4091         if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4092                 pkt_type &= ~HCI_3DH5;
4093         else
4094                 pkt_type |= HCI_3DH5;
4095
4096         if (pkt_type != hdev->pkt_type) {
4097                 hdev->pkt_type = pkt_type;
4098                 changed = true;
4099         }
4100
4101         if ((selected_phys & MGMT_PHY_LE_MASK) ==
4102             (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4103                 if (changed)
4104                         mgmt_phy_configuration_changed(hdev, sk);
4105
4106                 err = mgmt_cmd_complete(sk, hdev->id,
4107                                         MGMT_OP_SET_PHY_CONFIGURATION,
4108                                         0, NULL, 0);
4109
4110                 goto unlock;
4111         }
4112
4113         cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4114                                len);
4115         if (!cmd)
4116                 err = -ENOMEM;
4117         else
4118                 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4119                                          set_default_phy_complete);
4120
4121         if (err < 0) {
4122                 err = mgmt_cmd_status(sk, hdev->id,
4123                                       MGMT_OP_SET_PHY_CONFIGURATION,
4124                                       MGMT_STATUS_FAILED);
4125
4126                 if (cmd)
4127                         mgmt_pending_remove(cmd);
4128         }
4129
4130 unlock:
4131         hci_dev_unlock(hdev);
4132
4133         return err;
4134 }
4135
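/* MGMT_OP_SET_BLOCKED_KEYS handler: replace hdev->blocked_keys with the
 * list supplied by userspace. Entries on this list (for example
 * well-known debug keys) are intended to be rejected if a remote device
 * tries to distribute them.
 */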
4136 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4137                             u16 len)
4138 {
4139         int err = MGMT_STATUS_SUCCESS;
4140         struct mgmt_cp_set_blocked_keys *keys = data;
4141         const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4142                                    sizeof(struct mgmt_blocked_key_info));
4143         u16 key_count, expected_len;
4144         int i;
4145
4146         bt_dev_dbg(hdev, "sock %p", sk);
4147
4148         key_count = __le16_to_cpu(keys->key_count);
4149         if (key_count > max_key_count) {
4150                 bt_dev_err(hdev, "too big key_count value %u", key_count);
4151                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4152                                        MGMT_STATUS_INVALID_PARAMS);
4153         }
4154
4155         expected_len = struct_size(keys, keys, key_count);
4156         if (expected_len != len) {
4157                 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4158                            expected_len, len);
4159                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4160                                        MGMT_STATUS_INVALID_PARAMS);
4161         }
4162
4163         hci_dev_lock(hdev);
4164
4165         hci_blocked_keys_clear(hdev);
4166
4167         for (i = 0; i < key_count; ++i) {
4168                 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4169
4170                 if (!b) {
4171                         err = MGMT_STATUS_NO_RESOURCES;
4172                         break;
4173                 }
4174
4175                 b->type = keys->keys[i].type;
4176                 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4177                 list_add_rcu(&b->list, &hdev->blocked_keys);
4178         }
4179         hci_dev_unlock(hdev);
4180
4181         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4182                                 err, NULL, 0);
4183 }
4184
4185 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4186                                void *data, u16 len)
4187 {
4188         struct mgmt_mode *cp = data;
4189         int err;
4190         bool changed = false;
4191
4192         bt_dev_dbg(hdev, "sock %p", sk);
4193
4194         if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4195                 return mgmt_cmd_status(sk, hdev->id,
4196                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4197                                        MGMT_STATUS_NOT_SUPPORTED);
4198
4199         if (cp->val != 0x00 && cp->val != 0x01)
4200                 return mgmt_cmd_status(sk, hdev->id,
4201                                        MGMT_OP_SET_WIDEBAND_SPEECH,
4202                                        MGMT_STATUS_INVALID_PARAMS);
4203
4204         hci_dev_lock(hdev);
4205
4206         if (hdev_is_powered(hdev) &&
4207             !!cp->val != hci_dev_test_flag(hdev,
4208                                            HCI_WIDEBAND_SPEECH_ENABLED)) {
4209                 err = mgmt_cmd_status(sk, hdev->id,
4210                                       MGMT_OP_SET_WIDEBAND_SPEECH,
4211                                       MGMT_STATUS_REJECTED);
4212                 goto unlock;
4213         }
4214
4215         if (cp->val)
4216                 changed = !hci_dev_test_and_set_flag(hdev,
4217                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4218         else
4219                 changed = hci_dev_test_and_clear_flag(hdev,
4220                                                    HCI_WIDEBAND_SPEECH_ENABLED);
4221
4222         err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4223         if (err < 0)
4224                 goto unlock;
4225
4226         if (changed)
4227                 err = new_settings(hdev, sk);
4228
4229 unlock:
4230         hci_dev_unlock(hdev);
4231         return err;
4232 }
4233
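/* MGMT_OP_READ_CONTROLLER_CAP handler: build an EIR-encoded list of
 * security capabilities (security flags, maximum encryption key sizes
 * and, when available, the LE TX power range) and return it to the
 * caller.
 */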
4234 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4235                                void *data, u16 data_len)
4236 {
4237         char buf[20];
4238         struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4239         u16 cap_len = 0;
4240         u8 flags = 0;
4241         u8 tx_power_range[2];
4242
4243         bt_dev_dbg(hdev, "sock %p", sk);
4244
4245         memset(&buf, 0, sizeof(buf));
4246
4247         hci_dev_lock(hdev);
4248
        /* When the Read Simple Pairing Options command is supported,
         * remote public key validation is supported.
4251          *
4252          * Alternatively, when Microsoft extensions are available, they can
4253          * indicate support for public key validation as well.
4254          */
4255         if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4256                 flags |= 0x01;  /* Remote public key validation (BR/EDR) */
4257
4258         flags |= 0x02;          /* Remote public key validation (LE) */
4259
4260         /* When the Read Encryption Key Size command is supported, then the
4261          * encryption key size is enforced.
4262          */
4263         if (hdev->commands[20] & 0x10)
4264                 flags |= 0x04;  /* Encryption key size enforcement (BR/EDR) */
4265
4266         flags |= 0x08;          /* Encryption key size enforcement (LE) */
4267
4268         cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4269                                   &flags, 1);
4270
        /* When the Read Simple Pairing Options command is supported, the
         * maximum encryption key size information is also provided.
4273          */
4274         if (hdev->commands[41] & 0x08)
4275                 cap_len = eir_append_le16(rp->cap, cap_len,
4276                                           MGMT_CAP_MAX_ENC_KEY_SIZE,
4277                                           hdev->max_enc_key_size);
4278
4279         cap_len = eir_append_le16(rp->cap, cap_len,
4280                                   MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4281                                   SMP_MAX_ENC_KEY_SIZE);
4282
4283         /* Append the min/max LE tx power parameters if we were able to fetch
         * them from the controller.
4285          */
4286         if (hdev->commands[38] & 0x80) {
4287                 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4288                 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4289                 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4290                                           tx_power_range, 2);
4291         }
4292
4293         rp->cap_len = cpu_to_le16(cap_len);
4294
4295         hci_dev_unlock(hdev);
4296
4297         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4298                                  rp, sizeof(*rp) + cap_len);
4299 }
4300
4301 #ifdef CONFIG_BT_FEATURE_DEBUG
4302 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4303 static const u8 debug_uuid[16] = {
4304         0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4305         0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4306 };
4307 #endif
4308
4309 /* 330859bc-7506-492d-9370-9a6f0614037f */
4310 static const u8 quality_report_uuid[16] = {
4311         0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4312         0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4313 };
4314
4315 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4316 static const u8 offload_codecs_uuid[16] = {
4317         0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4318         0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4319 };
4320
4321 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4322 static const u8 le_simultaneous_roles_uuid[16] = {
4323         0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4324         0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4325 };
4326
4327 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4328 static const u8 rpa_resolution_uuid[16] = {
4329         0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4330         0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4331 };
4332
4333 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4334 static const u8 iso_socket_uuid[16] = {
4335         0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4336         0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4337 };
4338
4339 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4340 static const u8 mgmt_mesh_uuid[16] = {
4341         0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4342         0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4343 };
4344
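/* MGMT_OP_READ_EXP_FEATURES_INFO handler: each returned feature entry
 * is a 16-byte UUID followed by a 32-bit flags field (20 bytes total),
 * which is why the final response length is sizeof(*rp) + 20 * idx.
 */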
4345 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4346                                   void *data, u16 data_len)
4347 {
4348         struct mgmt_rp_read_exp_features_info *rp;
4349         size_t len;
4350         u16 idx = 0;
4351         u32 flags;
4352         int status;
4353
4354         bt_dev_dbg(hdev, "sock %p", sk);
4355
4356         /* Enough space for 7 features */
4357         len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4358         rp = kzalloc(len, GFP_KERNEL);
4359         if (!rp)
4360                 return -ENOMEM;
4361
4362 #ifdef CONFIG_BT_FEATURE_DEBUG
4363         if (!hdev) {
4364                 flags = bt_dbg_get() ? BIT(0) : 0;
4365
4366                 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4367                 rp->features[idx].flags = cpu_to_le32(flags);
4368                 idx++;
4369         }
4370 #endif
4371
4372         if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4373                 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4374                         flags = BIT(0);
4375                 else
4376                         flags = 0;
4377
4378                 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4379                 rp->features[idx].flags = cpu_to_le32(flags);
4380                 idx++;
4381         }
4382
4383         if (hdev && ll_privacy_capable(hdev)) {
4384                 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4385                         flags = BIT(0) | BIT(1);
4386                 else
4387                         flags = BIT(1);
4388
4389                 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4390                 rp->features[idx].flags = cpu_to_le32(flags);
4391                 idx++;
4392         }
4393
4394         if (hdev && (aosp_has_quality_report(hdev) ||
4395                      hdev->set_quality_report)) {
4396                 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4397                         flags = BIT(0);
4398                 else
4399                         flags = 0;
4400
4401                 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4402                 rp->features[idx].flags = cpu_to_le32(flags);
4403                 idx++;
4404         }
4405
4406         if (hdev && hdev->get_data_path_id) {
4407                 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4408                         flags = BIT(0);
4409                 else
4410                         flags = 0;
4411
4412                 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4413                 rp->features[idx].flags = cpu_to_le32(flags);
4414                 idx++;
4415         }
4416
4417         if (IS_ENABLED(CONFIG_BT_LE)) {
4418                 flags = iso_enabled() ? BIT(0) : 0;
4419                 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4420                 rp->features[idx].flags = cpu_to_le32(flags);
4421                 idx++;
4422         }
4423
4424         if (hdev && lmp_le_capable(hdev)) {
4425                 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4426                         flags = BIT(0);
4427                 else
4428                         flags = 0;
4429
4430                 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4431                 rp->features[idx].flags = cpu_to_le32(flags);
4432                 idx++;
4433         }
4434
4435         rp->feature_count = cpu_to_le16(idx);
4436
4437         /* After reading the experimental features information, enable
4438          * the events to update client on any future change.
4439          */
4440         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4441
4442         status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4443                                    MGMT_OP_READ_EXP_FEATURES_INFO,
4444                                    0, rp, sizeof(*rp) + (20 * idx));
4445
4446         kfree(rp);
4447         return status;
4448 }
4449
4450 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4451                                           struct sock *skip)
4452 {
4453         struct mgmt_ev_exp_feature_changed ev;
4454
4455         memset(&ev, 0, sizeof(ev));
4456         memcpy(ev.uuid, rpa_resolution_uuid, 16);
4457         ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4458
        /* Do we need to be atomic with the conn_flags? */
4460         if (enabled && privacy_mode_capable(hdev))
4461                 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4462         else
4463                 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4464
4465         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4466                                   &ev, sizeof(ev),
4467                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4468
4469 }
4470
4471 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4472                                bool enabled, struct sock *skip)
4473 {
4474         struct mgmt_ev_exp_feature_changed ev;
4475
4476         memset(&ev, 0, sizeof(ev));
4477         memcpy(ev.uuid, uuid, 16);
4478         ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4479
4480         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4481                                   &ev, sizeof(ev),
4482                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4483 }
4484
4485 #define EXP_FEAT(_uuid, _set_func)      \
4486 {                                       \
4487         .uuid = _uuid,                  \
4488         .set_func = _set_func,          \
4489 }
4490
4491 /* The zero key uuid is special. Multiple exp features are set through it. */
4492 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4493                              struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4494 {
4495         struct mgmt_rp_set_exp_feature rp;
4496
4497         memset(rp.uuid, 0, 16);
4498         rp.flags = cpu_to_le32(0);
4499
4500 #ifdef CONFIG_BT_FEATURE_DEBUG
4501         if (!hdev) {
4502                 bool changed = bt_dbg_get();
4503
4504                 bt_dbg_set(false);
4505
4506                 if (changed)
4507                         exp_feature_changed(NULL, ZERO_KEY, false, sk);
4508         }
4509 #endif
4510
4511         if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4512                 bool changed;
4513
4514                 changed = hci_dev_test_and_clear_flag(hdev,
4515                                                       HCI_ENABLE_LL_PRIVACY);
4516                 if (changed)
4517                         exp_feature_changed(hdev, rpa_resolution_uuid, false,
4518                                             sk);
4519         }
4520
4521         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4522
4523         return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4524                                  MGMT_OP_SET_EXP_FEATURE, 0,
4525                                  &rp, sizeof(rp));
4526 }
4527
4528 #ifdef CONFIG_BT_FEATURE_DEBUG
4529 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4530                           struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4531 {
4532         struct mgmt_rp_set_exp_feature rp;
4533
4534         bool val, changed;
4535         int err;
4536
4537         /* Command requires the non-controller index */
4538         if (hdev)
4539                 return mgmt_cmd_status(sk, hdev->id,
4540                                        MGMT_OP_SET_EXP_FEATURE,
4541                                        MGMT_STATUS_INVALID_INDEX);
4542
4543         /* Parameters are limited to a single octet */
4544         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4545                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4546                                        MGMT_OP_SET_EXP_FEATURE,
4547                                        MGMT_STATUS_INVALID_PARAMS);
4548
4549         /* Only boolean on/off is supported */
4550         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4551                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4552                                        MGMT_OP_SET_EXP_FEATURE,
4553                                        MGMT_STATUS_INVALID_PARAMS);
4554
4555         val = !!cp->param[0];
4556         changed = val ? !bt_dbg_get() : bt_dbg_get();
4557         bt_dbg_set(val);
4558
4559         memcpy(rp.uuid, debug_uuid, 16);
4560         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4561
4562         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4563
4564         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4565                                 MGMT_OP_SET_EXP_FEATURE, 0,
4566                                 &rp, sizeof(rp));
4567
4568         if (changed)
4569                 exp_feature_changed(hdev, debug_uuid, val, sk);
4570
4571         return err;
4572 }
4573 #endif
4574
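/* Toggle the experimental Mesh feature: set or clear HCI_MESH_EXPERIMENTAL
 * (clearing HCI_MESH as well when disabling) and notify other sockets if
 * the value actually changed.
 */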
4575 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4576                               struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4577 {
4578         struct mgmt_rp_set_exp_feature rp;
4579         bool val, changed;
4580         int err;
4581
4582         /* Command requires the controller index */
4583         if (!hdev)
4584                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4585                                        MGMT_OP_SET_EXP_FEATURE,
4586                                        MGMT_STATUS_INVALID_INDEX);
4587
4588         /* Parameters are limited to a single octet */
4589         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4590                 return mgmt_cmd_status(sk, hdev->id,
4591                                        MGMT_OP_SET_EXP_FEATURE,
4592                                        MGMT_STATUS_INVALID_PARAMS);
4593
4594         /* Only boolean on/off is supported */
4595         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4596                 return mgmt_cmd_status(sk, hdev->id,
4597                                        MGMT_OP_SET_EXP_FEATURE,
4598                                        MGMT_STATUS_INVALID_PARAMS);
4599
4600         val = !!cp->param[0];
4601
4602         if (val) {
4603                 changed = !hci_dev_test_and_set_flag(hdev,
4604                                                      HCI_MESH_EXPERIMENTAL);
4605         } else {
4606                 hci_dev_clear_flag(hdev, HCI_MESH);
4607                 changed = hci_dev_test_and_clear_flag(hdev,
4608                                                       HCI_MESH_EXPERIMENTAL);
4609         }
4610
4611         memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4612         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4613
4614         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4615
4616         err = mgmt_cmd_complete(sk, hdev->id,
4617                                 MGMT_OP_SET_EXP_FEATURE, 0,
4618                                 &rp, sizeof(rp));
4619
4620         if (changed)
4621                 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4622
4623         return err;
4624 }
4625
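/* Enable or disable LL privacy (RPA resolution). Only allowed while the
 * controller is powered off; toggles HCI_ENABLE_LL_PRIVACY and signals
 * that the supported settings changed as well.
 */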
4626 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4627                                    struct mgmt_cp_set_exp_feature *cp,
4628                                    u16 data_len)
4629 {
4630         struct mgmt_rp_set_exp_feature rp;
4631         bool val, changed;
4632         int err;
4633         u32 flags;
4634
4635         /* Command requires the controller index */
4636         if (!hdev)
4637                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4638                                        MGMT_OP_SET_EXP_FEATURE,
4639                                        MGMT_STATUS_INVALID_INDEX);
4640
4641         /* Changes can only be made while the controller is powered down */
4642         if (hdev_is_powered(hdev))
4643                 return mgmt_cmd_status(sk, hdev->id,
4644                                        MGMT_OP_SET_EXP_FEATURE,
4645                                        MGMT_STATUS_REJECTED);
4646
4647         /* Parameters are limited to a single octet */
4648         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4649                 return mgmt_cmd_status(sk, hdev->id,
4650                                        MGMT_OP_SET_EXP_FEATURE,
4651                                        MGMT_STATUS_INVALID_PARAMS);
4652
4653         /* Only boolean on/off is supported */
4654         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4655                 return mgmt_cmd_status(sk, hdev->id,
4656                                        MGMT_OP_SET_EXP_FEATURE,
4657                                        MGMT_STATUS_INVALID_PARAMS);
4658
4659         val = !!cp->param[0];
4660
4661         if (val) {
4662                 changed = !hci_dev_test_and_set_flag(hdev,
4663                                                      HCI_ENABLE_LL_PRIVACY);
4664                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4665
4666                 /* Enable LL privacy + supported settings changed */
4667                 flags = BIT(0) | BIT(1);
4668         } else {
4669                 changed = hci_dev_test_and_clear_flag(hdev,
4670                                                       HCI_ENABLE_LL_PRIVACY);
4671
4672                 /* Disable LL privacy + supported settings changed */
4673                 flags = BIT(1);
4674         }
4675
4676         memcpy(rp.uuid, rpa_resolution_uuid, 16);
4677         rp.flags = cpu_to_le32(flags);
4678
4679         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4680
4681         err = mgmt_cmd_complete(sk, hdev->id,
4682                                 MGMT_OP_SET_EXP_FEATURE, 0,
4683                                 &rp, sizeof(rp));
4684
4685         if (changed)
4686                 exp_ll_privacy_feature_changed(val, hdev, sk);
4687
4688         return err;
4689 }
4690
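/* Enable or disable the quality report feature, preferring a
 * driver-provided set_quality_report() callback and falling back to the
 * AOSP vendor extension. Fails with MGMT_STATUS_NOT_SUPPORTED when
 * neither is available.
 */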
4691 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4692                                    struct mgmt_cp_set_exp_feature *cp,
4693                                    u16 data_len)
4694 {
4695         struct mgmt_rp_set_exp_feature rp;
4696         bool val, changed;
4697         int err;
4698
4699         /* Command requires a valid controller index */
4700         if (!hdev)
4701                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4702                                        MGMT_OP_SET_EXP_FEATURE,
4703                                        MGMT_STATUS_INVALID_INDEX);
4704
4705         /* Parameters are limited to a single octet */
4706         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4707                 return mgmt_cmd_status(sk, hdev->id,
4708                                        MGMT_OP_SET_EXP_FEATURE,
4709                                        MGMT_STATUS_INVALID_PARAMS);
4710
4711         /* Only boolean on/off is supported */
4712         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4713                 return mgmt_cmd_status(sk, hdev->id,
4714                                        MGMT_OP_SET_EXP_FEATURE,
4715                                        MGMT_STATUS_INVALID_PARAMS);
4716
4717         hci_req_sync_lock(hdev);
4718
4719         val = !!cp->param[0];
4720         changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4721
4722         if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4723                 err = mgmt_cmd_status(sk, hdev->id,
4724                                       MGMT_OP_SET_EXP_FEATURE,
4725                                       MGMT_STATUS_NOT_SUPPORTED);
4726                 goto unlock_quality_report;
4727         }
4728
4729         if (changed) {
4730                 if (hdev->set_quality_report)
4731                         err = hdev->set_quality_report(hdev, val);
4732                 else
4733                         err = aosp_set_quality_report(hdev, val);
4734
4735                 if (err) {
4736                         err = mgmt_cmd_status(sk, hdev->id,
4737                                               MGMT_OP_SET_EXP_FEATURE,
4738                                               MGMT_STATUS_FAILED);
4739                         goto unlock_quality_report;
4740                 }
4741
4742                 if (val)
4743                         hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4744                 else
4745                         hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4746         }
4747
4748         bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4749
4750         memcpy(rp.uuid, quality_report_uuid, 16);
4751         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4752         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4753
4754         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4755                                 &rp, sizeof(rp));
4756
4757         if (changed)
4758                 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4759
4760 unlock_quality_report:
4761         hci_req_sync_unlock(hdev);
4762         return err;
4763 }
4764
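/* Enable or disable use of offloaded codecs. The driver must implement
 * get_data_path_id(); only the HCI_OFFLOAD_CODECS_ENABLED flag is updated
 * here.
 */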
4765 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4766                                   struct mgmt_cp_set_exp_feature *cp,
4767                                   u16 data_len)
4768 {
4769         bool val, changed;
4770         int err;
4771         struct mgmt_rp_set_exp_feature rp;
4772
4773         /* Command requires a valid controller index */
4774         if (!hdev)
4775                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4776                                        MGMT_OP_SET_EXP_FEATURE,
4777                                        MGMT_STATUS_INVALID_INDEX);
4778
4779         /* Parameters are limited to a single octet */
4780         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4781                 return mgmt_cmd_status(sk, hdev->id,
4782                                        MGMT_OP_SET_EXP_FEATURE,
4783                                        MGMT_STATUS_INVALID_PARAMS);
4784
4785         /* Only boolean on/off is supported */
4786         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4787                 return mgmt_cmd_status(sk, hdev->id,
4788                                        MGMT_OP_SET_EXP_FEATURE,
4789                                        MGMT_STATUS_INVALID_PARAMS);
4790
4791         val = !!cp->param[0];
4792         changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4793
4794         if (!hdev->get_data_path_id) {
4795                 return mgmt_cmd_status(sk, hdev->id,
4796                                        MGMT_OP_SET_EXP_FEATURE,
4797                                        MGMT_STATUS_NOT_SUPPORTED);
4798         }
4799
4800         if (changed) {
4801                 if (val)
4802                         hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4803                 else
4804                         hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4805         }
4806
4807         bt_dev_info(hdev, "offload codecs enable %d changed %d",
4808                     val, changed);
4809
4810         memcpy(rp.uuid, offload_codecs_uuid, 16);
4811         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4812         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4813         err = mgmt_cmd_complete(sk, hdev->id,
4814                                 MGMT_OP_SET_EXP_FEATURE, 0,
4815                                 &rp, sizeof(rp));
4816
4817         if (changed)
4818                 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4819
4820         return err;
4821 }
4822
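/* Enable or disable LE simultaneous roles by toggling
 * HCI_LE_SIMULTANEOUS_ROLES. Returns MGMT_STATUS_NOT_SUPPORTED when the
 * controller's supported LE states do not allow simultaneous roles.
 */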
4823 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4824                                           struct mgmt_cp_set_exp_feature *cp,
4825                                           u16 data_len)
4826 {
4827         bool val, changed;
4828         int err;
4829         struct mgmt_rp_set_exp_feature rp;
4830
4831         /* Command requires a valid controller index */
4832         if (!hdev)
4833                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4834                                        MGMT_OP_SET_EXP_FEATURE,
4835                                        MGMT_STATUS_INVALID_INDEX);
4836
4837         /* Parameters are limited to a single octet */
4838         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4839                 return mgmt_cmd_status(sk, hdev->id,
4840                                        MGMT_OP_SET_EXP_FEATURE,
4841                                        MGMT_STATUS_INVALID_PARAMS);
4842
4843         /* Only boolean on/off is supported */
4844         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4845                 return mgmt_cmd_status(sk, hdev->id,
4846                                        MGMT_OP_SET_EXP_FEATURE,
4847                                        MGMT_STATUS_INVALID_PARAMS);
4848
4849         val = !!cp->param[0];
4850         changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4851
4852         if (!hci_dev_le_state_simultaneous(hdev)) {
4853                 return mgmt_cmd_status(sk, hdev->id,
4854                                        MGMT_OP_SET_EXP_FEATURE,
4855                                        MGMT_STATUS_NOT_SUPPORTED);
4856         }
4857
4858         if (changed) {
4859                 if (val)
4860                         hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4861                 else
4862                         hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4863         }
4864
4865         bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4866                     val, changed);
4867
4868         memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4869         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4870         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4871         err = mgmt_cmd_complete(sk, hdev->id,
4872                                 MGMT_OP_SET_EXP_FEATURE, 0,
4873                                 &rp, sizeof(rp));
4874
4875         if (changed)
4876                 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4877
4878         return err;
4879 }
4880
4881 #ifdef CONFIG_BT_LE
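/* Enable or disable ISO socket support. This is a global (non-controller
 * index) setting that registers or unregisters the ISO protocol via
 * iso_init()/iso_exit().
 */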
4882 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4883                                struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4884 {
4885         struct mgmt_rp_set_exp_feature rp;
4886         bool val, changed = false;
4887         int err;
4888
4889         /* Command requires the non-controller index */
4890         if (hdev)
4891                 return mgmt_cmd_status(sk, hdev->id,
4892                                        MGMT_OP_SET_EXP_FEATURE,
4893                                        MGMT_STATUS_INVALID_INDEX);
4894
4895         /* Parameters are limited to a single octet */
4896         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4897                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4898                                        MGMT_OP_SET_EXP_FEATURE,
4899                                        MGMT_STATUS_INVALID_PARAMS);
4900
4901         /* Only boolean on/off is supported */
4902         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4903                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4904                                        MGMT_OP_SET_EXP_FEATURE,
4905                                        MGMT_STATUS_INVALID_PARAMS);
4906
4907         val = !!cp->param[0];
4908         if (val)
4909                 err = iso_init();
4910         else
4911                 err = iso_exit();
4912
4913         if (!err)
4914                 changed = true;
4915
4916         memcpy(rp.uuid, iso_socket_uuid, 16);
4917         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4918
4919         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4920
4921         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4922                                 MGMT_OP_SET_EXP_FEATURE, 0,
4923                                 &rp, sizeof(rp));
4924
4925         if (changed)
4926                 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4927
4928         return err;
4929 }
4930 #endif
4931
4932 static const struct mgmt_exp_feature {
4933         const u8 *uuid;
4934         int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4935                         struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4936 } exp_features[] = {
4937         EXP_FEAT(ZERO_KEY, set_zero_key_func),
4938 #ifdef CONFIG_BT_FEATURE_DEBUG
4939         EXP_FEAT(debug_uuid, set_debug_func),
4940 #endif
4941         EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4942         EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4943         EXP_FEAT(quality_report_uuid, set_quality_report_func),
4944         EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4945         EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4946 #ifdef CONFIG_BT_LE
4947         EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4948 #endif
4949
4950         /* end with a null feature */
4951         EXP_FEAT(NULL, NULL)
4952 };
4953
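/* Handler for MGMT_OP_SET_EXP_FEATURE: look up the requested UUID in the
 * exp_features[] table and dispatch to the matching set handler, or return
 * MGMT_STATUS_NOT_SUPPORTED for an unknown UUID.
 */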
4954 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4955                            void *data, u16 data_len)
4956 {
4957         struct mgmt_cp_set_exp_feature *cp = data;
4958         size_t i = 0;
4959
4960         bt_dev_dbg(hdev, "sock %p", sk);
4961
4962         for (i = 0; exp_features[i].uuid; i++) {
4963                 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4964                         return exp_features[i].set_func(sk, hdev, cp, data_len);
4965         }
4966
4967         return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4968                                MGMT_OP_SET_EXP_FEATURE,
4969                                MGMT_STATUS_NOT_SUPPORTED);
4970 }
4971
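/* Return the connection flags that can actually be set for the given
 * connection parameters, starting from hdev->conn_flags and masking out
 * flags that are unusable in the current configuration.
 */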
4972 static u32 get_params_flags(struct hci_dev *hdev,
4973                             struct hci_conn_params *params)
4974 {
4975         u32 flags = hdev->conn_flags;
4976
4977         /* Devices using RPAs can only be programmed into the accept list
4978          * if LL Privacy has been enabled; otherwise they cannot set
4979          * HCI_CONN_FLAG_REMOTE_WAKEUP.
4980          */
4981         if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
4982             hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
4983                 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
4984
4985         return flags;
4986 }
4987
4988 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4989                             u16 data_len)
4990 {
4991         struct mgmt_cp_get_device_flags *cp = data;
4992         struct mgmt_rp_get_device_flags rp;
4993         struct bdaddr_list_with_flags *br_params;
4994         struct hci_conn_params *params;
4995         u32 supported_flags;
4996         u32 current_flags = 0;
4997         u8 status = MGMT_STATUS_INVALID_PARAMS;
4998
4999         bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5000                    &cp->addr.bdaddr, cp->addr.type);
5001
5002         hci_dev_lock(hdev);
5003
5004         supported_flags = hdev->conn_flags;
5005
5006         memset(&rp, 0, sizeof(rp));
5007
5008         if (cp->addr.type == BDADDR_BREDR) {
5009                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5010                                                               &cp->addr.bdaddr,
5011                                                               cp->addr.type);
5012                 if (!br_params)
5013                         goto done;
5014
5015                 current_flags = br_params->flags;
5016         } else {
5017                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5018                                                 le_addr_type(cp->addr.type));
5019                 if (!params)
5020                         goto done;
5021
5022                 supported_flags = get_params_flags(hdev, params);
5023                 current_flags = params->flags;
5024         }
5025
5026         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5027         rp.addr.type = cp->addr.type;
5028         rp.supported_flags = cpu_to_le32(supported_flags);
5029         rp.current_flags = cpu_to_le32(current_flags);
5030
5031         status = MGMT_STATUS_SUCCESS;
5032
5033 done:
5034         hci_dev_unlock(hdev);
5035
5036         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5037                                 &rp, sizeof(rp));
5038 }
5039
5040 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5041                                  bdaddr_t *bdaddr, u8 bdaddr_type,
5042                                  u32 supported_flags, u32 current_flags)
5043 {
5044         struct mgmt_ev_device_flags_changed ev;
5045
5046         bacpy(&ev.addr.bdaddr, bdaddr);
5047         ev.addr.type = bdaddr_type;
5048         ev.supported_flags = cpu_to_le32(supported_flags);
5049         ev.current_flags = cpu_to_le32(current_flags);
5050
5051         mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5052 }
5053
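/* Handler for MGMT_OP_SET_DEVICE_FLAGS: validate the requested flags
 * against the supported ones, store them in the BR/EDR accept list entry
 * or the LE connection parameters, and emit Device Flags Changed on
 * success.
 */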
5054 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5055                             u16 len)
5056 {
5057         struct mgmt_cp_set_device_flags *cp = data;
5058         struct bdaddr_list_with_flags *br_params;
5059         struct hci_conn_params *params;
5060         u8 status = MGMT_STATUS_INVALID_PARAMS;
5061         u32 supported_flags;
5062         u32 current_flags = __le32_to_cpu(cp->current_flags);
5063
5064         bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5065                    &cp->addr.bdaddr, cp->addr.type, current_flags);
5066
5067         /* TODO: take hci_dev_lock() earlier; conn_flags can change concurrently */
5068         supported_flags = hdev->conn_flags;
5069
5070         if ((supported_flags | current_flags) != supported_flags) {
5071                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5072                             current_flags, supported_flags);
5073                 goto done;
5074         }
5075
5076         hci_dev_lock(hdev);
5077
5078         if (cp->addr.type == BDADDR_BREDR) {
5079                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5080                                                               &cp->addr.bdaddr,
5081                                                               cp->addr.type);
5082
5083                 if (br_params) {
5084                         br_params->flags = current_flags;
5085                         status = MGMT_STATUS_SUCCESS;
5086                 } else {
5087                         bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5088                                     &cp->addr.bdaddr, cp->addr.type);
5089                 }
5090
5091                 goto unlock;
5092         }
5093
5094         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5095                                         le_addr_type(cp->addr.type));
5096         if (!params) {
5097                 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5098                             &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5099                 goto unlock;
5100         }
5101
5102         supported_flags = get_params_flags(hdev, params);
5103
5104         if ((supported_flags | current_flags) != supported_flags) {
5105                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5106                             current_flags, supported_flags);
5107                 goto unlock;
5108         }
5109
5110         WRITE_ONCE(params->flags, current_flags);
5111         status = MGMT_STATUS_SUCCESS;
5112
5113         /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5114          * has been set.
5115          */
5116         if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5117                 hci_update_passive_scan(hdev);
5118
5119 unlock:
5120         hci_dev_unlock(hdev);
5121
5122 done:
5123         if (status == MGMT_STATUS_SUCCESS)
5124                 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5125                                      supported_flags, current_flags);
5126
5127         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5128                                  &cp->addr, sizeof(cp->addr));
5129 }
5130
5131 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5132                                    u16 handle)
5133 {
5134         struct mgmt_ev_adv_monitor_added ev;
5135
5136         ev.monitor_handle = cpu_to_le16(handle);
5137
5138         mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5139 }
5140
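/* Send MGMT_EV_ADV_MONITOR_REMOVED for the given handle. When the removal
 * of a specific monitor was requested over mgmt, the requesting socket is
 * skipped since it receives the command reply instead.
 */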
5141 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5142 {
5143         struct mgmt_ev_adv_monitor_removed ev;
5144         struct mgmt_pending_cmd *cmd;
5145         struct sock *sk_skip = NULL;
5146         struct mgmt_cp_remove_adv_monitor *cp;
5147
5148         cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5149         if (cmd) {
5150                 cp = cmd->param;
5151
5152                 if (cp->monitor_handle)
5153                         sk_skip = cmd->sk;
5154         }
5155
5156         ev.monitor_handle = cpu_to_le16(handle);
5157
5158         mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5159 }
5160
5161 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5162                                  void *data, u16 len)
5163 {
5164         struct adv_monitor *monitor = NULL;
5165         struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5166         int handle, err;
5167         size_t rp_size = 0;
5168         __u32 supported = 0;
5169         __u32 enabled = 0;
5170         __u16 num_handles = 0;
5171         __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5172
5173         BT_DBG("request for %s", hdev->name);
5174
5175         hci_dev_lock(hdev);
5176
5177         if (msft_monitor_supported(hdev))
5178                 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5179
5180         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5181                 handles[num_handles++] = monitor->handle;
5182
5183         hci_dev_unlock(hdev);
5184
5185         rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5186         rp = kmalloc(rp_size, GFP_KERNEL);
5187         if (!rp)
5188                 return -ENOMEM;
5189
5190         /* All supported features are currently enabled */
5191         enabled = supported;
5192
5193         rp->supported_features = cpu_to_le32(supported);
5194         rp->enabled_features = cpu_to_le32(enabled);
5195         rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5196         rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5197         rp->num_handles = cpu_to_le16(num_handles);
5198         if (num_handles)
5199                 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5200
5201         err = mgmt_cmd_complete(sk, hdev->id,
5202                                 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5203                                 MGMT_STATUS_SUCCESS, rp, rp_size);
5204
5205         kfree(rp);
5206
5207         return err;
5208 }
5209
5210 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5211                                                    void *data, int status)
5212 {
5213         struct mgmt_rp_add_adv_patterns_monitor rp;
5214         struct mgmt_pending_cmd *cmd = data;
5215         struct adv_monitor *monitor = cmd->user_data;
5216
5217         hci_dev_lock(hdev);
5218
5219         rp.monitor_handle = cpu_to_le16(monitor->handle);
5220
5221         if (!status) {
5222                 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5223                 hdev->adv_monitors_cnt++;
5224                 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5225                         monitor->state = ADV_MONITOR_STATE_REGISTERED;
5226                 hci_update_passive_scan(hdev);
5227         }
5228
5229         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5230                           mgmt_status(status), &rp, sizeof(rp));
5231         mgmt_pending_remove(cmd);
5232
5233         hci_dev_unlock(hdev);
5234         bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5235                    rp.monitor_handle, status);
5236 }
5237
5238 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5239 {
5240         struct mgmt_pending_cmd *cmd = data;
5241         struct adv_monitor *monitor = cmd->user_data;
5242
5243         return hci_add_adv_monitor(hdev, monitor);
5244 }
5245
5246 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5247                                       struct adv_monitor *m, u8 status,
5248                                       void *data, u16 len, u16 op)
5249 {
5250         struct mgmt_pending_cmd *cmd;
5251         int err;
5252
5253         hci_dev_lock(hdev);
5254
5255         if (status)
5256                 goto unlock;
5257
5258         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5259             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5260             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5261             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5262                 status = MGMT_STATUS_BUSY;
5263                 goto unlock;
5264         }
5265
5266         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5267         if (!cmd) {
5268                 status = MGMT_STATUS_NO_RESOURCES;
5269                 goto unlock;
5270         }
5271
5272         cmd->user_data = m;
5273         err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5274                                  mgmt_add_adv_patterns_monitor_complete);
5275         if (err) {
5276                 if (err == -ENOMEM)
5277                         status = MGMT_STATUS_NO_RESOURCES;
5278                 else
5279                         status = MGMT_STATUS_FAILED;
5280
5281                 goto unlock;
5282         }
5283
5284         hci_dev_unlock(hdev);
5285
5286         return 0;
5287
5288 unlock:
5289         hci_free_adv_monitor(hdev, m);
5290         hci_dev_unlock(hdev);
5291         return mgmt_cmd_status(sk, hdev->id, op, status);
5292 }
5293
5294 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5295                                    struct mgmt_adv_rssi_thresholds *rssi)
5296 {
5297         if (rssi) {
5298                 m->rssi.low_threshold = rssi->low_threshold;
5299                 m->rssi.low_threshold_timeout =
5300                     __le16_to_cpu(rssi->low_threshold_timeout);
5301                 m->rssi.high_threshold = rssi->high_threshold;
5302                 m->rssi.high_threshold_timeout =
5303                     __le16_to_cpu(rssi->high_threshold_timeout);
5304                 m->rssi.sampling_period = rssi->sampling_period;
5305         } else {
5306                 /* Default values. These are the least constraining
5307                  * parameters the MSFT API accepts, so it behaves as if
5308                  * there were no RSSI parameters to consider. May need to
5309                  * be changed if other APIs are to be supported.
5310                  */
5311                 m->rssi.low_threshold = -127;
5312                 m->rssi.low_threshold_timeout = 60;
5313                 m->rssi.high_threshold = -127;
5314                 m->rssi.high_threshold_timeout = 0;
5315                 m->rssi.sampling_period = 0;
5316         }
5317 }
5318
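/* Validate the advertising patterns supplied by userspace and append them
 * to the monitor's pattern list. Returns a MGMT status code.
 */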
5319 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5320                                     struct mgmt_adv_pattern *patterns)
5321 {
5322         u8 offset = 0, length = 0;
5323         struct adv_pattern *p = NULL;
5324         int i;
5325
5326         for (i = 0; i < pattern_count; i++) {
5327                 offset = patterns[i].offset;
5328                 length = patterns[i].length;
5329                 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5330                     length > HCI_MAX_EXT_AD_LENGTH ||
5331                     (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5332                         return MGMT_STATUS_INVALID_PARAMS;
5333
5334                 p = kmalloc(sizeof(*p), GFP_KERNEL);
5335                 if (!p)
5336                         return MGMT_STATUS_NO_RESOURCES;
5337
5338                 p->ad_type = patterns[i].ad_type;
5339                 p->offset = patterns[i].offset;
5340                 p->length = patterns[i].length;
5341                 memcpy(p->value, patterns[i].value, p->length);
5342
5343                 INIT_LIST_HEAD(&p->list);
5344                 list_add(&p->list, &m->patterns);
5345         }
5346
5347         return MGMT_STATUS_SUCCESS;
5348 }
5349
5350 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5351                                     void *data, u16 len)
5352 {
5353         struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5354         struct adv_monitor *m = NULL;
5355         u8 status = MGMT_STATUS_SUCCESS;
5356         size_t expected_size = sizeof(*cp);
5357
5358         BT_DBG("request for %s", hdev->name);
5359
5360         if (len <= sizeof(*cp)) {
5361                 status = MGMT_STATUS_INVALID_PARAMS;
5362                 goto done;
5363         }
5364
5365         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5366         if (len != expected_size) {
5367                 status = MGMT_STATUS_INVALID_PARAMS;
5368                 goto done;
5369         }
5370
5371         m = kzalloc(sizeof(*m), GFP_KERNEL);
5372         if (!m) {
5373                 status = MGMT_STATUS_NO_RESOURCES;
5374                 goto done;
5375         }
5376
5377         INIT_LIST_HEAD(&m->patterns);
5378
5379         parse_adv_monitor_rssi(m, NULL);
5380         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5381
5382 done:
5383         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5384                                           MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5385 }
5386
5387 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5388                                          void *data, u16 len)
5389 {
5390         struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5391         struct adv_monitor *m = NULL;
5392         u8 status = MGMT_STATUS_SUCCESS;
5393         size_t expected_size = sizeof(*cp);
5394
5395         BT_DBG("request for %s", hdev->name);
5396
5397         if (len <= sizeof(*cp)) {
5398                 status = MGMT_STATUS_INVALID_PARAMS;
5399                 goto done;
5400         }
5401
5402         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5403         if (len != expected_size) {
5404                 status = MGMT_STATUS_INVALID_PARAMS;
5405                 goto done;
5406         }
5407
5408         m = kzalloc(sizeof(*m), GFP_KERNEL);
5409         if (!m) {
5410                 status = MGMT_STATUS_NO_RESOURCES;
5411                 goto done;
5412         }
5413
5414         INIT_LIST_HEAD(&m->patterns);
5415
5416         parse_adv_monitor_rssi(m, &cp->rssi);
5417         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5418
5419 done:
5420         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5421                                          MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5422 }
5423
5424 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5425                                              void *data, int status)
5426 {
5427         struct mgmt_rp_remove_adv_monitor rp;
5428         struct mgmt_pending_cmd *cmd = data;
5429         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5430
5431         hci_dev_lock(hdev);
5432
5433         rp.monitor_handle = cp->monitor_handle;
5434
5435         if (!status)
5436                 hci_update_passive_scan(hdev);
5437
5438         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5439                           mgmt_status(status), &rp, sizeof(rp));
5440         mgmt_pending_remove(cmd);
5441
5442         hci_dev_unlock(hdev);
5443         bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5444                    rp.monitor_handle, status);
5445 }
5446
5447 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5448 {
5449         struct mgmt_pending_cmd *cmd = data;
5450         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5451         u16 handle = __le16_to_cpu(cp->monitor_handle);
5452
5453         if (!handle)
5454                 return hci_remove_all_adv_monitor(hdev);
5455
5456         return hci_remove_single_adv_monitor(hdev, handle);
5457 }
5458
5459 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5460                               void *data, u16 len)
5461 {
5462         struct mgmt_pending_cmd *cmd;
5463         int err, status;
5464
5465         hci_dev_lock(hdev);
5466
5467         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5468             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5469             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5470             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5471                 status = MGMT_STATUS_BUSY;
5472                 goto unlock;
5473         }
5474
5475         cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5476         if (!cmd) {
5477                 status = MGMT_STATUS_NO_RESOURCES;
5478                 goto unlock;
5479         }
5480
5481         err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5482                                   mgmt_remove_adv_monitor_complete);
5483
5484         if (err) {
5485                 mgmt_pending_remove(cmd);
5486
5487                 if (err == -ENOMEM)
5488                         status = MGMT_STATUS_NO_RESOURCES;
5489                 else
5490                         status = MGMT_STATUS_FAILED;
5491
5492                 goto unlock;
5493         }
5494
5495         hci_dev_unlock(hdev);
5496
5497         return 0;
5498
5499 unlock:
5500         hci_dev_unlock(hdev);
5501         return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5502                                status);
5503 }
5504
5505 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5506 {
5507         struct mgmt_rp_read_local_oob_data mgmt_rp;
5508         size_t rp_size = sizeof(mgmt_rp);
5509         struct mgmt_pending_cmd *cmd = data;
5510         struct sk_buff *skb = cmd->skb;
5511         u8 status = mgmt_status(err);
5512
5513         if (!status) {
5514                 if (!skb)
5515                         status = MGMT_STATUS_FAILED;
5516                 else if (IS_ERR(skb))
5517                         status = mgmt_status(PTR_ERR(skb));
5518                 else
5519                         status = mgmt_status(skb->data[0]);
5520         }
5521
5522         bt_dev_dbg(hdev, "status %d", status);
5523
5524         if (status) {
5525                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5526                 goto remove;
5527         }
5528
5529         memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5530
5531         if (!bredr_sc_enabled(hdev)) {
5532                 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5533
5534                 if (skb->len < sizeof(*rp)) {
5535                         mgmt_cmd_status(cmd->sk, hdev->id,
5536                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5537                                         MGMT_STATUS_FAILED);
5538                         goto remove;
5539                 }
5540
5541                 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5542                 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5543
5544                 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5545         } else {
5546                 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5547
5548                 if (skb->len < sizeof(*rp)) {
5549                         mgmt_cmd_status(cmd->sk, hdev->id,
5550                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5551                                         MGMT_STATUS_FAILED);
5552                         goto remove;
5553                 }
5554
5555                 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5556                 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5557
5558                 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5559                 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5560         }
5561
5562         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5563                           MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5564
5565 remove:
5566         if (skb && !IS_ERR(skb))
5567                 kfree_skb(skb);
5568
5569         mgmt_pending_free(cmd);
5570 }
5571
5572 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5573 {
5574         struct mgmt_pending_cmd *cmd = data;
5575
5576         if (bredr_sc_enabled(hdev))
5577                 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5578         else
5579                 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5580
5581         if (IS_ERR(cmd->skb))
5582                 return PTR_ERR(cmd->skb);
5583         else
5584                 return 0;
5585 }
5586
5587 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5588                                void *data, u16 data_len)
5589 {
5590         struct mgmt_pending_cmd *cmd;
5591         int err;
5592
5593         bt_dev_dbg(hdev, "sock %p", sk);
5594
5595         hci_dev_lock(hdev);
5596
5597         if (!hdev_is_powered(hdev)) {
5598                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5599                                       MGMT_STATUS_NOT_POWERED);
5600                 goto unlock;
5601         }
5602
5603         if (!lmp_ssp_capable(hdev)) {
5604                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5605                                       MGMT_STATUS_NOT_SUPPORTED);
5606                 goto unlock;
5607         }
5608
5609         cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5610         if (!cmd)
5611                 err = -ENOMEM;
5612         else
5613                 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5614                                          read_local_oob_data_complete);
5615
5616         if (err < 0) {
5617                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5618                                       MGMT_STATUS_FAILED);
5619
5620                 if (cmd)
5621                         mgmt_pending_free(cmd);
5622         }
5623
5624 unlock:
5625         hci_dev_unlock(hdev);
5626         return err;
5627 }
5628
5629 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5630                                void *data, u16 len)
5631 {
5632         struct mgmt_addr_info *addr = data;
5633         int err;
5634
5635         bt_dev_dbg(hdev, "sock %p", sk);
5636
5637         if (!bdaddr_type_is_valid(addr->type))
5638                 return mgmt_cmd_complete(sk, hdev->id,
5639                                          MGMT_OP_ADD_REMOTE_OOB_DATA,
5640                                          MGMT_STATUS_INVALID_PARAMS,
5641                                          addr, sizeof(*addr));
5642
5643         hci_dev_lock(hdev);
5644
5645         if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5646                 struct mgmt_cp_add_remote_oob_data *cp = data;
5647                 u8 status;
5648
5649                 if (cp->addr.type != BDADDR_BREDR) {
5650                         err = mgmt_cmd_complete(sk, hdev->id,
5651                                                 MGMT_OP_ADD_REMOTE_OOB_DATA,
5652                                                 MGMT_STATUS_INVALID_PARAMS,
5653                                                 &cp->addr, sizeof(cp->addr));
5654                         goto unlock;
5655                 }
5656
5657                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5658                                               cp->addr.type, cp->hash,
5659                                               cp->rand, NULL, NULL);
5660                 if (err < 0)
5661                         status = MGMT_STATUS_FAILED;
5662                 else
5663                         status = MGMT_STATUS_SUCCESS;
5664
5665                 err = mgmt_cmd_complete(sk, hdev->id,
5666                                         MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5667                                         &cp->addr, sizeof(cp->addr));
5668         } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5669                 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5670                 u8 *rand192, *hash192, *rand256, *hash256;
5671                 u8 status;
5672
5673                 if (bdaddr_type_is_le(cp->addr.type)) {
5674                         /* Enforce zero-valued 192-bit parameters as
5675                          * long as legacy SMP OOB isn't implemented.
5676                          */
5677                         if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5678                             memcmp(cp->hash192, ZERO_KEY, 16)) {
5679                                 err = mgmt_cmd_complete(sk, hdev->id,
5680                                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5681                                                         MGMT_STATUS_INVALID_PARAMS,
5682                                                         addr, sizeof(*addr));
5683                                 goto unlock;
5684                         }
5685
5686                         rand192 = NULL;
5687                         hash192 = NULL;
5688                 } else {
5689                         /* If one of the P-192 values is set to zero, just
5690                          * disable OOB data for P-192.
5691                          */
5692                         if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5693                             !memcmp(cp->hash192, ZERO_KEY, 16)) {
5694                                 rand192 = NULL;
5695                                 hash192 = NULL;
5696                         } else {
5697                                 rand192 = cp->rand192;
5698                                 hash192 = cp->hash192;
5699                         }
5700                 }
5701
5702                 /* If one of the P-256 values is set to zero, just disable
5703                  * OOB data for P-256.
5704                  */
5705                 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5706                     !memcmp(cp->hash256, ZERO_KEY, 16)) {
5707                         rand256 = NULL;
5708                         hash256 = NULL;
5709                 } else {
5710                         rand256 = cp->rand256;
5711                         hash256 = cp->hash256;
5712                 }
5713
5714                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5715                                               cp->addr.type, hash192, rand192,
5716                                               hash256, rand256);
5717                 if (err < 0)
5718                         status = MGMT_STATUS_FAILED;
5719                 else
5720                         status = MGMT_STATUS_SUCCESS;
5721
5722                 err = mgmt_cmd_complete(sk, hdev->id,
5723                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5724                                         status, &cp->addr, sizeof(cp->addr));
5725         } else {
5726                 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5727                            len);
5728                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5729                                       MGMT_STATUS_INVALID_PARAMS);
5730         }
5731
5732 unlock:
5733         hci_dev_unlock(hdev);
5734         return err;
5735 }
5736
5737 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5738                                   void *data, u16 len)
5739 {
5740         struct mgmt_cp_remove_remote_oob_data *cp = data;
5741         u8 status;
5742         int err;
5743
5744         bt_dev_dbg(hdev, "sock %p", sk);
5745
5746         if (cp->addr.type != BDADDR_BREDR)
5747                 return mgmt_cmd_complete(sk, hdev->id,
5748                                          MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5749                                          MGMT_STATUS_INVALID_PARAMS,
5750                                          &cp->addr, sizeof(cp->addr));
5751
5752         hci_dev_lock(hdev);
5753
5754         if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5755                 hci_remote_oob_data_clear(hdev);
5756                 status = MGMT_STATUS_SUCCESS;
5757                 goto done;
5758         }
5759
5760         err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5761         if (err < 0)
5762                 status = MGMT_STATUS_INVALID_PARAMS;
5763         else
5764                 status = MGMT_STATUS_SUCCESS;
5765
5766 done:
5767         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5768                                 status, &cp->addr, sizeof(cp->addr));
5769
5770         hci_dev_unlock(hdev);
5771         return err;
5772 }
5773
5774 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5775 {
5776         struct mgmt_pending_cmd *cmd;
5777
5778         bt_dev_dbg(hdev, "status %u", status);
5779
5780         hci_dev_lock(hdev);
5781
5782         cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5783         if (!cmd)
5784                 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5785
5786         if (!cmd)
5787                 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5788
5789         if (cmd) {
5790                 cmd->cmd_complete(cmd, mgmt_status(status));
5791                 mgmt_pending_remove(cmd);
5792         }
5793
5794         hci_dev_unlock(hdev);
5795 }
5796
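/* Check whether the requested discovery type is supported by the
 * controller. On failure *mgmt_status is set to the status code that
 * should be returned to userspace.
 */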
5797 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5798                                     uint8_t *mgmt_status)
5799 {
5800         switch (type) {
5801         case DISCOV_TYPE_LE:
5802                 *mgmt_status = mgmt_le_support(hdev);
5803                 if (*mgmt_status)
5804                         return false;
5805                 break;
5806         case DISCOV_TYPE_INTERLEAVED:
5807                 *mgmt_status = mgmt_le_support(hdev);
5808                 if (*mgmt_status)
5809                         return false;
5810                 fallthrough;
5811         case DISCOV_TYPE_BREDR:
5812                 *mgmt_status = mgmt_bredr_support(hdev);
5813                 if (*mgmt_status)
5814                         return false;
5815                 break;
5816         default:
5817                 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5818                 return false;
5819         }
5820
5821         return true;
5822 }
5823
5824 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5825 {
5826         struct mgmt_pending_cmd *cmd = data;
5827
5828         if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5829             cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5830             cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5831                 return;
5832
5833         bt_dev_dbg(hdev, "err %d", err);
5834
5835         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5836                           cmd->param, 1);
5837         mgmt_pending_remove(cmd);
5838
5839         hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5840                                 DISCOVERY_FINDING);
5841 }
5842
5843 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5844 {
5845         return hci_start_discovery_sync(hdev);
5846 }
5847
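/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validate the controller state and
 * discovery type, then queue start_discovery_sync() via
 * hci_cmd_sync_queue().
 */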
5848 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5849                                     u16 op, void *data, u16 len)
5850 {
5851         struct mgmt_cp_start_discovery *cp = data;
5852         struct mgmt_pending_cmd *cmd;
5853         u8 status;
5854         int err;
5855
5856         bt_dev_dbg(hdev, "sock %p", sk);
5857
5858         hci_dev_lock(hdev);
5859
5860         if (!hdev_is_powered(hdev)) {
5861                 err = mgmt_cmd_complete(sk, hdev->id, op,
5862                                         MGMT_STATUS_NOT_POWERED,
5863                                         &cp->type, sizeof(cp->type));
5864                 goto failed;
5865         }
5866
5867         if (hdev->discovery.state != DISCOVERY_STOPPED ||
5868             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5869                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5870                                         &cp->type, sizeof(cp->type));
5871                 goto failed;
5872         }
5873
5874         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5875                 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5876                                         &cp->type, sizeof(cp->type));
5877                 goto failed;
5878         }
5879
5880         /* Can't start discovery when it is paused */
5881         if (hdev->discovery_paused) {
5882                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5883                                         &cp->type, sizeof(cp->type));
5884                 goto failed;
5885         }
5886
5887         /* Clear the discovery filter first to free any previously
5888          * allocated memory for the UUID list.
5889          */
5890         hci_discovery_filter_clear(hdev);
5891
5892         hdev->discovery.type = cp->type;
5893         hdev->discovery.report_invalid_rssi = false;
5894         if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5895                 hdev->discovery.limited = true;
5896         else
5897                 hdev->discovery.limited = false;
5898
5899         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5900         if (!cmd) {
5901                 err = -ENOMEM;
5902                 goto failed;
5903         }
5904
5905         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5906                                  start_discovery_complete);
5907         if (err < 0) {
5908                 mgmt_pending_remove(cmd);
5909                 goto failed;
5910         }
5911
5912         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5913
5914 failed:
5915         hci_dev_unlock(hdev);
5916         return err;
5917 }
5918
5919 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5920                            void *data, u16 len)
5921 {
5922         return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5923                                         data, len);
5924 }
5925
5926 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5927                                    void *data, u16 len)
5928 {
5929         return start_discovery_internal(sk, hdev,
5930                                         MGMT_OP_START_LIMITED_DISCOVERY,
5931                                         data, len);
5932 }
5933
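     /* Start Service Discovery takes a variable-length parameter block:
      * struct mgmt_cp_start_service_discovery is followed by uuid_count
      * 128-bit UUIDs (16 bytes each), so the total length is re-checked
      * against sizeof(*cp) + uuid_count * 16 before the list is copied with
      * kmemdup(). As a purely illustrative sketch, a request carrying two
      * UUIDs would be laid out as:
      *
      *	struct {
      *		struct mgmt_cp_start_service_discovery cp;
      *		__u8 uuids[2][16];
      *	} __packed req;
      *
      * Only the validation in the function itself is authoritative.
      */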
5934 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5935                                    void *data, u16 len)
5936 {
5937         struct mgmt_cp_start_service_discovery *cp = data;
5938         struct mgmt_pending_cmd *cmd;
5939         const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5940         u16 uuid_count, expected_len;
5941         u8 status;
5942         int err;
5943
5944         bt_dev_dbg(hdev, "sock %p", sk);
5945
5946         hci_dev_lock(hdev);
5947
5948         if (!hdev_is_powered(hdev)) {
5949                 err = mgmt_cmd_complete(sk, hdev->id,
5950                                         MGMT_OP_START_SERVICE_DISCOVERY,
5951                                         MGMT_STATUS_NOT_POWERED,
5952                                         &cp->type, sizeof(cp->type));
5953                 goto failed;
5954         }
5955
5956         if (hdev->discovery.state != DISCOVERY_STOPPED ||
5957             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5958                 err = mgmt_cmd_complete(sk, hdev->id,
5959                                         MGMT_OP_START_SERVICE_DISCOVERY,
5960                                         MGMT_STATUS_BUSY, &cp->type,
5961                                         sizeof(cp->type));
5962                 goto failed;
5963         }
5964
5965         if (hdev->discovery_paused) {
5966                 err = mgmt_cmd_complete(sk, hdev->id,
5967                                         MGMT_OP_START_SERVICE_DISCOVERY,
5968                                         MGMT_STATUS_BUSY, &cp->type,
5969                                         sizeof(cp->type));
5970                 goto failed;
5971         }
5972
5973         uuid_count = __le16_to_cpu(cp->uuid_count);
5974         if (uuid_count > max_uuid_count) {
5975                 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5976                            uuid_count);
5977                 err = mgmt_cmd_complete(sk, hdev->id,
5978                                         MGMT_OP_START_SERVICE_DISCOVERY,
5979                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
5980                                         sizeof(cp->type));
5981                 goto failed;
5982         }
5983
5984         expected_len = sizeof(*cp) + uuid_count * 16;
5985         if (expected_len != len) {
5986                 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5987                            expected_len, len);
5988                 err = mgmt_cmd_complete(sk, hdev->id,
5989                                         MGMT_OP_START_SERVICE_DISCOVERY,
5990                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
5991                                         sizeof(cp->type));
5992                 goto failed;
5993         }
5994
5995         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5996                 err = mgmt_cmd_complete(sk, hdev->id,
5997                                         MGMT_OP_START_SERVICE_DISCOVERY,
5998                                         status, &cp->type, sizeof(cp->type));
5999                 goto failed;
6000         }
6001
6002         cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6003                                hdev, data, len);
6004         if (!cmd) {
6005                 err = -ENOMEM;
6006                 goto failed;
6007         }
6008
6009         /* Clear the discovery filter first to free any previously
6010          * allocated memory for the UUID list.
6011          */
6012         hci_discovery_filter_clear(hdev);
6013
6014         hdev->discovery.result_filtering = true;
6015         hdev->discovery.type = cp->type;
6016         hdev->discovery.rssi = cp->rssi;
6017         hdev->discovery.uuid_count = uuid_count;
6018
6019         if (uuid_count > 0) {
6020                 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6021                                                 GFP_KERNEL);
6022                 if (!hdev->discovery.uuids) {
6023                         err = mgmt_cmd_complete(sk, hdev->id,
6024                                                 MGMT_OP_START_SERVICE_DISCOVERY,
6025                                                 MGMT_STATUS_FAILED,
6026                                                 &cp->type, sizeof(cp->type));
6027                         mgmt_pending_remove(cmd);
6028                         goto failed;
6029                 }
6030         }
6031
6032         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6033                                  start_discovery_complete);
6034         if (err < 0) {
6035                 mgmt_pending_remove(cmd);
6036                 goto failed;
6037         }
6038
6039         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6040
6041 failed:
6042         hci_dev_unlock(hdev);
6043         return err;
6044 }
6045
6046 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6047 {
6048         struct mgmt_pending_cmd *cmd;
6049
6050         bt_dev_dbg(hdev, "status %u", status);
6051
6052         hci_dev_lock(hdev);
6053
6054         cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6055         if (cmd) {
6056                 cmd->cmd_complete(cmd, mgmt_status(status));
6057                 mgmt_pending_remove(cmd);
6058         }
6059
6060         hci_dev_unlock(hdev);
6061 }
6062
6063 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6064 {
6065         struct mgmt_pending_cmd *cmd = data;
6066
6067         if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6068                 return;
6069
6070         bt_dev_dbg(hdev, "err %d", err);
6071
6072         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6073                           cmd->param, 1);
6074         mgmt_pending_remove(cmd);
6075
6076         if (!err)
6077                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6078 }
6079
6080 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6081 {
6082         return hci_stop_discovery_sync(hdev);
6083 }
6084
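     /* Stop Discovery aborts an ongoing discovery session. The request is
      * rejected if no discovery is active and treated as invalid if the
      * given type does not match the type that was started; otherwise the
      * stop is queued and the state moves to STOPPING until
      * stop_discovery_complete() runs.
      */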
6085 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6086                           u16 len)
6087 {
6088         struct mgmt_cp_stop_discovery *mgmt_cp = data;
6089         struct mgmt_pending_cmd *cmd;
6090         int err;
6091
6092         bt_dev_dbg(hdev, "sock %p", sk);
6093
6094         hci_dev_lock(hdev);
6095
6096         if (!hci_discovery_active(hdev)) {
6097                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6098                                         MGMT_STATUS_REJECTED, &mgmt_cp->type,
6099                                         sizeof(mgmt_cp->type));
6100                 goto unlock;
6101         }
6102
6103         if (hdev->discovery.type != mgmt_cp->type) {
6104                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6105                                         MGMT_STATUS_INVALID_PARAMS,
6106                                         &mgmt_cp->type, sizeof(mgmt_cp->type));
6107                 goto unlock;
6108         }
6109
6110         cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6111         if (!cmd) {
6112                 err = -ENOMEM;
6113                 goto unlock;
6114         }
6115
6116         err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6117                                  stop_discovery_complete);
6118         if (err < 0) {
6119                 mgmt_pending_remove(cmd);
6120                 goto unlock;
6121         }
6122
6123         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6124
6125 unlock:
6126         hci_dev_unlock(hdev);
6127         return err;
6128 }
6129
6130 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6131                         u16 len)
6132 {
6133         struct mgmt_cp_confirm_name *cp = data;
6134         struct inquiry_entry *e;
6135         int err;
6136
6137         bt_dev_dbg(hdev, "sock %p", sk);
6138
6139         hci_dev_lock(hdev);
6140
6141         if (!hci_discovery_active(hdev)) {
6142                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6143                                         MGMT_STATUS_FAILED, &cp->addr,
6144                                         sizeof(cp->addr));
6145                 goto failed;
6146         }
6147
6148         e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6149         if (!e) {
6150                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6151                                         MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6152                                         sizeof(cp->addr));
6153                 goto failed;
6154         }
6155
6156         if (cp->name_known) {
6157                 e->name_state = NAME_KNOWN;
6158                 list_del(&e->list);
6159         } else {
6160                 e->name_state = NAME_NEEDED;
6161                 hci_inquiry_cache_update_resolve(hdev, e);
6162         }
6163
6164         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6165                                 &cp->addr, sizeof(cp->addr));
6166
6167 failed:
6168         hci_dev_unlock(hdev);
6169         return err;
6170 }
6171
6172 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6173                         u16 len)
6174 {
6175         struct mgmt_cp_block_device *cp = data;
6176         u8 status;
6177         int err;
6178
6179         bt_dev_dbg(hdev, "sock %p", sk);
6180
6181         if (!bdaddr_type_is_valid(cp->addr.type))
6182                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6183                                          MGMT_STATUS_INVALID_PARAMS,
6184                                          &cp->addr, sizeof(cp->addr));
6185
6186         hci_dev_lock(hdev);
6187
6188         err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6189                                   cp->addr.type);
6190         if (err < 0) {
6191                 status = MGMT_STATUS_FAILED;
6192                 goto done;
6193         }
6194
6195         mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6196                    sk);
6197         status = MGMT_STATUS_SUCCESS;
6198
6199 done:
6200         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6201                                 &cp->addr, sizeof(cp->addr));
6202
6203         hci_dev_unlock(hdev);
6204
6205         return err;
6206 }
6207
6208 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6209                           u16 len)
6210 {
6211         struct mgmt_cp_unblock_device *cp = data;
6212         u8 status;
6213         int err;
6214
6215         bt_dev_dbg(hdev, "sock %p", sk);
6216
6217         if (!bdaddr_type_is_valid(cp->addr.type))
6218                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6219                                          MGMT_STATUS_INVALID_PARAMS,
6220                                          &cp->addr, sizeof(cp->addr));
6221
6222         hci_dev_lock(hdev);
6223
6224         err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6225                                   cp->addr.type);
6226         if (err < 0) {
6227                 status = MGMT_STATUS_INVALID_PARAMS;
6228                 goto done;
6229         }
6230
6231         mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6232                    sk);
6233         status = MGMT_STATUS_SUCCESS;
6234
6235 done:
6236         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6237                                 &cp->addr, sizeof(cp->addr));
6238
6239         hci_dev_unlock(hdev);
6240
6241         return err;
6242 }
6243
6244 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6245 {
6246         return hci_update_eir_sync(hdev);
6247 }
6248
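     /* Set Device ID stores the DI (Device ID) record values and queues an
      * EIR update so the record gets advertised. Per the management API, a
      * source of 0x0000 disables the record while 0x0001 and 0x0002 select
      * the Bluetooth SIG and USB Implementer's Forum vendor ID namespaces,
      * hence the > 0x0002 check below.
      */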
6249 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6250                          u16 len)
6251 {
6252         struct mgmt_cp_set_device_id *cp = data;
6253         int err;
6254         __u16 source;
6255
6256         bt_dev_dbg(hdev, "sock %p", sk);
6257
6258         source = __le16_to_cpu(cp->source);
6259
6260         if (source > 0x0002)
6261                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6262                                        MGMT_STATUS_INVALID_PARAMS);
6263
6264         hci_dev_lock(hdev);
6265
6266         hdev->devid_source = source;
6267         hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6268         hdev->devid_product = __le16_to_cpu(cp->product);
6269         hdev->devid_version = __le16_to_cpu(cp->version);
6270
6271         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6272                                 NULL, 0);
6273
6274         hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6275
6276         hci_dev_unlock(hdev);
6277
6278         return err;
6279 }
6280
6281 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6282 {
6283         if (err)
6284                 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6285         else
6286                 bt_dev_dbg(hdev, "status %d", err);
6287 }
6288
6289 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6290 {
6291         struct cmd_lookup match = { NULL, hdev };
6292         u8 instance;
6293         struct adv_info *adv_instance;
6294         u8 status = mgmt_status(err);
6295
6296         if (status) {
6297                 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6298                                      cmd_status_rsp, &status);
6299                 return;
6300         }
6301
6302         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6303                 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6304         else
6305                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6306
6307         mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6308                              &match);
6309
6310         new_settings(hdev, match.sk);
6311
6312         if (match.sk)
6313                 sock_put(match.sk);
6314
6315         /* If "Set Advertising" was just disabled and instance advertising was
6316          * set up earlier, then re-enable multi-instance advertising.
6317          */
6318         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6319             list_empty(&hdev->adv_instances))
6320                 return;
6321
6322         instance = hdev->cur_adv_instance;
6323         if (!instance) {
6324                 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6325                                                         struct adv_info, list);
6326                 if (!adv_instance)
6327                         return;
6328
6329                 instance = adv_instance->instance;
6330         }
6331
6332         err = hci_schedule_adv_instance_sync(hdev, instance, true);
6333
6334         enable_advertising_instance(hdev, err);
6335 }
6336
6337 static int set_adv_sync(struct hci_dev *hdev, void *data)
6338 {
6339         struct mgmt_pending_cmd *cmd = data;
6340         struct mgmt_mode *cp = cmd->param;
6341         u8 val = !!cp->val;
6342
6343         if (cp->val == 0x02)
6344                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6345         else
6346                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6347
6348         cancel_adv_timeout(hdev);
6349
6350         if (val) {
6351                 /* Switch to instance "0" for the Set Advertising setting.
6352                  * We cannot use update_[adv|scan_rsp]_data() here as the
6353                  * HCI_ADVERTISING flag is not yet set.
6354                  */
6355                 hdev->cur_adv_instance = 0x00;
6356
6357                 if (ext_adv_capable(hdev)) {
6358                         hci_start_ext_adv_sync(hdev, 0x00);
6359                 } else {
6360                         hci_update_adv_data_sync(hdev, 0x00);
6361                         hci_update_scan_rsp_data_sync(hdev, 0x00);
6362                         hci_enable_advertising_sync(hdev);
6363                 }
6364         } else {
6365                 hci_disable_advertising_sync(hdev);
6366         }
6367
6368         return 0;
6369 }
6370
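     /* Set Advertising: a value of 0x00 disables advertising, 0x01 enables
      * it and 0x02 enables it in connectable mode (tracked via
      * HCI_ADVERTISING_CONNECTABLE). When the controller does not need to
      * be touched (powered off, mesh enabled, existing LE links or an
      * active LE scan), only the flags are toggled and the new settings are
      * reported straight back to user space.
      */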
6371 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6372                            u16 len)
6373 {
6374         struct mgmt_mode *cp = data;
6375         struct mgmt_pending_cmd *cmd;
6376         u8 val, status;
6377         int err;
6378
6379         bt_dev_dbg(hdev, "sock %p", sk);
6380
6381         status = mgmt_le_support(hdev);
6382         if (status)
6383                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6384                                        status);
6385
6386         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6387                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6388                                        MGMT_STATUS_INVALID_PARAMS);
6389
6390         if (hdev->advertising_paused)
6391                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6392                                        MGMT_STATUS_BUSY);
6393
6394         hci_dev_lock(hdev);
6395
6396         val = !!cp->val;
6397
6398         /* The following conditions mean that we should not do any
6399          * HCI communication but instead directly send an mgmt
6400          * response to user space (after toggling the flag if
6401          * necessary).
6402          */
6403         if (!hdev_is_powered(hdev) ||
6404             (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6405              (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6406             hci_dev_test_flag(hdev, HCI_MESH) ||
6407             hci_conn_num(hdev, LE_LINK) > 0 ||
6408             (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6409              hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6410                 bool changed;
6411
6412                 if (cp->val) {
6413                         hdev->cur_adv_instance = 0x00;
6414                         changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6415                         if (cp->val == 0x02)
6416                                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6417                         else
6418                                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6419                 } else {
6420                         changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6421                         hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6422                 }
6423
6424                 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6425                 if (err < 0)
6426                         goto unlock;
6427
6428                 if (changed)
6429                         err = new_settings(hdev, sk);
6430
6431                 goto unlock;
6432         }
6433
6434         if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6435             pending_find(MGMT_OP_SET_LE, hdev)) {
6436                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6437                                       MGMT_STATUS_BUSY);
6438                 goto unlock;
6439         }
6440
6441         cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6442         if (!cmd)
6443                 err = -ENOMEM;
6444         else
6445                 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6446                                          set_advertising_complete);
6447
6448         if (err < 0 && cmd)
6449                 mgmt_pending_remove(cmd);
6450
6451 unlock:
6452         hci_dev_unlock(hdev);
6453         return err;
6454 }
6455
6456 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6457                               void *data, u16 len)
6458 {
6459         struct mgmt_cp_set_static_address *cp = data;
6460         int err;
6461
6462         bt_dev_dbg(hdev, "sock %p", sk);
6463
6464         if (!lmp_le_capable(hdev))
6465                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6466                                        MGMT_STATUS_NOT_SUPPORTED);
6467
6468         if (hdev_is_powered(hdev))
6469                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6470                                        MGMT_STATUS_REJECTED);
6471
6472         if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6473                 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6474                         return mgmt_cmd_status(sk, hdev->id,
6475                                                MGMT_OP_SET_STATIC_ADDRESS,
6476                                                MGMT_STATUS_INVALID_PARAMS);
6477
6478                 /* Two most significant bits shall be set */
6479                 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6480                         return mgmt_cmd_status(sk, hdev->id,
6481                                                MGMT_OP_SET_STATIC_ADDRESS,
6482                                                MGMT_STATUS_INVALID_PARAMS);
6483         }
6484
6485         hci_dev_lock(hdev);
6486
6487         bacpy(&hdev->static_addr, &cp->bdaddr);
6488
6489         err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6490         if (err < 0)
6491                 goto unlock;
6492
6493         err = new_settings(hdev, sk);
6494
6495 unlock:
6496         hci_dev_unlock(hdev);
6497         return err;
6498 }
6499
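     /* Set Scan Parameters updates the LE scan interval and window used for
      * passive/background scanning. Both values are expressed in 0.625 ms
      * units and limited to the 0x0004-0x4000 range of the HCI LE Set Scan
      * Parameters command, and the window may not exceed the interval. For
      * example, interval 0x0800 and window 0x0100 correspond to
      * 2048 * 0.625 ms = 1.28 s and 256 * 0.625 ms = 160 ms. A running
      * background scan is restarted so the new values take effect.
      */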
6500 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6501                            void *data, u16 len)
6502 {
6503         struct mgmt_cp_set_scan_params *cp = data;
6504         __u16 interval, window;
6505         int err;
6506
6507         bt_dev_dbg(hdev, "sock %p", sk);
6508
6509         if (!lmp_le_capable(hdev))
6510                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6511                                        MGMT_STATUS_NOT_SUPPORTED);
6512
6513         interval = __le16_to_cpu(cp->interval);
6514
6515         if (interval < 0x0004 || interval > 0x4000)
6516                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6517                                        MGMT_STATUS_INVALID_PARAMS);
6518
6519         window = __le16_to_cpu(cp->window);
6520
6521         if (window < 0x0004 || window > 0x4000)
6522                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6523                                        MGMT_STATUS_INVALID_PARAMS);
6524
6525         if (window > interval)
6526                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6527                                        MGMT_STATUS_INVALID_PARAMS);
6528
6529         hci_dev_lock(hdev);
6530
6531         hdev->le_scan_interval = interval;
6532         hdev->le_scan_window = window;
6533
6534         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6535                                 NULL, 0);
6536
6537         /* If background scan is running, restart it so new parameters are
6538          * loaded.
6539          */
6540         if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6541             hdev->discovery.state == DISCOVERY_STOPPED)
6542                 hci_update_passive_scan(hdev);
6543
6544         hci_dev_unlock(hdev);
6545
6546         return err;
6547 }
6548
6549 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6550 {
6551         struct mgmt_pending_cmd *cmd = data;
6552
6553         bt_dev_dbg(hdev, "err %d", err);
6554
6555         if (err) {
6556                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6557                                 mgmt_status(err));
6558         } else {
6559                 struct mgmt_mode *cp = cmd->param;
6560
6561                 if (cp->val)
6562                         hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6563                 else
6564                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6565
6566                 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6567                 new_settings(hdev, cmd->sk);
6568         }
6569
6570         mgmt_pending_free(cmd);
6571 }
6572
6573 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6574 {
6575         struct mgmt_pending_cmd *cmd = data;
6576         struct mgmt_mode *cp = cmd->param;
6577
6578         return hci_write_fast_connectable_sync(hdev, cp->val);
6579 }
6580
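     /* Set Fast Connectable tunes the BR/EDR page scan parameters for
      * quicker incoming connections. It requires BR/EDR on a controller of
      * at least Bluetooth 1.2; when powered off only the
      * HCI_FAST_CONNECTABLE flag is toggled, otherwise the write is queued
      * and the flag is committed in fast_connectable_complete().
      */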
6581 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6582                                 void *data, u16 len)
6583 {
6584         struct mgmt_mode *cp = data;
6585         struct mgmt_pending_cmd *cmd;
6586         int err;
6587
6588         bt_dev_dbg(hdev, "sock %p", sk);
6589
6590         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6591             hdev->hci_ver < BLUETOOTH_VER_1_2)
6592                 return mgmt_cmd_status(sk, hdev->id,
6593                                        MGMT_OP_SET_FAST_CONNECTABLE,
6594                                        MGMT_STATUS_NOT_SUPPORTED);
6595
6596         if (cp->val != 0x00 && cp->val != 0x01)
6597                 return mgmt_cmd_status(sk, hdev->id,
6598                                        MGMT_OP_SET_FAST_CONNECTABLE,
6599                                        MGMT_STATUS_INVALID_PARAMS);
6600
6601         hci_dev_lock(hdev);
6602
6603         if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6604                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6605                 goto unlock;
6606         }
6607
6608         if (!hdev_is_powered(hdev)) {
6609                 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6610                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6611                 new_settings(hdev, sk);
6612                 goto unlock;
6613         }
6614
6615         cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6616                                len);
6617         if (!cmd)
6618                 err = -ENOMEM;
6619         else
6620                 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6621                                          fast_connectable_complete);
6622
6623         if (err < 0) {
6624                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6625                                 MGMT_STATUS_FAILED);
6626
6627                 if (cmd)
6628                         mgmt_pending_free(cmd);
6629         }
6630
6631 unlock:
6632         hci_dev_unlock(hdev);
6633
6634         return err;
6635 }
6636
6637 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6638 {
6639         struct mgmt_pending_cmd *cmd = data;
6640
6641         bt_dev_dbg(hdev, "err %d", err);
6642
6643         if (err) {
6644                 u8 mgmt_err = mgmt_status(err);
6645
6646                 /* We need to restore the flag if related HCI commands
6647                  * failed.
6648                  */
6649                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6650
6651                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6652         } else {
6653                 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6654                 new_settings(hdev, cmd->sk);
6655         }
6656
6657         mgmt_pending_free(cmd);
6658 }
6659
6660 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6661 {
6662         int status;
6663
6664         status = hci_write_fast_connectable_sync(hdev, false);
6665
6666         if (!status)
6667                 status = hci_update_scan_sync(hdev);
6668
6669         /* Since only the advertising data flags will change, there
6670          * is no need to update the scan response data.
6671          */
6672         if (!status)
6673                 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6674
6675         return status;
6676 }
6677
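     /* Set BR/EDR toggles BR/EDR support on a dual-mode (BR/EDR + LE)
      * controller. Disabling is only permitted while powered off; while
      * powered on the command can only re-enable BR/EDR, and even that is
      * rejected for an LE-only configuration using a static address or with
      * Secure Connections enabled (see the comment below).
      */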
6678 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6679 {
6680         struct mgmt_mode *cp = data;
6681         struct mgmt_pending_cmd *cmd;
6682         int err;
6683
6684         bt_dev_dbg(hdev, "sock %p", sk);
6685
6686         if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6687                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6688                                        MGMT_STATUS_NOT_SUPPORTED);
6689
6690         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6691                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6692                                        MGMT_STATUS_REJECTED);
6693
6694         if (cp->val != 0x00 && cp->val != 0x01)
6695                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6696                                        MGMT_STATUS_INVALID_PARAMS);
6697
6698         hci_dev_lock(hdev);
6699
6700         if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6701                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6702                 goto unlock;
6703         }
6704
6705         if (!hdev_is_powered(hdev)) {
6706                 if (!cp->val) {
6707                         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6708                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6709                         hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6710                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6711                 }
6712
6713                 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6714
6715                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6716                 if (err < 0)
6717                         goto unlock;
6718
6719                 err = new_settings(hdev, sk);
6720                 goto unlock;
6721         }
6722
6723         /* Reject disabling when powered on */
6724         if (!cp->val) {
6725                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6726                                       MGMT_STATUS_REJECTED);
6727                 goto unlock;
6728         } else {
6729                 /* When configuring a dual-mode controller to operate
6730                  * with LE only and using a static address, then switching
6731                  * BR/EDR back on is not allowed.
6732                  *
6733                  * Dual-mode controllers shall operate with the public
6734                  * address as their identity address for BR/EDR and LE. So
6735                  * reject the attempt to create an invalid configuration.
6736                  *
6737                  * The same restriction applies when Secure Connections
6738                  * has been enabled. For BR/EDR this is a controller feature
6739                  * while for LE it is a host stack feature. This means that
6740                  * switching BR/EDR back on when Secure Connections has been
6741                  * enabled is not a supported transaction.
6742                  */
6743                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6744                     (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6745                      hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6746                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6747                                               MGMT_STATUS_REJECTED);
6748                         goto unlock;
6749                 }
6750         }
6751
6752         cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6753         if (!cmd)
6754                 err = -ENOMEM;
6755         else
6756                 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6757                                          set_bredr_complete);
6758
6759         if (err < 0) {
6760                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6761                                 MGMT_STATUS_FAILED);
6762                 if (cmd)
6763                         mgmt_pending_free(cmd);
6764
6765                 goto unlock;
6766         }
6767
6768         /* We need to flip the bit already here so that the advertising
6769          * data update in set_bredr_sync() generates the correct flags.
6770          */
6771         hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6772
6773 unlock:
6774         hci_dev_unlock(hdev);
6775         return err;
6776 }
6777
6778 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6779 {
6780         struct mgmt_pending_cmd *cmd = data;
6781         struct mgmt_mode *cp;
6782
6783         bt_dev_dbg(hdev, "err %d", err);
6784
6785         if (err) {
6786                 u8 mgmt_err = mgmt_status(err);
6787
6788                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6789                 goto done;
6790         }
6791
6792         cp = cmd->param;
6793
6794         switch (cp->val) {
6795         case 0x00:
6796                 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6797                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6798                 break;
6799         case 0x01:
6800                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6801                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6802                 break;
6803         case 0x02:
6804                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6805                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6806                 break;
6807         }
6808
6809         send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6810         new_settings(hdev, cmd->sk);
6811
6812 done:
6813         mgmt_pending_free(cmd);
6814 }
6815
6816 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6817 {
6818         struct mgmt_pending_cmd *cmd = data;
6819         struct mgmt_mode *cp = cmd->param;
6820         u8 val = !!cp->val;
6821
6822         /* Force write of val */
6823         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6824
6825         return hci_write_sc_support_sync(hdev, val);
6826 }
6827
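     /* Set Secure Connections: 0x00 disables SC, 0x01 enables it and 0x02
      * enables SC-only mode (HCI_SC_ONLY). When the controller is
      * unpowered, not SC capable or has BR/EDR disabled, only the flags are
      * updated; otherwise the support write is queued and the flags are
      * committed in set_secure_conn_complete() based on the requested value.
      */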
6828 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6829                            void *data, u16 len)
6830 {
6831         struct mgmt_mode *cp = data;
6832         struct mgmt_pending_cmd *cmd;
6833         u8 val;
6834         int err;
6835
6836         bt_dev_dbg(hdev, "sock %p", sk);
6837
6838         if (!lmp_sc_capable(hdev) &&
6839             !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6840                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6841                                        MGMT_STATUS_NOT_SUPPORTED);
6842
6843         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6844             lmp_sc_capable(hdev) &&
6845             !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6846                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6847                                        MGMT_STATUS_REJECTED);
6848
6849         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6850                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6851                                        MGMT_STATUS_INVALID_PARAMS);
6852
6853         hci_dev_lock(hdev);
6854
6855         if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6856             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6857                 bool changed;
6858
6859                 if (cp->val) {
6860                         changed = !hci_dev_test_and_set_flag(hdev,
6861                                                              HCI_SC_ENABLED);
6862                         if (cp->val == 0x02)
6863                                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6864                         else
6865                                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6866                 } else {
6867                         changed = hci_dev_test_and_clear_flag(hdev,
6868                                                               HCI_SC_ENABLED);
6869                         hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6870                 }
6871
6872                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6873                 if (err < 0)
6874                         goto failed;
6875
6876                 if (changed)
6877                         err = new_settings(hdev, sk);
6878
6879                 goto failed;
6880         }
6881
6882         val = !!cp->val;
6883
6884         if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6885             (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6886                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6887                 goto failed;
6888         }
6889
6890         cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6891         if (!cmd)
6892                 err = -ENOMEM;
6893         else
6894                 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6895                                          set_secure_conn_complete);
6896
6897         if (err < 0) {
6898                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6899                                 MGMT_STATUS_FAILED);
6900                 if (cmd)
6901                         mgmt_pending_free(cmd);
6902         }
6903
6904 failed:
6905         hci_dev_unlock(hdev);
6906         return err;
6907 }
6908
6909 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6910                           void *data, u16 len)
6911 {
6912         struct mgmt_mode *cp = data;
6913         bool changed, use_changed;
6914         int err;
6915
6916         bt_dev_dbg(hdev, "sock %p", sk);
6917
6918         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6919                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6920                                        MGMT_STATUS_INVALID_PARAMS);
6921
6922         hci_dev_lock(hdev);
6923
6924         if (cp->val)
6925                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6926         else
6927                 changed = hci_dev_test_and_clear_flag(hdev,
6928                                                       HCI_KEEP_DEBUG_KEYS);
6929
6930         if (cp->val == 0x02)
6931                 use_changed = !hci_dev_test_and_set_flag(hdev,
6932                                                          HCI_USE_DEBUG_KEYS);
6933         else
6934                 use_changed = hci_dev_test_and_clear_flag(hdev,
6935                                                           HCI_USE_DEBUG_KEYS);
6936
6937         if (hdev_is_powered(hdev) && use_changed &&
6938             hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6939                 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6940                 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6941                              sizeof(mode), &mode);
6942         }
6943
6944         err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6945         if (err < 0)
6946                 goto unlock;
6947
6948         if (changed)
6949                 err = new_settings(hdev, sk);
6950
6951 unlock:
6952         hci_dev_unlock(hdev);
6953         return err;
6954 }
6955
6956 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6957                        u16 len)
6958 {
6959         struct mgmt_cp_set_privacy *cp = cp_data;
6960         bool changed;
6961         int err;
6962
6963         bt_dev_dbg(hdev, "sock %p", sk);
6964
6965         if (!lmp_le_capable(hdev))
6966                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6967                                        MGMT_STATUS_NOT_SUPPORTED);
6968
6969         if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6970                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6971                                        MGMT_STATUS_INVALID_PARAMS);
6972
6973         if (hdev_is_powered(hdev))
6974                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6975                                        MGMT_STATUS_REJECTED);
6976
6977         hci_dev_lock(hdev);
6978
6979         /* If user space supports this command it is also expected to
6980          * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6981          */
6982         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6983
6984         if (cp->privacy) {
6985                 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6986                 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6987                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6988                 hci_adv_instances_set_rpa_expired(hdev, true);
6989                 if (cp->privacy == 0x02)
6990                         hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6991                 else
6992                         hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6993         } else {
6994                 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6995                 memset(hdev->irk, 0, sizeof(hdev->irk));
6996                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6997                 hci_adv_instances_set_rpa_expired(hdev, false);
6998                 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6999         }
7000
7001         err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7002         if (err < 0)
7003                 goto unlock;
7004
7005         if (changed)
7006                 err = new_settings(hdev, sk);
7007
7008 unlock:
7009         hci_dev_unlock(hdev);
7010         return err;
7011 }
7012
7013 static bool irk_is_valid(struct mgmt_irk_info *irk)
7014 {
7015         switch (irk->addr.type) {
7016         case BDADDR_LE_PUBLIC:
7017                 return true;
7018
7019         case BDADDR_LE_RANDOM:
7020                 /* Two most significant bits shall be set */
7021                 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7022                         return false;
7023                 return true;
7024         }
7025
7026         return false;
7027 }
7028
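     /* Load IRKs replaces the stored SMP Identity Resolving Keys in one go.
      * The command is variable length: struct mgmt_cp_load_irks is followed
      * by irk_count entries of struct mgmt_irk_info, which is what the
      * struct_size() based length check verifies (e.g. two IRKs give
      * sizeof(*cp) + 2 * sizeof(struct mgmt_irk_info) bytes). Blocked keys
      * are skipped and RPA resolving is enabled once the list is loaded.
      */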
7029 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7030                      u16 len)
7031 {
7032         struct mgmt_cp_load_irks *cp = cp_data;
7033         const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7034                                    sizeof(struct mgmt_irk_info));
7035         u16 irk_count, expected_len;
7036         int i, err;
7037
7038         bt_dev_dbg(hdev, "sock %p", sk);
7039
7040         if (!lmp_le_capable(hdev))
7041                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7042                                        MGMT_STATUS_NOT_SUPPORTED);
7043
7044         irk_count = __le16_to_cpu(cp->irk_count);
7045         if (irk_count > max_irk_count) {
7046                 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7047                            irk_count);
7048                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7049                                        MGMT_STATUS_INVALID_PARAMS);
7050         }
7051
7052         expected_len = struct_size(cp, irks, irk_count);
7053         if (expected_len != len) {
7054                 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7055                            expected_len, len);
7056                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7057                                        MGMT_STATUS_INVALID_PARAMS);
7058         }
7059
7060         bt_dev_dbg(hdev, "irk_count %u", irk_count);
7061
7062         for (i = 0; i < irk_count; i++) {
7063                 struct mgmt_irk_info *key = &cp->irks[i];
7064
7065                 if (!irk_is_valid(key))
7066                         return mgmt_cmd_status(sk, hdev->id,
7067                                                MGMT_OP_LOAD_IRKS,
7068                                                MGMT_STATUS_INVALID_PARAMS);
7069         }
7070
7071         hci_dev_lock(hdev);
7072
7073         hci_smp_irks_clear(hdev);
7074
7075         for (i = 0; i < irk_count; i++) {
7076                 struct mgmt_irk_info *irk = &cp->irks[i];
7077                 u8 addr_type = le_addr_type(irk->addr.type);
7078
7079                 if (hci_is_blocked_key(hdev,
7080                                        HCI_BLOCKED_KEY_TYPE_IRK,
7081                                        irk->val)) {
7082                         bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7083                                     &irk->addr.bdaddr);
7084                         continue;
7085                 }
7086
7087                 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7088                 if (irk->addr.type == BDADDR_BREDR)
7089                         addr_type = BDADDR_BREDR;
7090
7091                 hci_add_irk(hdev, &irk->addr.bdaddr,
7092                             addr_type, irk->val,
7093                             BDADDR_ANY);
7094         }
7095
7096         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7097
7098         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7099
7100         hci_dev_unlock(hdev);
7101
7102         return err;
7103 }
7104
7105 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7106 {
7107         if (key->initiator != 0x00 && key->initiator != 0x01)
7108                 return false;
7109
7110         switch (key->addr.type) {
7111         case BDADDR_LE_PUBLIC:
7112                 return true;
7113
7114         case BDADDR_LE_RANDOM:
7115                 /* Two most significant bits shall be set */
7116                 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7117                         return false;
7118                 return true;
7119         }
7120
7121         return false;
7122 }
7123
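     /* Load Long Term Keys replaces the stored SMP LTKs. Every entry is
      * validated first (initiator must be 0x00 or 0x01 and random addresses
      * must be static), blocked keys are skipped, and the mgmt key type is
      * mapped to the matching SMP_LTK_* type before the key is added with
      * hci_add_ltk().
      */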
7124 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7125                                void *cp_data, u16 len)
7126 {
7127         struct mgmt_cp_load_long_term_keys *cp = cp_data;
7128         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7129                                    sizeof(struct mgmt_ltk_info));
7130         u16 key_count, expected_len;
7131         int i, err;
7132
7133         bt_dev_dbg(hdev, "sock %p", sk);
7134
7135         if (!lmp_le_capable(hdev))
7136                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7137                                        MGMT_STATUS_NOT_SUPPORTED);
7138
7139         key_count = __le16_to_cpu(cp->key_count);
7140         if (key_count > max_key_count) {
7141                 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7142                            key_count);
7143                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7144                                        MGMT_STATUS_INVALID_PARAMS);
7145         }
7146
7147         expected_len = struct_size(cp, keys, key_count);
7148         if (expected_len != len) {
7149                 bt_dev_err(hdev, "load_ltks: expected %u bytes, got %u bytes",
7150                            expected_len, len);
7151                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7152                                        MGMT_STATUS_INVALID_PARAMS);
7153         }
7154
7155         bt_dev_dbg(hdev, "key_count %u", key_count);
7156
7157         for (i = 0; i < key_count; i++) {
7158                 struct mgmt_ltk_info *key = &cp->keys[i];
7159
7160                 if (!ltk_is_valid(key))
7161                         return mgmt_cmd_status(sk, hdev->id,
7162                                                MGMT_OP_LOAD_LONG_TERM_KEYS,
7163                                                MGMT_STATUS_INVALID_PARAMS);
7164         }
7165
7166         hci_dev_lock(hdev);
7167
7168         hci_smp_ltks_clear(hdev);
7169
7170         for (i = 0; i < key_count; i++) {
7171                 struct mgmt_ltk_info *key = &cp->keys[i];
7172                 u8 type, authenticated;
7173                 u8 addr_type = le_addr_type(key->addr.type);
7174
7175                 if (hci_is_blocked_key(hdev,
7176                                        HCI_BLOCKED_KEY_TYPE_LTK,
7177                                        key->val)) {
7178                         bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7179                                     &key->addr.bdaddr);
7180                         continue;
7181                 }
7182
7183                 switch (key->type) {
7184                 case MGMT_LTK_UNAUTHENTICATED:
7185                         authenticated = 0x00;
7186                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7187                         break;
7188                 case MGMT_LTK_AUTHENTICATED:
7189                         authenticated = 0x01;
7190                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7191                         break;
7192                 case MGMT_LTK_P256_UNAUTH:
7193                         authenticated = 0x00;
7194                         type = SMP_LTK_P256;
7195                         break;
7196                 case MGMT_LTK_P256_AUTH:
7197                         authenticated = 0x01;
7198                         type = SMP_LTK_P256;
7199                         break;
7200                 case MGMT_LTK_P256_DEBUG:
7201                         authenticated = 0x00;
7202                         type = SMP_LTK_P256_DEBUG;
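                              /* Debug LTKs are accepted but never stored: the
                               * fallthrough lands in the default case below,
                               * which skips the entry.
                               */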
7203                         fallthrough;
7204                 default:
7205                         continue;
7206                 }
7207
7208                 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7209                 if (key->addr.type == BDADDR_BREDR)
7210                         addr_type = BDADDR_BREDR;
7211
7212                 hci_add_ltk(hdev, &key->addr.bdaddr,
7213                             addr_type, type, authenticated,
7214                             key->val, key->enc_size, key->ediv, key->rand);
7215         }
7216
7217         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7218                                 NULL, 0);
7219
7220         hci_dev_unlock(hdev);
7221
7222         return err;
7223 }
7224
7225 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7226 {
7227         struct mgmt_pending_cmd *cmd = data;
7228         struct hci_conn *conn = cmd->user_data;
7229         struct mgmt_cp_get_conn_info *cp = cmd->param;
7230         struct mgmt_rp_get_conn_info rp;
7231         u8 status;
7232
7233         bt_dev_dbg(hdev, "err %d", err);
7234
7235         memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7236
7237         status = mgmt_status(err);
7238         if (status == MGMT_STATUS_SUCCESS) {
7239                 rp.rssi = conn->rssi;
7240                 rp.tx_power = conn->tx_power;
7241                 rp.max_tx_power = conn->max_tx_power;
7242         } else {
7243                 rp.rssi = HCI_RSSI_INVALID;
7244                 rp.tx_power = HCI_TX_POWER_INVALID;
7245                 rp.max_tx_power = HCI_TX_POWER_INVALID;
7246         }
7247
7248         mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7249                           &rp, sizeof(rp));
7250
7251         mgmt_pending_free(cmd);
7252 }
7253
7254 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7255 {
7256         struct mgmt_pending_cmd *cmd = data;
7257         struct mgmt_cp_get_conn_info *cp = cmd->param;
7258         struct hci_conn *conn;
7259         int err;
7260         __le16   handle;
7261
7262         /* Make sure we are still connected */
7263         if (cp->addr.type == BDADDR_BREDR)
7264                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7265                                                &cp->addr.bdaddr);
7266         else
7267                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7268
7269         if (!conn || conn->state != BT_CONNECTED)
7270                 return MGMT_STATUS_NOT_CONNECTED;
7271
7272         cmd->user_data = conn;
7273         handle = cpu_to_le16(conn->handle);
7274
7275         /* Refresh RSSI each time */
7276         err = hci_read_rssi_sync(hdev, handle);
7277
7278         /* For LE links the TX power does not change, so there is no need
7279          * to query for it again once the value is known.
7280          */
7281         if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7282                      conn->tx_power == HCI_TX_POWER_INVALID))
7283                 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7284
7285         /* Max TX power needs to be read only once per connection */
7286         if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7287                 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7288
7289         return err;
7290 }
7291
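     /* Get Connection Information returns RSSI and TX power for an active
      * connection. The values are cached in the hci_conn and only refreshed
      * once the cache is older than a randomized age between
      * conn_info_min_age and conn_info_max_age, so user space cannot infer
      * the exact refresh interval by polling.
      */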
7292 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7293                          u16 len)
7294 {
7295         struct mgmt_cp_get_conn_info *cp = data;
7296         struct mgmt_rp_get_conn_info rp;
7297         struct hci_conn *conn;
7298         unsigned long conn_info_age;
7299         int err = 0;
7300
7301         bt_dev_dbg(hdev, "sock %p", sk);
7302
7303         memset(&rp, 0, sizeof(rp));
7304         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7305         rp.addr.type = cp->addr.type;
7306
7307         if (!bdaddr_type_is_valid(cp->addr.type))
7308                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7309                                          MGMT_STATUS_INVALID_PARAMS,
7310                                          &rp, sizeof(rp));
7311
7312         hci_dev_lock(hdev);
7313
7314         if (!hdev_is_powered(hdev)) {
7315                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7316                                         MGMT_STATUS_NOT_POWERED, &rp,
7317                                         sizeof(rp));
7318                 goto unlock;
7319         }
7320
7321         if (cp->addr.type == BDADDR_BREDR)
7322                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7323                                                &cp->addr.bdaddr);
7324         else
7325                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7326
7327         if (!conn || conn->state != BT_CONNECTED) {
7328                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7329                                         MGMT_STATUS_NOT_CONNECTED, &rp,
7330                                         sizeof(rp));
7331                 goto unlock;
7332         }
7333
7334         /* To avoid the client trying to guess when to poll again, calculate
7335          * the conn info age as a random value between the min/max set in hdev.
7336          */
7337         conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7338                                                  hdev->conn_info_max_age - 1);
7339
7340         /* Query controller to refresh cached values if they are too old or were
7341          * never read.
7342          */
7343         if (time_after(jiffies, conn->conn_info_timestamp +
7344                        msecs_to_jiffies(conn_info_age)) ||
7345             !conn->conn_info_timestamp) {
7346                 struct mgmt_pending_cmd *cmd;
7347
7348                 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7349                                        len);
7350                 if (!cmd) {
7351                         err = -ENOMEM;
7352                 } else {
7353                         err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7354                                                  cmd, get_conn_info_complete);
7355                 }
7356
7357                 if (err < 0) {
7358                         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7359                                           MGMT_STATUS_FAILED, &rp, sizeof(rp));
7360
7361                         if (cmd)
7362                                 mgmt_pending_free(cmd);
7363
7364                         goto unlock;
7365                 }
7366
7367                 conn->conn_info_timestamp = jiffies;
7368         } else {
7369                 /* Cache is valid, just reply with values cached in hci_conn */
7370                 rp.rssi = conn->rssi;
7371                 rp.tx_power = conn->tx_power;
7372                 rp.max_tx_power = conn->max_tx_power;
7373
7374                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7375                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7376         }
7377
7378 unlock:
7379         hci_dev_unlock(hdev);
7380         return err;
7381 }
7382
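/* Completion callback for MGMT_OP_GET_CLOCK_INFO: report the local clock
 * and, if a connection was looked up, its piconet clock and accuracy.
 */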
7383 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7384 {
7385         struct mgmt_pending_cmd *cmd = data;
7386         struct mgmt_cp_get_clock_info *cp = cmd->param;
7387         struct mgmt_rp_get_clock_info rp;
7388         struct hci_conn *conn = cmd->user_data;
7389         u8 status = mgmt_status(err);
7390
7391         bt_dev_dbg(hdev, "err %d", err);
7392
7393         memset(&rp, 0, sizeof(rp));
7394         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7395         rp.addr.type = cp->addr.type;
7396
7397         if (err)
7398                 goto complete;
7399
7400         rp.local_clock = cpu_to_le32(hdev->clock);
7401
7402         if (conn) {
7403                 rp.piconet_clock = cpu_to_le32(conn->clock);
7404                 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7405         }
7406
7407 complete:
7408         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7409                           sizeof(rp));
7410
7411         mgmt_pending_free(cmd);
7412 }
7413
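/* Runs from the hci_sync context: read the local clock first and then, if
 * the connection is still established, the piconet clock for it.
 */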
7414 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7415 {
7416         struct mgmt_pending_cmd *cmd = data;
7417         struct mgmt_cp_get_clock_info *cp = cmd->param;
7418         struct hci_cp_read_clock hci_cp;
7419         struct hci_conn *conn;
7420
7421         memset(&hci_cp, 0, sizeof(hci_cp));
7422         hci_read_clock_sync(hdev, &hci_cp);
7423
7424         /* Make sure connection still exists */
7425         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7426         if (!conn || conn->state != BT_CONNECTED)
7427                 return MGMT_STATUS_NOT_CONNECTED;
7428
7429         cmd->user_data = conn;
7430         hci_cp.handle = cpu_to_le16(conn->handle);
7431         hci_cp.which = 0x01; /* Piconet clock */
7432
7433         return hci_read_clock_sync(hdev, &hci_cp);
7434 }
7435
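/* Handle MGMT_OP_GET_CLOCK_INFO: only BR/EDR addresses are accepted and the
 * controller must be powered; the actual clock reads are queued via
 * get_clock_info_sync.
 */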
7436 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7437                                                                 u16 len)
7438 {
7439         struct mgmt_cp_get_clock_info *cp = data;
7440         struct mgmt_rp_get_clock_info rp;
7441         struct mgmt_pending_cmd *cmd;
7442         struct hci_conn *conn;
7443         int err;
7444
7445         bt_dev_dbg(hdev, "sock %p", sk);
7446
7447         memset(&rp, 0, sizeof(rp));
7448         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7449         rp.addr.type = cp->addr.type;
7450
7451         if (cp->addr.type != BDADDR_BREDR)
7452                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7453                                          MGMT_STATUS_INVALID_PARAMS,
7454                                          &rp, sizeof(rp));
7455
7456         hci_dev_lock(hdev);
7457
7458         if (!hdev_is_powered(hdev)) {
7459                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7460                                         MGMT_STATUS_NOT_POWERED, &rp,
7461                                         sizeof(rp));
7462                 goto unlock;
7463         }
7464
7465         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7466                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7467                                                &cp->addr.bdaddr);
7468                 if (!conn || conn->state != BT_CONNECTED) {
7469                         err = mgmt_cmd_complete(sk, hdev->id,
7470                                                 MGMT_OP_GET_CLOCK_INFO,
7471                                                 MGMT_STATUS_NOT_CONNECTED,
7472                                                 &rp, sizeof(rp));
7473                         goto unlock;
7474                 }
7475         } else {
7476                 conn = NULL;
7477         }
7478
7479         cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7480         if (!cmd)
7481                 err = -ENOMEM;
7482         else
7483                 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7484                                          get_clock_info_complete);
7485
7486         if (err < 0) {
7487                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7488                                         MGMT_STATUS_FAILED, &rp, sizeof(rp));
7489
7490                 if (cmd)
7491                         mgmt_pending_free(cmd);
7492         }
7493
7495 unlock:
7496         hci_dev_unlock(hdev);
7497         return err;
7498 }
7499
7500 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7501 {
7502         struct hci_conn *conn;
7503
7504         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7505         if (!conn)
7506                 return false;
7507
7508         if (conn->dst_type != type)
7509                 return false;
7510
7511         if (conn->state != BT_CONNECTED)
7512                 return false;
7513
7514         return true;
7515 }
7516
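/* Set the auto-connect policy for the given address, creating the
 * connection parameters if needed and moving them onto the matching
 * pend_le_conns/pend_le_reports list.
 */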
7517 /* This function requires the caller holds hdev->lock */
7518 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7519                                u8 addr_type, u8 auto_connect)
7520 {
7521         struct hci_conn_params *params;
7522
7523         params = hci_conn_params_add(hdev, addr, addr_type);
7524         if (!params)
7525                 return -EIO;
7526
7527         if (params->auto_connect == auto_connect)
7528                 return 0;
7529
7530         hci_pend_le_list_del_init(params);
7531
7532         switch (auto_connect) {
7533         case HCI_AUTO_CONN_DISABLED:
7534         case HCI_AUTO_CONN_LINK_LOSS:
7535                 /* If auto connect is being disabled while we're trying to
7536                  * connect to the device, keep connecting.
7537                  */
7538                 if (params->explicit_connect)
7539                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
7540                 break;
7541         case HCI_AUTO_CONN_REPORT:
7542                 if (params->explicit_connect)
7543                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
7544                 else
7545                         hci_pend_le_list_add(params, &hdev->pend_le_reports);
7546                 break;
7547         case HCI_AUTO_CONN_DIRECT:
7548         case HCI_AUTO_CONN_ALWAYS:
7549                 if (!is_connected(hdev, addr, addr_type))
7550                         hci_pend_le_list_add(params, &hdev->pend_le_conns);
7551                 break;
7552         }
7553
7554         params->auto_connect = auto_connect;
7555
7556         bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7557                    addr, addr_type, auto_connect);
7558
7559         return 0;
7560 }
7561
7562 static void device_added(struct sock *sk, struct hci_dev *hdev,
7563                          bdaddr_t *bdaddr, u8 type, u8 action)
7564 {
7565         struct mgmt_ev_device_added ev;
7566
7567         bacpy(&ev.addr.bdaddr, bdaddr);
7568         ev.addr.type = type;
7569         ev.action = action;
7570
7571         mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7572 }
7573
7574 static int add_device_sync(struct hci_dev *hdev, void *data)
7575 {
7576         return hci_update_passive_scan_sync(hdev);
7577 }
7578
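/* Handle MGMT_OP_ADD_DEVICE: BR/EDR addresses are added to the accept list
 * (incoming connections only), while LE addresses get connection parameters
 * with an auto-connect policy derived from the requested action, followed
 * by a passive scan update.
 */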
7579 static int add_device(struct sock *sk, struct hci_dev *hdev,
7580                       void *data, u16 len)
7581 {
7582         struct mgmt_cp_add_device *cp = data;
7583         u8 auto_conn, addr_type;
7584         struct hci_conn_params *params;
7585         int err;
7586         u32 current_flags = 0;
7587         u32 supported_flags;
7588
7589         bt_dev_dbg(hdev, "sock %p", sk);
7590
7591         if (!bdaddr_type_is_valid(cp->addr.type) ||
7592             !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7593                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7594                                          MGMT_STATUS_INVALID_PARAMS,
7595                                          &cp->addr, sizeof(cp->addr));
7596
7597         if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7598                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7599                                          MGMT_STATUS_INVALID_PARAMS,
7600                                          &cp->addr, sizeof(cp->addr));
7601
7602         hci_dev_lock(hdev);
7603
7604         if (cp->addr.type == BDADDR_BREDR) {
7605                 /* Only the incoming connections action is supported for now */
7606                 if (cp->action != 0x01) {
7607                         err = mgmt_cmd_complete(sk, hdev->id,
7608                                                 MGMT_OP_ADD_DEVICE,
7609                                                 MGMT_STATUS_INVALID_PARAMS,
7610                                                 &cp->addr, sizeof(cp->addr));
7611                         goto unlock;
7612                 }
7613
7614                 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7615                                                      &cp->addr.bdaddr,
7616                                                      cp->addr.type, 0);
7617                 if (err)
7618                         goto unlock;
7619
7620                 hci_update_scan(hdev);
7621
7622                 goto added;
7623         }
7624
7625         addr_type = le_addr_type(cp->addr.type);
7626
7627         if (cp->action == 0x02)
7628                 auto_conn = HCI_AUTO_CONN_ALWAYS;
7629         else if (cp->action == 0x01)
7630                 auto_conn = HCI_AUTO_CONN_DIRECT;
7631         else
7632                 auto_conn = HCI_AUTO_CONN_REPORT;
7633
7634         /* The kernel internally uses conn_params with resolvable private
7635          * addresses, but Add Device allows only identity addresses.
7636          * Make sure this is enforced before calling
7637          * hci_conn_params_lookup.
7638          */
7639         if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7640                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7641                                         MGMT_STATUS_INVALID_PARAMS,
7642                                         &cp->addr, sizeof(cp->addr));
7643                 goto unlock;
7644         }
7645
7646         /* If the connection parameters don't exist for this device,
7647          * they will be created and configured with defaults.
7648          */
7649         if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7650                                 auto_conn) < 0) {
7651                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7652                                         MGMT_STATUS_FAILED, &cp->addr,
7653                                         sizeof(cp->addr));
7654                 goto unlock;
7655         } else {
7656                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7657                                                 addr_type);
7658                 if (params)
7659                         current_flags = params->flags;
7660         }
7661
7662         err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7663         if (err < 0)
7664                 goto unlock;
7665
7666 added:
7667         device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7668         supported_flags = hdev->conn_flags;
7669         device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7670                              supported_flags, current_flags);
7671
7672         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7673                                 MGMT_STATUS_SUCCESS, &cp->addr,
7674                                 sizeof(cp->addr));
7675
7676 unlock:
7677         hci_dev_unlock(hdev);
7678         return err;
7679 }
7680
7681 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7682                            bdaddr_t *bdaddr, u8 type)
7683 {
7684         struct mgmt_ev_device_removed ev;
7685
7686         bacpy(&ev.addr.bdaddr, bdaddr);
7687         ev.addr.type = type;
7688
7689         mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7690 }
7691
7692 static int remove_device_sync(struct hci_dev *hdev, void *data)
7693 {
7694         return hci_update_passive_scan_sync(hdev);
7695 }
7696
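/* Handle MGMT_OP_REMOVE_DEVICE: remove a single entry from the accept list
 * or the LE connection parameters, or, when BDADDR_ANY is given, clear all
 * accept list entries and all non-disabled LE connection parameters.
 */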
7697 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7698                          void *data, u16 len)
7699 {
7700         struct mgmt_cp_remove_device *cp = data;
7701         int err;
7702
7703         bt_dev_dbg(hdev, "sock %p", sk);
7704
7705         hci_dev_lock(hdev);
7706
7707         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7708                 struct hci_conn_params *params;
7709                 u8 addr_type;
7710
7711                 if (!bdaddr_type_is_valid(cp->addr.type)) {
7712                         err = mgmt_cmd_complete(sk, hdev->id,
7713                                                 MGMT_OP_REMOVE_DEVICE,
7714                                                 MGMT_STATUS_INVALID_PARAMS,
7715                                                 &cp->addr, sizeof(cp->addr));
7716                         goto unlock;
7717                 }
7718
7719                 if (cp->addr.type == BDADDR_BREDR) {
7720                         err = hci_bdaddr_list_del(&hdev->accept_list,
7721                                                   &cp->addr.bdaddr,
7722                                                   cp->addr.type);
7723                         if (err) {
7724                                 err = mgmt_cmd_complete(sk, hdev->id,
7725                                                         MGMT_OP_REMOVE_DEVICE,
7726                                                         MGMT_STATUS_INVALID_PARAMS,
7727                                                         &cp->addr,
7728                                                         sizeof(cp->addr));
7729                                 goto unlock;
7730                         }
7731
7732                         hci_update_scan(hdev);
7733
7734                         device_removed(sk, hdev, &cp->addr.bdaddr,
7735                                        cp->addr.type);
7736                         goto complete;
7737                 }
7738
7739                 addr_type = le_addr_type(cp->addr.type);
7740
7741                 /* The kernel internally uses conn_params with resolvable
7742                  * private addresses, but Remove Device allows only identity
7743                  * addresses. Make sure this is enforced before calling
7744                  * hci_conn_params_lookup.
7745                  */
7746                 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7747                         err = mgmt_cmd_complete(sk, hdev->id,
7748                                                 MGMT_OP_REMOVE_DEVICE,
7749                                                 MGMT_STATUS_INVALID_PARAMS,
7750                                                 &cp->addr, sizeof(cp->addr));
7751                         goto unlock;
7752                 }
7753
7754                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7755                                                 addr_type);
7756                 if (!params) {
7757                         err = mgmt_cmd_complete(sk, hdev->id,
7758                                                 MGMT_OP_REMOVE_DEVICE,
7759                                                 MGMT_STATUS_INVALID_PARAMS,
7760                                                 &cp->addr, sizeof(cp->addr));
7761                         goto unlock;
7762                 }
7763
7764                 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7765                     params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7766                         err = mgmt_cmd_complete(sk, hdev->id,
7767                                                 MGMT_OP_REMOVE_DEVICE,
7768                                                 MGMT_STATUS_INVALID_PARAMS,
7769                                                 &cp->addr, sizeof(cp->addr));
7770                         goto unlock;
7771                 }
7772
7773                 hci_conn_params_free(params);
7774
7775                 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7776         } else {
7777                 struct hci_conn_params *p, *tmp;
7778                 struct bdaddr_list *b, *btmp;
7779
7780                 if (cp->addr.type) {
7781                         err = mgmt_cmd_complete(sk, hdev->id,
7782                                                 MGMT_OP_REMOVE_DEVICE,
7783                                                 MGMT_STATUS_INVALID_PARAMS,
7784                                                 &cp->addr, sizeof(cp->addr));
7785                         goto unlock;
7786                 }
7787
7788                 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7789                         device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7790                         list_del(&b->list);
7791                         kfree(b);
7792                 }
7793
7794                 hci_update_scan(hdev);
7795
7796                 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7797                         if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7798                                 continue;
7799                         device_removed(sk, hdev, &p->addr, p->addr_type);
7800                         if (p->explicit_connect) {
7801                                 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7802                                 continue;
7803                         }
7804                         hci_conn_params_free(p);
7805                 }
7806
7807                 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7808         }
7809
7810         hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7811
7812 complete:
7813         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7814                                 MGMT_STATUS_SUCCESS, &cp->addr,
7815                                 sizeof(cp->addr));
7816 unlock:
7817         hci_dev_unlock(hdev);
7818         return err;
7819 }
7820
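/* Handle MGMT_OP_LOAD_CONN_PARAM: validate the parameter count and message
 * length, clear previously disabled entries and store each valid LE
 * connection parameter set.
 */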
7821 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7822                            u16 len)
7823 {
7824         struct mgmt_cp_load_conn_param *cp = data;
7825         const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7826                                      sizeof(struct mgmt_conn_param));
7827         u16 param_count, expected_len;
7828         int i;
7829
7830         if (!lmp_le_capable(hdev))
7831                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7832                                        MGMT_STATUS_NOT_SUPPORTED);
7833
7834         param_count = __le16_to_cpu(cp->param_count);
7835         if (param_count > max_param_count) {
7836                 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7837                            param_count);
7838                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7839                                        MGMT_STATUS_INVALID_PARAMS);
7840         }
7841
7842         expected_len = struct_size(cp, params, param_count);
7843         if (expected_len != len) {
7844                 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7845                            expected_len, len);
7846                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7847                                        MGMT_STATUS_INVALID_PARAMS);
7848         }
7849
7850         bt_dev_dbg(hdev, "param_count %u", param_count);
7851
7852         hci_dev_lock(hdev);
7853
7854         hci_conn_params_clear_disabled(hdev);
7855
7856         for (i = 0; i < param_count; i++) {
7857                 struct mgmt_conn_param *param = &cp->params[i];
7858                 struct hci_conn_params *hci_param;
7859                 u16 min, max, latency, timeout;
7860                 u8 addr_type;
7861
7862                 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7863                            param->addr.type);
7864
7865                 if (param->addr.type == BDADDR_LE_PUBLIC) {
7866                         addr_type = ADDR_LE_DEV_PUBLIC;
7867                 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7868                         addr_type = ADDR_LE_DEV_RANDOM;
7869                 } else {
7870                         bt_dev_err(hdev, "ignoring invalid connection parameters");
7871                         continue;
7872                 }
7873
7874                 min = le16_to_cpu(param->min_interval);
7875                 max = le16_to_cpu(param->max_interval);
7876                 latency = le16_to_cpu(param->latency);
7877                 timeout = le16_to_cpu(param->timeout);
7878
7879                 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7880                            min, max, latency, timeout);
7881
7882                 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7883                         bt_dev_err(hdev, "ignoring invalid connection parameters");
7884                         continue;
7885                 }
7886
7887                 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7888                                                 addr_type);
7889                 if (!hci_param) {
7890                         bt_dev_err(hdev, "failed to add connection parameters");
7891                         continue;
7892                 }
7893
7894                 hci_param->conn_min_interval = min;
7895                 hci_param->conn_max_interval = max;
7896                 hci_param->conn_latency = latency;
7897                 hci_param->supervision_timeout = timeout;
7898         }
7899
7900         hci_dev_unlock(hdev);
7901
7902         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7903                                  NULL, 0);
7904 }
7905
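/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: toggle HCI_EXT_CONFIGURED while the
 * controller is powered off and update the management index registration
 * when the configured state changes as a result.
 */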
7906 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7907                                void *data, u16 len)
7908 {
7909         struct mgmt_cp_set_external_config *cp = data;
7910         bool changed;
7911         int err;
7912
7913         bt_dev_dbg(hdev, "sock %p", sk);
7914
7915         if (hdev_is_powered(hdev))
7916                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7917                                        MGMT_STATUS_REJECTED);
7918
7919         if (cp->config != 0x00 && cp->config != 0x01)
7920                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7921                                        MGMT_STATUS_INVALID_PARAMS);
7922
7923         if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7924                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7925                                        MGMT_STATUS_NOT_SUPPORTED);
7926
7927         hci_dev_lock(hdev);
7928
7929         if (cp->config)
7930                 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7931         else
7932                 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7933
7934         err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7935         if (err < 0)
7936                 goto unlock;
7937
7938         if (!changed)
7939                 goto unlock;
7940
7941         err = new_options(hdev, sk);
7942
7943         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7944                 mgmt_index_removed(hdev);
7945
7946                 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7947                         hci_dev_set_flag(hdev, HCI_CONFIG);
7948                         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7949
7950                         queue_work(hdev->req_workqueue, &hdev->power_on);
7951                 } else {
7952                         set_bit(HCI_RAW, &hdev->flags);
7953                         mgmt_index_added(hdev);
7954                 }
7955         }
7956
7957 unlock:
7958         hci_dev_unlock(hdev);
7959         return err;
7960 }
7961
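/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: store the new public address while the
 * controller is powered off; if the controller becomes fully configured as
 * a result it leaves the unconfigured state and is powered on for setup.
 */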
7962 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7963                               void *data, u16 len)
7964 {
7965         struct mgmt_cp_set_public_address *cp = data;
7966         bool changed;
7967         int err;
7968
7969         bt_dev_dbg(hdev, "sock %p", sk);
7970
7971         if (hdev_is_powered(hdev))
7972                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7973                                        MGMT_STATUS_REJECTED);
7974
7975         if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7976                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7977                                        MGMT_STATUS_INVALID_PARAMS);
7978
7979         if (!hdev->set_bdaddr)
7980                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7981                                        MGMT_STATUS_NOT_SUPPORTED);
7982
7983         hci_dev_lock(hdev);
7984
7985         changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7986         bacpy(&hdev->public_addr, &cp->bdaddr);
7987
7988         err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7989         if (err < 0)
7990                 goto unlock;
7991
7992         if (!changed)
7993                 goto unlock;
7994
7995         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7996                 err = new_options(hdev, sk);
7997
7998         if (is_configured(hdev)) {
7999                 mgmt_index_removed(hdev);
8000
8001                 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8002
8003                 hci_dev_set_flag(hdev, HCI_CONFIG);
8004                 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8005
8006                 queue_work(hdev->req_workqueue, &hdev->power_on);
8007         }
8008
8009 unlock:
8010         hci_dev_unlock(hdev);
8011         return err;
8012 }
8013
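/* Completion callback for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: convert the
 * controller's OOB data (P-192 and/or P-256 hash and randomizer) into EIR
 * fields and emit a Local OOB Data Updated event on success.
 */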
8014 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8015                                              int err)
8016 {
8017         const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8018         struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8019         u8 *h192, *r192, *h256, *r256;
8020         struct mgmt_pending_cmd *cmd = data;
8021         struct sk_buff *skb = cmd->skb;
8022         u8 status = mgmt_status(err);
8023         u16 eir_len;
8024
8025         if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8026                 return;
8027
8028         if (!status) {
8029                 if (!skb)
8030                         status = MGMT_STATUS_FAILED;
8031                 else if (IS_ERR(skb))
8032                         status = mgmt_status(PTR_ERR(skb));
8033                 else
8034                         status = mgmt_status(skb->data[0]);
8035         }
8036
8037         bt_dev_dbg(hdev, "status %u", status);
8038
8039         mgmt_cp = cmd->param;
8040
8041         if (status) {
8042                 status = mgmt_status(status);
8043                 eir_len = 0;
8044
8045                 h192 = NULL;
8046                 r192 = NULL;
8047                 h256 = NULL;
8048                 r256 = NULL;
8049         } else if (!bredr_sc_enabled(hdev)) {
8050                 struct hci_rp_read_local_oob_data *rp;
8051
8052                 if (skb->len != sizeof(*rp)) {
8053                         status = MGMT_STATUS_FAILED;
8054                         eir_len = 0;
8055                 } else {
8056                         status = MGMT_STATUS_SUCCESS;
8057                         rp = (void *)skb->data;
8058
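                        /* Class of Device field (5 bytes) plus the P-192
                         * hash and randomizer fields (18 bytes each).
                         */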
8059                         eir_len = 5 + 18 + 18;
8060                         h192 = rp->hash;
8061                         r192 = rp->rand;
8062                         h256 = NULL;
8063                         r256 = NULL;
8064                 }
8065         } else {
8066                 struct hci_rp_read_local_oob_ext_data *rp;
8067
8068                 if (skb->len != sizeof(*rp)) {
8069                         status = MGMT_STATUS_FAILED;
8070                         eir_len = 0;
8071                 } else {
8072                         status = MGMT_STATUS_SUCCESS;
8073                         rp = (void *)skb->data;
8074
8075                         if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8076                                 eir_len = 5 + 18 + 18;
8077                                 h192 = NULL;
8078                                 r192 = NULL;
8079                         } else {
8080                                 eir_len = 5 + 18 + 18 + 18 + 18;
8081                                 h192 = rp->hash192;
8082                                 r192 = rp->rand192;
8083                         }
8084
8085                         h256 = rp->hash256;
8086                         r256 = rp->rand256;
8087                 }
8088         }
8089
8090         mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8091         if (!mgmt_rp)
8092                 goto done;
8093
8094         if (eir_len == 0)
8095                 goto send_rsp;
8096
8097         eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8098                                   hdev->dev_class, 3);
8099
8100         if (h192 && r192) {
8101                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8102                                           EIR_SSP_HASH_C192, h192, 16);
8103                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8104                                           EIR_SSP_RAND_R192, r192, 16);
8105         }
8106
8107         if (h256 && r256) {
8108                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8109                                           EIR_SSP_HASH_C256, h256, 16);
8110                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8111                                           EIR_SSP_RAND_R256, r256, 16);
8112         }
8113
8114 send_rsp:
8115         mgmt_rp->type = mgmt_cp->type;
8116         mgmt_rp->eir_len = cpu_to_le16(eir_len);
8117
8118         err = mgmt_cmd_complete(cmd->sk, hdev->id,
8119                                 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8120                                 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8121         if (err < 0 || status)
8122                 goto done;
8123
8124         hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8125
8126         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8127                                  mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8128                                  HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8129 done:
8130         if (skb && !IS_ERR(skb))
8131                 kfree_skb(skb);
8132
8133         kfree(mgmt_rp);
8134         mgmt_pending_remove(cmd);
8135 }
8136
8137 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8138                                   struct mgmt_cp_read_local_oob_ext_data *cp)
8139 {
8140         struct mgmt_pending_cmd *cmd;
8141         int err;
8142
8143         cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8144                                cp, sizeof(*cp));
8145         if (!cmd)
8146                 return -ENOMEM;
8147
8148         err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8149                                  read_local_oob_ext_data_complete);
8150
8151         if (err < 0) {
8152                 mgmt_pending_remove(cmd);
8153                 return err;
8154         }
8155
8156         return 0;
8157 }
8158
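/* Handle MGMT_OP_READ_LOCAL_OOB_EXT_DATA: for BR/EDR the OOB data is read
 * from the controller, while for LE it is assembled locally from the
 * address, role, optional SC confirm/random values and the flags.
 */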
8159 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8160                                    void *data, u16 data_len)
8161 {
8162         struct mgmt_cp_read_local_oob_ext_data *cp = data;
8163         struct mgmt_rp_read_local_oob_ext_data *rp;
8164         size_t rp_len;
8165         u16 eir_len;
8166         u8 status, flags, role, addr[7], hash[16], rand[16];
8167         int err;
8168
8169         bt_dev_dbg(hdev, "sock %p", sk);
8170
8171         if (hdev_is_powered(hdev)) {
8172                 switch (cp->type) {
8173                 case BIT(BDADDR_BREDR):
8174                         status = mgmt_bredr_support(hdev);
8175                         if (status)
8176                                 eir_len = 0;
8177                         else
8178                                 eir_len = 5;
8179                         break;
8180                 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8181                         status = mgmt_le_support(hdev);
8182                         if (status)
8183                                 eir_len = 0;
8184                         else
8185                                 eir_len = 9 + 3 + 18 + 18 + 3;
8186                         break;
8187                 default:
8188                         status = MGMT_STATUS_INVALID_PARAMS;
8189                         eir_len = 0;
8190                         break;
8191                 }
8192         } else {
8193                 status = MGMT_STATUS_NOT_POWERED;
8194                 eir_len = 0;
8195         }
8196
8197         rp_len = sizeof(*rp) + eir_len;
8198         rp = kmalloc(rp_len, GFP_ATOMIC);
8199         if (!rp)
8200                 return -ENOMEM;
8201
8202         if (!status && !lmp_ssp_capable(hdev)) {
8203                 status = MGMT_STATUS_NOT_SUPPORTED;
8204                 eir_len = 0;
8205         }
8206
8207         if (status)
8208                 goto complete;
8209
8210         hci_dev_lock(hdev);
8211
8212         eir_len = 0;
8213         switch (cp->type) {
8214         case BIT(BDADDR_BREDR):
8215                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8216                         err = read_local_ssp_oob_req(hdev, sk, cp);
8217                         hci_dev_unlock(hdev);
8218                         if (!err)
8219                                 goto done;
8220
8221                         status = MGMT_STATUS_FAILED;
8222                         goto complete;
8223                 } else {
8224                         eir_len = eir_append_data(rp->eir, eir_len,
8225                                                   EIR_CLASS_OF_DEV,
8226                                                   hdev->dev_class, 3);
8227                 }
8228                 break;
8229         case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8230                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8231                     smp_generate_oob(hdev, hash, rand) < 0) {
8232                         hci_dev_unlock(hdev);
8233                         status = MGMT_STATUS_FAILED;
8234                         goto complete;
8235                 }
8236
8237                 /* This should return the active RPA, but since the RPA
8238                  * is only programmed on demand, it is really hard to fill
8239                  * this in at the moment. For now disallow retrieving
8240                  * local out-of-band data when privacy is in use.
8241                  *
8242                  * Returning the identity address will not help here since
8243                  * pairing happens before the identity resolving key is
8244                  * known and thus the connection establishment happens
8245                  * based on the RPA and not the identity address.
8246                  */
8247                 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8248                         hci_dev_unlock(hdev);
8249                         status = MGMT_STATUS_REJECTED;
8250                         goto complete;
8251                 }
8252
8253                 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8254                    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8255                    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8256                     bacmp(&hdev->static_addr, BDADDR_ANY))) {
8257                         memcpy(addr, &hdev->static_addr, 6);
8258                         addr[6] = 0x01;
8259                 } else {
8260                         memcpy(addr, &hdev->bdaddr, 6);
8261                         addr[6] = 0x00;
8262                 }
8263
8264                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8265                                           addr, sizeof(addr));
8266
8267                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8268                         role = 0x02;
8269                 else
8270                         role = 0x01;
8271
8272                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8273                                           &role, sizeof(role));
8274
8275                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8276                         eir_len = eir_append_data(rp->eir, eir_len,
8277                                                   EIR_LE_SC_CONFIRM,
8278                                                   hash, sizeof(hash));
8279
8280                         eir_len = eir_append_data(rp->eir, eir_len,
8281                                                   EIR_LE_SC_RANDOM,
8282                                                   rand, sizeof(rand));
8283                 }
8284
8285                 flags = mgmt_get_adv_discov_flags(hdev);
8286
8287                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8288                         flags |= LE_AD_NO_BREDR;
8289
8290                 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8291                                           &flags, sizeof(flags));
8292                 break;
8293         }
8294
8295         hci_dev_unlock(hdev);
8296
8297         hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8298
8299         status = MGMT_STATUS_SUCCESS;
8300
8301 complete:
8302         rp->type = cp->type;
8303         rp->eir_len = cpu_to_le16(eir_len);
8304
8305         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8306                                 status, rp, sizeof(*rp) + eir_len);
8307         if (err < 0 || status)
8308                 goto done;
8309
8310         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8311                                  rp, sizeof(*rp) + eir_len,
8312                                  HCI_MGMT_OOB_DATA_EVENTS, sk);
8313
8314 done:
8315         kfree(rp);
8316
8317         return err;
8318 }
8319
8320 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8321 {
8322         u32 flags = 0;
8323
8324         flags |= MGMT_ADV_FLAG_CONNECTABLE;
8325         flags |= MGMT_ADV_FLAG_DISCOV;
8326         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8327         flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8328         flags |= MGMT_ADV_FLAG_APPEARANCE;
8329         flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8330         flags |= MGMT_ADV_PARAM_DURATION;
8331         flags |= MGMT_ADV_PARAM_TIMEOUT;
8332         flags |= MGMT_ADV_PARAM_INTERVALS;
8333         flags |= MGMT_ADV_PARAM_TX_POWER;
8334         flags |= MGMT_ADV_PARAM_SCAN_RSP;
8335
8336         /* With extended advertising the TX_POWER returned from Set Adv
8337          * Param will always be valid.
8338          */
8339         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8340                 flags |= MGMT_ADV_FLAG_TX_POWER;
8341
8342         if (ext_adv_capable(hdev)) {
8343                 flags |= MGMT_ADV_FLAG_SEC_1M;
8344                 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8345                 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8346
8347                 if (le_2m_capable(hdev))
8348                         flags |= MGMT_ADV_FLAG_SEC_2M;
8349
8350                 if (le_coded_capable(hdev))
8351                         flags |= MGMT_ADV_FLAG_SEC_CODED;
8352         }
8353
8354         return flags;
8355 }
8356
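/* Handle MGMT_OP_READ_ADV_FEATURES: report the supported advertising flags,
 * the data length limits and the currently registered instances.
 */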
8357 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8358                              void *data, u16 data_len)
8359 {
8360         struct mgmt_rp_read_adv_features *rp;
8361         size_t rp_len;
8362         int err;
8363         struct adv_info *adv_instance;
8364         u32 supported_flags;
8365         u8 *instance;
8366
8367         bt_dev_dbg(hdev, "sock %p", sk);
8368
8369         if (!lmp_le_capable(hdev))
8370                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8371                                        MGMT_STATUS_REJECTED);
8372
8373         hci_dev_lock(hdev);
8374
8375         rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8376         rp = kmalloc(rp_len, GFP_ATOMIC);
8377         if (!rp) {
8378                 hci_dev_unlock(hdev);
8379                 return -ENOMEM;
8380         }
8381
8382         supported_flags = get_supported_adv_flags(hdev);
8383
8384         rp->supported_flags = cpu_to_le32(supported_flags);
8385         rp->max_adv_data_len = max_adv_len(hdev);
8386         rp->max_scan_rsp_len = max_adv_len(hdev);
8387         rp->max_instances = hdev->le_num_of_adv_sets;
8388         rp->num_instances = hdev->adv_instance_cnt;
8389
8390         instance = rp->instance;
8391         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8392                 /* Only instances 1-le_num_of_adv_sets are externally visible */
8393                 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8394                         *instance = adv_instance->instance;
8395                         instance++;
8396                 } else {
8397                         rp->num_instances--;
8398                         rp_len--;
8399                 }
8400         }
8401
8402         hci_dev_unlock(hdev);
8403
8404         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8405                                 MGMT_STATUS_SUCCESS, rp, rp_len);
8406
8407         kfree(rp);
8408
8409         return err;
8410 }
8411
8412 static u8 calculate_name_len(struct hci_dev *hdev)
8413 {
8414         u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8415
8416         return eir_append_local_name(hdev, buf, 0);
8417 }
8418
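/* Calculate how much room is left for caller supplied advertising or scan
 * response data once the fields managed by the kernel (flags, TX power,
 * local name, appearance) have been accounted for.
 */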
8419 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8420                            bool is_adv_data)
8421 {
8422         u8 max_len = max_adv_len(hdev);
8423
8424         if (is_adv_data) {
8425                 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8426                                  MGMT_ADV_FLAG_LIMITED_DISCOV |
8427                                  MGMT_ADV_FLAG_MANAGED_FLAGS))
8428                         max_len -= 3;
8429
8430                 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8431                         max_len -= 3;
8432         } else {
8433                 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8434                         max_len -= calculate_name_len(hdev);
8435
8436                 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8437                         max_len -= 4;
8438         }
8439
8440         return max_len;
8441 }
8442
8443 static bool flags_managed(u32 adv_flags)
8444 {
8445         return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8446                             MGMT_ADV_FLAG_LIMITED_DISCOV |
8447                             MGMT_ADV_FLAG_MANAGED_FLAGS);
8448 }
8449
8450 static bool tx_power_managed(u32 adv_flags)
8451 {
8452         return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8453 }
8454
8455 static bool name_managed(u32 adv_flags)
8456 {
8457         return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8458 }
8459
8460 static bool appearance_managed(u32 adv_flags)
8461 {
8462         return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8463 }
8464
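/* Validate caller supplied advertising/scan response data: it must fit
 * within the available space, be well-formed TLV data and not contain
 * fields that the kernel manages itself for the given flags.
 */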
8465 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8466                               u8 len, bool is_adv_data)
8467 {
8468         int i, cur_len;
8469         u8 max_len;
8470
8471         max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8472
8473         if (len > max_len)
8474                 return false;
8475
8476         /* Make sure that the data is correctly formatted. */
8477         for (i = 0; i < len; i += (cur_len + 1)) {
8478                 cur_len = data[i];
8479
8480                 if (!cur_len)
8481                         continue;
8482
8483                 if (data[i + 1] == EIR_FLAGS &&
8484                     (!is_adv_data || flags_managed(adv_flags)))
8485                         return false;
8486
8487                 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8488                         return false;
8489
8490                 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8491                         return false;
8492
8493                 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8494                         return false;
8495
8496                 if (data[i + 1] == EIR_APPEARANCE &&
8497                     appearance_managed(adv_flags))
8498                         return false;
8499
8500                 /* If the current field length would exceed the total data
8501                  * length, then it's invalid.
8502                  */
8503                 if (i + cur_len >= len)
8504                         return false;
8505         }
8506
8507         return true;
8508 }
8509
8510 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8511 {
8512         u32 supported_flags, phy_flags;
8513
8514         /* The current implementation only supports a subset of the specified
8515          * flags. The secondary PHY flags also need to be mutually exclusive.
8516          */
8517         supported_flags = get_supported_adv_flags(hdev);
8518         phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
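        /* phy_flags & -phy_flags isolates the lowest set secondary PHY bit,
         * so the XOR below is non-zero only when more than one secondary PHY
         * flag has been requested at the same time.
         */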
8519         if (adv_flags & ~supported_flags ||
8520             ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8521                 return false;
8522
8523         return true;
8524 }
8525
8526 static bool adv_busy(struct hci_dev *hdev)
8527 {
8528         return pending_find(MGMT_OP_SET_LE, hdev);
8529 }
8530
8531 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8532                              int err)
8533 {
8534         struct adv_info *adv, *n;
8535
8536         bt_dev_dbg(hdev, "err %d", err);
8537
8538         hci_dev_lock(hdev);
8539
8540         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8541                 u8 instance;
8542
8543                 if (!adv->pending)
8544                         continue;
8545
8546                 if (!err) {
8547                         adv->pending = false;
8548                         continue;
8549                 }
8550
8551                 instance = adv->instance;
8552
8553                 if (hdev->cur_adv_instance == instance)
8554                         cancel_adv_timeout(hdev);
8555
8556                 hci_remove_adv_instance(hdev, instance);
8557                 mgmt_advertising_removed(sk, hdev, instance);
8558         }
8559
8560         hci_dev_unlock(hdev);
8561 }
8562
8563 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8564 {
8565         struct mgmt_pending_cmd *cmd = data;
8566         struct mgmt_cp_add_advertising *cp = cmd->param;
8567         struct mgmt_rp_add_advertising rp;
8568
8569         memset(&rp, 0, sizeof(rp));
8570
8571         rp.instance = cp->instance;
8572
8573         if (err)
8574                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8575                                 mgmt_status(err));
8576         else
8577                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8578                                   mgmt_status(err), &rp, sizeof(rp));
8579
8580         add_adv_complete(hdev, cmd->sk, cp->instance, err);
8581
8582         mgmt_pending_free(cmd);
8583 }
8584
8585 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8586 {
8587         struct mgmt_pending_cmd *cmd = data;
8588         struct mgmt_cp_add_advertising *cp = cmd->param;
8589
8590         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8591 }
8592
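/* Handle MGMT_OP_ADD_ADVERTISING: register or update an advertising
 * instance and, when the controller is powered and nothing else takes
 * precedence, queue it to be advertised right away.
 */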
8593 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8594                            void *data, u16 data_len)
8595 {
8596         struct mgmt_cp_add_advertising *cp = data;
8597         struct mgmt_rp_add_advertising rp;
8598         u32 flags;
8599         u8 status;
8600         u16 timeout, duration;
8601         unsigned int prev_instance_cnt;
8602         u8 schedule_instance = 0;
8603         struct adv_info *adv, *next_instance;
8604         int err;
8605         struct mgmt_pending_cmd *cmd;
8606
8607         bt_dev_dbg(hdev, "sock %p", sk);
8608
8609         status = mgmt_le_support(hdev);
8610         if (status)
8611                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8612                                        status);
8613
8614         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8615                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8616                                        MGMT_STATUS_INVALID_PARAMS);
8617
8618         if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8619                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8620                                        MGMT_STATUS_INVALID_PARAMS);
8621
8622         flags = __le32_to_cpu(cp->flags);
8623         timeout = __le16_to_cpu(cp->timeout);
8624         duration = __le16_to_cpu(cp->duration);
8625
8626         if (!requested_adv_flags_are_valid(hdev, flags))
8627                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8628                                        MGMT_STATUS_INVALID_PARAMS);
8629
8630         hci_dev_lock(hdev);
8631
8632         if (timeout && !hdev_is_powered(hdev)) {
8633                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8634                                       MGMT_STATUS_REJECTED);
8635                 goto unlock;
8636         }
8637
8638         if (adv_busy(hdev)) {
8639                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8640                                       MGMT_STATUS_BUSY);
8641                 goto unlock;
8642         }
8643
8644         if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8645             !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8646                                cp->scan_rsp_len, false)) {
8647                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8648                                       MGMT_STATUS_INVALID_PARAMS);
8649                 goto unlock;
8650         }
8651
8652         prev_instance_cnt = hdev->adv_instance_cnt;
8653
8654         adv = hci_add_adv_instance(hdev, cp->instance, flags,
8655                                    cp->adv_data_len, cp->data,
8656                                    cp->scan_rsp_len,
8657                                    cp->data + cp->adv_data_len,
8658                                    timeout, duration,
8659                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
8660                                    hdev->le_adv_min_interval,
8661                                    hdev->le_adv_max_interval, 0);
8662         if (IS_ERR(adv)) {
8663                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8664                                       MGMT_STATUS_FAILED);
8665                 goto unlock;
8666         }
8667
8668         /* Only trigger an advertising added event if a new instance was
8669          * actually added.
8670          */
8671         if (hdev->adv_instance_cnt > prev_instance_cnt)
8672                 mgmt_advertising_added(sk, hdev, cp->instance);
8673
8674         if (hdev->cur_adv_instance == cp->instance) {
8675                 /* If the currently advertised instance is being changed then
8676                  * cancel the current advertising and schedule the next
8677                  * instance. If there is only one instance then the overridden
8678                  * advertising data will be visible right away.
8679                  */
8680                 cancel_adv_timeout(hdev);
8681
8682                 next_instance = hci_get_next_instance(hdev, cp->instance);
8683                 if (next_instance)
8684                         schedule_instance = next_instance->instance;
8685         } else if (!hdev->adv_instance_timeout) {
8686                 /* Immediately advertise the new instance if no other
8687                  * instance is currently being advertised.
8688                  */
8689                 schedule_instance = cp->instance;
8690         }
8691
8692         /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8693          * there is no instance to be advertised then we have no HCI
8694          * communication to make. Simply return.
8695          */
8696         if (!hdev_is_powered(hdev) ||
8697             hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8698             !schedule_instance) {
8699                 rp.instance = cp->instance;
8700                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8701                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8702                 goto unlock;
8703         }
8704
8705         /* We're good to go, update advertising data, parameters, and start
8706          * advertising.
8707          */
8708         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8709                                data_len);
8710         if (!cmd) {
8711                 err = -ENOMEM;
8712                 goto unlock;
8713         }
8714
8715         cp->instance = schedule_instance;
8716
8717         err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8718                                  add_advertising_complete);
8719         if (err < 0)
8720                 mgmt_pending_free(cmd);
8721
8722 unlock:
8723         hci_dev_unlock(hdev);
8724
8725         return err;
8726 }
8727
8728 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8729                                         int err)
8730 {
8731         struct mgmt_pending_cmd *cmd = data;
8732         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8733         struct mgmt_rp_add_ext_adv_params rp;
8734         struct adv_info *adv;
8735         u32 flags;
8736
8737         BT_DBG("%s", hdev->name);
8738
8739         hci_dev_lock(hdev);
8740
8741         adv = hci_find_adv_instance(hdev, cp->instance);
8742         if (!adv)
8743                 goto unlock;
8744
8745         rp.instance = cp->instance;
8746         rp.tx_power = adv->tx_power;
8747
8748         /* While we're at it, inform userspace of the available space for this
8749          * advertisement, given the flags that will be used.
8750          */
8751         flags = __le32_to_cpu(cp->flags);
8752         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8753         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8754
8755         if (err) {
8756                 /* If this advertisement was previously advertising and we
8757                  * failed to update it, we signal that it has been removed and
8758                  * delete its structure.
8759                  */
8760                 if (!adv->pending)
8761                         mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8762
8763                 hci_remove_adv_instance(hdev, cp->instance);
8764
8765                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8766                                 mgmt_status(err));
8767         } else {
8768                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8769                                   mgmt_status(err), &rp, sizeof(rp));
8770         }
8771
8772 unlock:
8773         if (cmd)
8774                 mgmt_pending_free(cmd);
8775
8776         hci_dev_unlock(hdev);
8777 }
8778
8779 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8780 {
8781         struct mgmt_pending_cmd *cmd = data;
8782         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8783
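        /* Only the advertising parameters are programmed here; the
         * advertising and scan response data for this instance are
         * supplied later via MGMT_OP_ADD_EXT_ADV_DATA.
         */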
8784         return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8785 }
8786
8787 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8788                               void *data, u16 data_len)
8789 {
8790         struct mgmt_cp_add_ext_adv_params *cp = data;
8791         struct mgmt_rp_add_ext_adv_params rp;
8792         struct mgmt_pending_cmd *cmd = NULL;
8793         struct adv_info *adv;
8794         u32 flags, min_interval, max_interval;
8795         u16 timeout, duration;
8796         u8 status;
8797         s8 tx_power;
8798         int err;
8799
8800         BT_DBG("%s", hdev->name);
8801
8802         status = mgmt_le_support(hdev);
8803         if (status)
8804                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8805                                        status);
8806
8807         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8808                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8809                                        MGMT_STATUS_INVALID_PARAMS);
8810
8811         /* The purpose of breaking add_advertising into two separate MGMT calls
8812          * for params and data is to allow more parameters to be added to this
8813          * structure in the future. For this reason, we verify that we have the
8814          * bare minimum structure we know of when the interface was defined. Any
8815          * extra parameters we don't know about will be ignored in this request.
8816          */
8817         if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8818                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8819                                        MGMT_STATUS_INVALID_PARAMS);
8820
8821         flags = __le32_to_cpu(cp->flags);
8822
8823         if (!requested_adv_flags_are_valid(hdev, flags))
8824                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8825                                        MGMT_STATUS_INVALID_PARAMS);
8826
8827         hci_dev_lock(hdev);
8828
8829         /* In the new interface the controller must be powered to register */
8830         if (!hdev_is_powered(hdev)) {
8831                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8832                                       MGMT_STATUS_REJECTED);
8833                 goto unlock;
8834         }
8835
8836         if (adv_busy(hdev)) {
8837                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8838                                       MGMT_STATUS_BUSY);
8839                 goto unlock;
8840         }
8841
8842         /* Parse defined parameters from request, use defaults otherwise */
8843         timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8844                   __le16_to_cpu(cp->timeout) : 0;
8845
8846         duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8847                    __le16_to_cpu(cp->duration) :
8848                    hdev->def_multi_adv_rotation_duration;
8849
8850         min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8851                        __le32_to_cpu(cp->min_interval) :
8852                        hdev->le_adv_min_interval;
8853
8854         max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8855                        __le32_to_cpu(cp->max_interval) :
8856                        hdev->le_adv_max_interval;
8857
8858         tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8859                    cp->tx_power :
8860                    HCI_ADV_TX_POWER_NO_PREFERENCE;
8861
8862         /* Create advertising instance with no advertising or response data */
8863         adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8864                                    timeout, duration, tx_power, min_interval,
8865                                    max_interval, 0);
8866
8867         if (IS_ERR(adv)) {
8868                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8869                                       MGMT_STATUS_FAILED);
8870                 goto unlock;
8871         }
8872
8873         /* Submit request for advertising params if ext adv available */
8874         if (ext_adv_capable(hdev)) {
8875                 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8876                                        data, data_len);
8877                 if (!cmd) {
8878                         err = -ENOMEM;
8879                         hci_remove_adv_instance(hdev, cp->instance);
8880                         goto unlock;
8881                 }
8882
8883                 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8884                                          add_ext_adv_params_complete);
8885                 if (err < 0)
8886                         mgmt_pending_free(cmd);
8887         } else {
8888                 rp.instance = cp->instance;
8889                 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8890                 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8891                 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8892                 err = mgmt_cmd_complete(sk, hdev->id,
8893                                         MGMT_OP_ADD_EXT_ADV_PARAMS,
8894                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8895         }
8896
8897 unlock:
8898         hci_dev_unlock(hdev);
8899
8900         return err;
8901 }
8902
8903 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8904 {
8905         struct mgmt_pending_cmd *cmd = data;
8906         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8907         struct mgmt_rp_add_advertising rp;
8908
8909         add_adv_complete(hdev, cmd->sk, cp->instance, err);
8910
8911         memset(&rp, 0, sizeof(rp));
8912
8913         rp.instance = cp->instance;
8914
8915         if (err)
8916                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8917                                 mgmt_status(err));
8918         else
8919                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8920                                   mgmt_status(err), &rp, sizeof(rp));
8921
8922         mgmt_pending_free(cmd);
8923 }
8924
8925 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8926 {
8927         struct mgmt_pending_cmd *cmd = data;
8928         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8929         int err;
8930
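        /* Controllers with extended advertising get the advertising data,
         * scan response data and enable programmed per instance; legacy
         * controllers fall back to the software rotation scheduler below.
         */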
8931         if (ext_adv_capable(hdev)) {
8932                 err = hci_update_adv_data_sync(hdev, cp->instance);
8933                 if (err)
8934                         return err;
8935
8936                 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8937                 if (err)
8938                         return err;
8939
8940                 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8941         }
8942
8943         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8944 }
8945
8946 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8947                             u16 data_len)
8948 {
8949         struct mgmt_cp_add_ext_adv_data *cp = data;
8950         struct mgmt_rp_add_ext_adv_data rp;
8951         u8 schedule_instance = 0;
8952         struct adv_info *next_instance;
8953         struct adv_info *adv_instance;
8954         int err = 0;
8955         struct mgmt_pending_cmd *cmd;
8956
8957         BT_DBG("%s", hdev->name);
8958
8959         hci_dev_lock(hdev);
8960
8961         adv_instance = hci_find_adv_instance(hdev, cp->instance);
8962
8963         if (!adv_instance) {
8964                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8965                                       MGMT_STATUS_INVALID_PARAMS);
8966                 goto unlock;
8967         }
8968
8969         /* In the new interface the controller must be powered to register */
8970         if (!hdev_is_powered(hdev)) {
8971                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8972                                       MGMT_STATUS_REJECTED);
8973                 goto clear_new_instance;
8974         }
8975
8976         if (adv_busy(hdev)) {
8977                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8978                                       MGMT_STATUS_BUSY);
8979                 goto clear_new_instance;
8980         }
8981
8982         /* Validate new data */
8983         if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8984                                cp->adv_data_len, true) ||
8985             !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8986                                cp->adv_data_len, cp->scan_rsp_len, false)) {
8987                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8988                                       MGMT_STATUS_INVALID_PARAMS);
8989                 goto clear_new_instance;
8990         }
8991
8992         /* Set the data in the advertising instance */
8993         hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8994                                   cp->data, cp->scan_rsp_len,
8995                                   cp->data + cp->adv_data_len);
8996
8997         /* If using software rotation, determine next instance to use */
8998         if (hdev->cur_adv_instance == cp->instance) {
8999                 /* If the currently advertised instance is being changed
9000                  * then cancel the current advertising and schedule the
9001                  * next instance. If there is only one instance then the
9002                  * overridden advertising data will be visible right
9003                  * away
9004                  */
9005                 cancel_adv_timeout(hdev);
9006
9007                 next_instance = hci_get_next_instance(hdev, cp->instance);
9008                 if (next_instance)
9009                         schedule_instance = next_instance->instance;
9010         } else if (!hdev->adv_instance_timeout) {
9011                 /* Immediately advertise the new instance if no other
9012                  * instance is currently being advertised.
9013                  */
9014                 schedule_instance = cp->instance;
9015         }
9016
9017         /* If the HCI_ADVERTISING flag is set or there is no instance to
9018          * be advertised then we have no HCI communication to make.
9019          * Simply return.
9020          */
9021         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9022                 if (adv_instance->pending) {
9023                         mgmt_advertising_added(sk, hdev, cp->instance);
9024                         adv_instance->pending = false;
9025                 }
9026                 rp.instance = cp->instance;
9027                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9028                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9029                 goto unlock;
9030         }
9031
9032         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9033                                data_len);
9034         if (!cmd) {
9035                 err = -ENOMEM;
9036                 goto clear_new_instance;
9037         }
9038
9039         err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9040                                  add_ext_adv_data_complete);
9041         if (err < 0) {
9042                 mgmt_pending_free(cmd);
9043                 goto clear_new_instance;
9044         }
9045
9046         /* The data was updated successfully, so trigger the advertising_added
9047          * event if this instance wasn't previously advertising. If a failure
9048          * occurs in the requests we initiated, the instance is removed again
9049          * in the completion handler (add_ext_adv_data_complete).
9050          */
9051         if (adv_instance->pending)
9052                 mgmt_advertising_added(sk, hdev, cp->instance);
9053
9054         goto unlock;
9055
9056 clear_new_instance:
9057         hci_remove_adv_instance(hdev, cp->instance);
9058
9059 unlock:
9060         hci_dev_unlock(hdev);
9061
9062         return err;
9063 }
9064
9065 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9066                                         int err)
9067 {
9068         struct mgmt_pending_cmd *cmd = data;
9069         struct mgmt_cp_remove_advertising *cp = cmd->param;
9070         struct mgmt_rp_remove_advertising rp;
9071
9072         bt_dev_dbg(hdev, "err %d", err);
9073
9074         memset(&rp, 0, sizeof(rp));
9075         rp.instance = cp->instance;
9076
9077         if (err)
9078                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9079                                 mgmt_status(err));
9080         else
9081                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9082                                   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9083
9084         mgmt_pending_free(cmd);
9085 }
9086
9087 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9088 {
9089         struct mgmt_pending_cmd *cmd = data;
9090         struct mgmt_cp_remove_advertising *cp = cmd->param;
9091         int err;
9092
9093         err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9094         if (err)
9095                 return err;
9096
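        /* If that was the last remaining instance, advertising as a whole
         * can be disabled.
         */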
9097         if (list_empty(&hdev->adv_instances))
9098                 err = hci_disable_advertising_sync(hdev);
9099
9100         return err;
9101 }
9102
9103 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9104                               void *data, u16 data_len)
9105 {
9106         struct mgmt_cp_remove_advertising *cp = data;
9107         struct mgmt_pending_cmd *cmd;
9108         int err;
9109
9110         bt_dev_dbg(hdev, "sock %p", sk);
9111
9112         hci_dev_lock(hdev);
9113
9114         if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9115                 err = mgmt_cmd_status(sk, hdev->id,
9116                                       MGMT_OP_REMOVE_ADVERTISING,
9117                                       MGMT_STATUS_INVALID_PARAMS);
9118                 goto unlock;
9119         }
9120
9121         if (pending_find(MGMT_OP_SET_LE, hdev)) {
9122                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9123                                       MGMT_STATUS_BUSY);
9124                 goto unlock;
9125         }
9126
9127         if (list_empty(&hdev->adv_instances)) {
9128                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9129                                       MGMT_STATUS_INVALID_PARAMS);
9130                 goto unlock;
9131         }
9132
9133         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9134                                data_len);
9135         if (!cmd) {
9136                 err = -ENOMEM;
9137                 goto unlock;
9138         }
9139
9140         err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9141                                  remove_advertising_complete);
9142         if (err < 0)
9143                 mgmt_pending_free(cmd);
9144
9145 unlock:
9146         hci_dev_unlock(hdev);
9147
9148         return err;
9149 }
9150
9151 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9152                              void *data, u16 data_len)
9153 {
9154         struct mgmt_cp_get_adv_size_info *cp = data;
9155         struct mgmt_rp_get_adv_size_info rp;
9156         u32 flags, supported_flags;
9157
9158         bt_dev_dbg(hdev, "sock %p", sk);
9159
9160         if (!lmp_le_capable(hdev))
9161                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9162                                        MGMT_STATUS_REJECTED);
9163
9164         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9165                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9166                                        MGMT_STATUS_INVALID_PARAMS);
9167
9168         flags = __le32_to_cpu(cp->flags);
9169
9170         /* The current implementation only supports a subset of the specified
9171          * flags.
9172          */
9173         supported_flags = get_supported_adv_flags(hdev);
9174         if (flags & ~supported_flags)
9175                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9176                                        MGMT_STATUS_INVALID_PARAMS);
9177
9178         rp.instance = cp->instance;
9179         rp.flags = cp->flags;
9180         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9181         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9182
9183         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9184                                  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9185 }
9186
9187 static const struct hci_mgmt_handler mgmt_handlers[] = {
9188         { NULL }, /* 0x0000 (no command) */
9189         { read_version,            MGMT_READ_VERSION_SIZE,
9190                                                 HCI_MGMT_NO_HDEV |
9191                                                 HCI_MGMT_UNTRUSTED },
9192         { read_commands,           MGMT_READ_COMMANDS_SIZE,
9193                                                 HCI_MGMT_NO_HDEV |
9194                                                 HCI_MGMT_UNTRUSTED },
9195         { read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
9196                                                 HCI_MGMT_NO_HDEV |
9197                                                 HCI_MGMT_UNTRUSTED },
9198         { read_controller_info,    MGMT_READ_INFO_SIZE,
9199                                                 HCI_MGMT_UNTRUSTED },
9200         { set_powered,             MGMT_SETTING_SIZE },
9201         { set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
9202         { set_connectable,         MGMT_SETTING_SIZE },
9203         { set_fast_connectable,    MGMT_SETTING_SIZE },
9204         { set_bondable,            MGMT_SETTING_SIZE },
9205         { set_link_security,       MGMT_SETTING_SIZE },
9206         { set_ssp,                 MGMT_SETTING_SIZE },
9207         { set_hs,                  MGMT_SETTING_SIZE },
9208         { set_le,                  MGMT_SETTING_SIZE },
9209         { set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
9210         { set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
9211         { add_uuid,                MGMT_ADD_UUID_SIZE },
9212         { remove_uuid,             MGMT_REMOVE_UUID_SIZE },
9213         { load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
9214                                                 HCI_MGMT_VAR_LEN },
9215         { load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9216                                                 HCI_MGMT_VAR_LEN },
9217         { disconnect,              MGMT_DISCONNECT_SIZE },
9218         { get_connections,         MGMT_GET_CONNECTIONS_SIZE },
9219         { pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
9220         { pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
9221         { set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
9222         { pair_device,             MGMT_PAIR_DEVICE_SIZE },
9223         { cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
9224         { unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
9225         { user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
9226         { user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9227         { user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
9228         { user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9229         { read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
9230         { add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9231                                                 HCI_MGMT_VAR_LEN },
9232         { remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9233         { start_discovery,         MGMT_START_DISCOVERY_SIZE },
9234         { stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
9235         { confirm_name,            MGMT_CONFIRM_NAME_SIZE },
9236         { block_device,            MGMT_BLOCK_DEVICE_SIZE },
9237         { unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
9238         { set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
9239         { set_advertising,         MGMT_SETTING_SIZE },
9240         { set_bredr,               MGMT_SETTING_SIZE },
9241         { set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
9242         { set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
9243         { set_secure_conn,         MGMT_SETTING_SIZE },
9244         { set_debug_keys,          MGMT_SETTING_SIZE },
9245         { set_privacy,             MGMT_SET_PRIVACY_SIZE },
9246         { load_irks,               MGMT_LOAD_IRKS_SIZE,
9247                                                 HCI_MGMT_VAR_LEN },
9248         { get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
9249         { get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
9250         { add_device,              MGMT_ADD_DEVICE_SIZE },
9251         { remove_device,           MGMT_REMOVE_DEVICE_SIZE },
9252         { load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
9253                                                 HCI_MGMT_VAR_LEN },
9254         { read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9255                                                 HCI_MGMT_NO_HDEV |
9256                                                 HCI_MGMT_UNTRUSTED },
9257         { read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
9258                                                 HCI_MGMT_UNCONFIGURED |
9259                                                 HCI_MGMT_UNTRUSTED },
9260         { set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
9261                                                 HCI_MGMT_UNCONFIGURED },
9262         { set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
9263                                                 HCI_MGMT_UNCONFIGURED },
9264         { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9265                                                 HCI_MGMT_VAR_LEN },
9266         { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9267         { read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
9268                                                 HCI_MGMT_NO_HDEV |
9269                                                 HCI_MGMT_UNTRUSTED },
9270         { read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
9271         { add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
9272                                                 HCI_MGMT_VAR_LEN },
9273         { remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
9274         { get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
9275         { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9276         { read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
9277                                                 HCI_MGMT_UNTRUSTED },
9278         { set_appearance,          MGMT_SET_APPEARANCE_SIZE },
9279         { get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
9280         { set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
9281         { set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9282                                                 HCI_MGMT_VAR_LEN },
9283         { set_wideband_speech,     MGMT_SETTING_SIZE },
9284         { read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
9285                                                 HCI_MGMT_UNTRUSTED },
9286         { read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
9287                                                 HCI_MGMT_UNTRUSTED |
9288                                                 HCI_MGMT_HDEV_OPTIONAL },
9289         { set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
9290                                                 HCI_MGMT_VAR_LEN |
9291                                                 HCI_MGMT_HDEV_OPTIONAL },
9292         { read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9293                                                 HCI_MGMT_UNTRUSTED },
9294         { set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9295                                                 HCI_MGMT_VAR_LEN },
9296         { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9297                                                 HCI_MGMT_UNTRUSTED },
9298         { set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9299                                                 HCI_MGMT_VAR_LEN },
9300         { get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
9301         { set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
9302         { read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9303         { add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9304                                                 HCI_MGMT_VAR_LEN },
9305         { remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
9306         { add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9307                                                 HCI_MGMT_VAR_LEN },
9308         { add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
9309                                                 HCI_MGMT_VAR_LEN },
9310         { add_adv_patterns_monitor_rssi,
9311                                    MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9312                                                 HCI_MGMT_VAR_LEN },
9313         { set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
9314                                                 HCI_MGMT_VAR_LEN },
9315         { mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
9316         { mesh_send,               MGMT_MESH_SEND_SIZE,
9317                                                 HCI_MGMT_VAR_LEN },
9318         { mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
9319 };
9320
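/* The Extended Index Added/Removed events carry a type field: 0x00 for a
 * configured primary controller, 0x01 for an unconfigured one and 0x02 for
 * an AMP controller.
 */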
9321 void mgmt_index_added(struct hci_dev *hdev)
9322 {
9323         struct mgmt_ev_ext_index ev;
9324
9325         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9326                 return;
9327
9328         switch (hdev->dev_type) {
9329         case HCI_PRIMARY:
9330                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9331                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9332                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9333                         ev.type = 0x01;
9334                 } else {
9335                         mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9336                                          HCI_MGMT_INDEX_EVENTS);
9337                         ev.type = 0x00;
9338                 }
9339                 break;
9340         case HCI_AMP:
9341                 ev.type = 0x02;
9342                 break;
9343         default:
9344                 return;
9345         }
9346
9347         ev.bus = hdev->bus;
9348
9349         mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9350                          HCI_MGMT_EXT_INDEX_EVENTS);
9351 }
9352
9353 void mgmt_index_removed(struct hci_dev *hdev)
9354 {
9355         struct mgmt_ev_ext_index ev;
9356         u8 status = MGMT_STATUS_INVALID_INDEX;
9357
9358         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9359                 return;
9360
9361         switch (hdev->dev_type) {
9362         case HCI_PRIMARY:
9363                 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9364
9365                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9366                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9367                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9368                         ev.type = 0x01;
9369                 } else {
9370                         mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9371                                          HCI_MGMT_INDEX_EVENTS);
9372                         ev.type = 0x00;
9373                 }
9374                 break;
9375         case HCI_AMP:
9376                 ev.type = 0x02;
9377                 break;
9378         default:
9379                 return;
9380         }
9381
9382         ev.bus = hdev->bus;
9383
9384         mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9385                          HCI_MGMT_EXT_INDEX_EVENTS);
9386
9387         /* Cancel any remaining timed work */
9388         if (!hci_dev_test_flag(hdev, HCI_MGMT))
9389                 return;
9390         cancel_delayed_work_sync(&hdev->discov_off);
9391         cancel_delayed_work_sync(&hdev->service_cache);
9392         cancel_delayed_work_sync(&hdev->rpa_expired);
9393 }
9394
9395 void mgmt_power_on(struct hci_dev *hdev, int err)
9396 {
9397         struct cmd_lookup match = { NULL, hdev };
9398
9399         bt_dev_dbg(hdev, "err %d", err);
9400
9401         hci_dev_lock(hdev);
9402
9403         if (!err) {
9404                 restart_le_actions(hdev);
9405                 hci_update_passive_scan(hdev);
9406         }
9407
9408         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9409
9410         new_settings(hdev, match.sk);
9411
9412         if (match.sk)
9413                 sock_put(match.sk);
9414
9415         hci_dev_unlock(hdev);
9416 }
9417
9418 void __mgmt_power_off(struct hci_dev *hdev)
9419 {
9420         struct cmd_lookup match = { NULL, hdev };
9421         u8 status, zero_cod[] = { 0, 0, 0 };
9422
9423         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9424
9425         /* If the power off is because of hdev unregistration, let us
9426          * use the appropriate INVALID_INDEX status. Otherwise use
9427          * NOT_POWERED. We cover both scenarios here since later in
9428          * mgmt_index_removed() any hci_conn callbacks will have already
9429          * been triggered, potentially causing misleading DISCONNECTED
9430          * status responses.
9431          */
9432         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9433                 status = MGMT_STATUS_INVALID_INDEX;
9434         else
9435                 status = MGMT_STATUS_NOT_POWERED;
9436
9437         mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9438
9439         if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9440                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9441                                    zero_cod, sizeof(zero_cod),
9442                                    HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9443                 ext_info_changed(hdev, NULL);
9444         }
9445
9446         new_settings(hdev, match.sk);
9447
9448         if (match.sk)
9449                 sock_put(match.sk);
9450 }
9451
9452 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9453 {
9454         struct mgmt_pending_cmd *cmd;
9455         u8 status;
9456
9457         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9458         if (!cmd)
9459                 return;
9460
9461         if (err == -ERFKILL)
9462                 status = MGMT_STATUS_RFKILLED;
9463         else
9464                 status = MGMT_STATUS_FAILED;
9465
9466         mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9467
9468         mgmt_pending_remove(cmd);
9469 }
9470
9471 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9472                        bool persistent)
9473 {
9474         struct mgmt_ev_new_link_key ev;
9475
9476         memset(&ev, 0, sizeof(ev));
9477
9478         ev.store_hint = persistent;
9479         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9480         ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9481         ev.key.type = key->type;
9482         memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9483         ev.key.pin_len = key->pin_len;
9484
9485         mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9486 }
9487
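/* Map the SMP long term key type and its authentication state to the
 * corresponding MGMT long term key type.
 */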
9488 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9489 {
9490         switch (ltk->type) {
9491         case SMP_LTK:
9492         case SMP_LTK_RESPONDER:
9493                 if (ltk->authenticated)
9494                         return MGMT_LTK_AUTHENTICATED;
9495                 return MGMT_LTK_UNAUTHENTICATED;
9496         case SMP_LTK_P256:
9497                 if (ltk->authenticated)
9498                         return MGMT_LTK_P256_AUTH;
9499                 return MGMT_LTK_P256_UNAUTH;
9500         case SMP_LTK_P256_DEBUG:
9501                 return MGMT_LTK_P256_DEBUG;
9502         }
9503
9504         return MGMT_LTK_UNAUTHENTICATED;
9505 }
9506
9507 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9508 {
9509         struct mgmt_ev_new_long_term_key ev;
9510
9511         memset(&ev, 0, sizeof(ev));
9512
9513         /* Devices using resolvable or non-resolvable random addresses
9514          * without providing an identity resolving key don't require
9515          * their long term keys to be stored. Their addresses will
9516          * change the next time around.
9517          *
9518          * Only when a remote device provides an identity address do we
9519          * make sure the long term key is stored. If the remote identity
9520          * is known, the long term keys are internally mapped to the
9521          * identity address. So allow static random and public addresses
9522          * here.
9523          */
9524         if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9525             (key->bdaddr.b[5] & 0xc0) != 0xc0)
9526                 ev.store_hint = 0x00;
9527         else
9528                 ev.store_hint = persistent;
9529
9530         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9531         ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9532         ev.key.type = mgmt_ltk_type(key);
9533         ev.key.enc_size = key->enc_size;
9534         ev.key.ediv = key->ediv;
9535         ev.key.rand = key->rand;
9536
9537         if (key->type == SMP_LTK)
9538                 ev.key.initiator = 1;
9539
9540         /* Make sure we copy only the significant bytes based on the
9541          * encryption key size, and set the rest of the value to zeroes.
9542          */
9543         memcpy(ev.key.val, key->val, key->enc_size);
9544         memset(ev.key.val + key->enc_size, 0,
9545                sizeof(ev.key.val) - key->enc_size);
9546
9547         mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9548 }
9549
9550 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9551 {
9552         struct mgmt_ev_new_irk ev;
9553
9554         memset(&ev, 0, sizeof(ev));
9555
9556         ev.store_hint = persistent;
9557
9558         bacpy(&ev.rpa, &irk->rpa);
9559         bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9560         ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
9561         memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9562
9563         mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9564 }
9565
9566 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9567                    bool persistent)
9568 {
9569         struct mgmt_ev_new_csrk ev;
9570
9571         memset(&ev, 0, sizeof(ev));
9572
9573         /* Devices using resolvable or non-resolvable random addresses
9574          * without providing an identity resolving key don't require
9575          * their signature resolving keys to be stored. Their addresses
9576          * will change the next time around.
9577          *
9578          * Only when a remote device provides an identity address do we
9579          * make sure the signature resolving key is stored. So allow
9580          * static random and public addresses here.
9581          */
9582         if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9583             (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9584                 ev.store_hint = 0x00;
9585         else
9586                 ev.store_hint = persistent;
9587
9588         bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9589         ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9590         ev.key.type = csrk->type;
9591         memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9592
9593         mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9594 }
9595
9596 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9597                          u8 bdaddr_type, u8 store_hint, u16 min_interval,
9598                          u16 max_interval, u16 latency, u16 timeout)
9599 {
9600         struct mgmt_ev_new_conn_param ev;
9601
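        /* Connection parameters are only stored for identity addresses
         * (public or static random), so skip everything else.
         */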
9602         if (!hci_is_identity_address(bdaddr, bdaddr_type))
9603                 return;
9604
9605         memset(&ev, 0, sizeof(ev));
9606         bacpy(&ev.addr.bdaddr, bdaddr);
9607         ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9608         ev.store_hint = store_hint;
9609         ev.min_interval = cpu_to_le16(min_interval);
9610         ev.max_interval = cpu_to_le16(max_interval);
9611         ev.latency = cpu_to_le16(latency);
9612         ev.timeout = cpu_to_le16(timeout);
9613
9614         mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9615 }
9616
9617 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9618                            u8 *name, u8 name_len)
9619 {
9620         struct sk_buff *skb;
9621         struct mgmt_ev_device_connected *ev;
9622         u16 eir_len = 0;
9623         u32 flags = 0;
9624
9625         if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9626                 return;
9627
9628         /* Allocate a buffer for the LE adv data or the BR/EDR EIR fields */
9629         if (conn->le_adv_data_len > 0)
9630                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9631                                      sizeof(*ev) + conn->le_adv_data_len);
9632         else
9633                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9634                                      sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9635                                      eir_precalc_len(sizeof(conn->dev_class)));
9636
9637         ev = skb_put(skb, sizeof(*ev));
9638         bacpy(&ev->addr.bdaddr, &conn->dst);
9639         ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9640
9641         if (conn->out)
9642                 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9643
9644         ev->flags = __cpu_to_le32(flags);
9645
9646         /* We must ensure that the EIR Data fields are ordered and
9647          * unique. Keep it simple for now and avoid the problem by not
9648          * adding any BR/EDR data to the LE adv.
9649          */
9650         if (conn->le_adv_data_len > 0) {
9651                 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9652                 eir_len = conn->le_adv_data_len;
9653         } else {
9654                 if (name)
9655                         eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9656
9657                 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9658                         eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9659                                                     conn->dev_class, sizeof(conn->dev_class));
9660         }
9661
9662         ev->eir_len = cpu_to_le16(eir_len);
9663
9664         mgmt_event_skb(skb, NULL);
9665 }
9666
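/* Complete a pending Disconnect command and hand its socket (with a
 * reference held) back to the caller so the Device Disconnected event can
 * skip that socket.
 */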
9667 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9668 {
9669         struct sock **sk = data;
9670
9671         cmd->cmd_complete(cmd, 0);
9672
9673         *sk = cmd->sk;
9674         sock_hold(*sk);
9675
9676         mgmt_pending_remove(cmd);
9677 }
9678
9679 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9680 {
9681         struct hci_dev *hdev = data;
9682         struct mgmt_cp_unpair_device *cp = cmd->param;
9683
9684         device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9685
9686         cmd->cmd_complete(cmd, 0);
9687         mgmt_pending_remove(cmd);
9688 }
9689
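/* Return true if a Set Powered (off) command is currently pending, i.e.
 * mgmt is in the middle of powering the controller down.
 */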
9690 bool mgmt_powering_down(struct hci_dev *hdev)
9691 {
9692         struct mgmt_pending_cmd *cmd;
9693         struct mgmt_mode *cp;
9694
9695         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9696         if (!cmd)
9697                 return false;
9698
9699         cp = cmd->param;
9700         if (!cp->val)
9701                 return true;
9702
9703         return false;
9704 }
9705
9706 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9707                               u8 link_type, u8 addr_type, u8 reason,
9708                               bool mgmt_connected)
9709 {
9710         struct mgmt_ev_device_disconnected ev;
9711         struct sock *sk = NULL;
9712
9713         if (!mgmt_connected)
9714                 return;
9715
9716         if (link_type != ACL_LINK && link_type != LE_LINK)
9717                 return;
9718
9719         mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9720
9721         bacpy(&ev.addr.bdaddr, bdaddr);
9722         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9723         ev.reason = reason;
9724
9725         /* Report disconnects due to suspend */
9726         if (hdev->suspended)
9727                 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9728
9729         mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9730
9731         if (sk)
9732                 sock_put(sk);
9733
9734         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9735                              hdev);
9736 }
9737
9738 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9739                             u8 link_type, u8 addr_type, u8 status)
9740 {
9741         u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9742         struct mgmt_cp_disconnect *cp;
9743         struct mgmt_pending_cmd *cmd;
9744
9745         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9746                              hdev);
9747
9748         cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9749         if (!cmd)
9750                 return;
9751
9752         cp = cmd->param;
9753
9754         if (bacmp(bdaddr, &cp->addr.bdaddr))
9755                 return;
9756
9757         if (cp->addr.type != bdaddr_type)
9758                 return;
9759
9760         cmd->cmd_complete(cmd, mgmt_status(status));
9761         mgmt_pending_remove(cmd);
9762 }
9763
9764 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9765                          u8 addr_type, u8 status)
9766 {
9767         struct mgmt_ev_connect_failed ev;
9768
9769         bacpy(&ev.addr.bdaddr, bdaddr);
9770         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9771         ev.status = mgmt_status(status);
9772
9773         mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9774 }
9775
9776 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9777 {
9778         struct mgmt_ev_pin_code_request ev;
9779
9780         bacpy(&ev.addr.bdaddr, bdaddr);
9781         ev.addr.type = BDADDR_BREDR;
9782         ev.secure = secure;
9783
9784         mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9785 }
9786
9787 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9788                                   u8 status)
9789 {
9790         struct mgmt_pending_cmd *cmd;
9791
9792         cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9793         if (!cmd)
9794                 return;
9795
9796         cmd->cmd_complete(cmd, mgmt_status(status));
9797         mgmt_pending_remove(cmd);
9798 }
9799
9800 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9801                                       u8 status)
9802 {
9803         struct mgmt_pending_cmd *cmd;
9804
9805         cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9806         if (!cmd)
9807                 return;
9808
9809         cmd->cmd_complete(cmd, mgmt_status(status));
9810         mgmt_pending_remove(cmd);
9811 }
9812
9813 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9814                               u8 link_type, u8 addr_type, u32 value,
9815                               u8 confirm_hint)
9816 {
9817         struct mgmt_ev_user_confirm_request ev;
9818
9819         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9820
9821         bacpy(&ev.addr.bdaddr, bdaddr);
9822         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9823         ev.confirm_hint = confirm_hint;
9824         ev.value = cpu_to_le32(value);
9825
9826         return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9827                           NULL);
9828 }
9829
9830 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9831                               u8 link_type, u8 addr_type)
9832 {
9833         struct mgmt_ev_user_passkey_request ev;
9834
9835         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9836
9837         bacpy(&ev.addr.bdaddr, bdaddr);
9838         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9839
9840         return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9841                           NULL);
9842 }
9843
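/* Common completion helper for the user confirm/passkey (neg) reply
 * commands: complete the matching pending command with the translated
 * status and remove it.
 */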
9844 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9845                                       u8 link_type, u8 addr_type, u8 status,
9846                                       u8 opcode)
9847 {
9848         struct mgmt_pending_cmd *cmd;
9849
9850         cmd = pending_find(opcode, hdev);
9851         if (!cmd)
9852                 return -ENOENT;
9853
9854         cmd->cmd_complete(cmd, mgmt_status(status));
9855         mgmt_pending_remove(cmd);
9856
9857         return 0;
9858 }
9859
9860 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9861                                      u8 link_type, u8 addr_type, u8 status)
9862 {
9863         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9864                                           status, MGMT_OP_USER_CONFIRM_REPLY);
9865 }
9866
9867 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9868                                          u8 link_type, u8 addr_type, u8 status)
9869 {
9870         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9871                                           status,
9872                                           MGMT_OP_USER_CONFIRM_NEG_REPLY);
9873 }
9874
9875 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9876                                      u8 link_type, u8 addr_type, u8 status)
9877 {
9878         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9879                                           status, MGMT_OP_USER_PASSKEY_REPLY);
9880 }
9881
9882 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9883                                          u8 link_type, u8 addr_type, u8 status)
9884 {
9885         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9886                                           status,
9887                                           MGMT_OP_USER_PASSKEY_NEG_REPLY);
9888 }
9889
9890 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9891                              u8 link_type, u8 addr_type, u32 passkey,
9892                              u8 entered)
9893 {
9894         struct mgmt_ev_passkey_notify ev;
9895
9896         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9897
9898         bacpy(&ev.addr.bdaddr, bdaddr);
9899         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9900         ev.passkey = __cpu_to_le32(passkey);
9901         ev.entered = entered;
9902
9903         return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9904 }
9905
9906 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9907 {
9908         struct mgmt_ev_auth_failed ev;
9909         struct mgmt_pending_cmd *cmd;
9910         u8 status = mgmt_status(hci_status);
9911
9912         bacpy(&ev.addr.bdaddr, &conn->dst);
9913         ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9914         ev.status = status;
9915
9916         cmd = find_pairing(conn);
9917
9918         mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9919                     cmd ? cmd->sk : NULL);
9920
9921         if (cmd) {
9922                 cmd->cmd_complete(cmd, status);
9923                 mgmt_pending_remove(cmd);
9924         }
9925 }
9926
9927 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9928 {
9929         struct cmd_lookup match = { NULL, hdev };
9930         bool changed;
9931
9932         if (status) {
9933                 u8 mgmt_err = mgmt_status(status);
9934                 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9935                                      cmd_status_rsp, &mgmt_err);
9936                 return;
9937         }
9938
9939         if (test_bit(HCI_AUTH, &hdev->flags))
9940                 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9941         else
9942                 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9943
9944         mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9945                              &match);
9946
9947         if (changed)
9948                 new_settings(hdev, match.sk);
9949
9950         if (match.sk)
9951                 sock_put(match.sk);
9952 }
9953
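/* mgmt_pending_foreach() helper: remember the socket of the first matching
 * pending command, taking a reference that the caller drops with sock_put().
 */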
9954 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9955 {
9956         struct cmd_lookup *match = data;
9957
9958         if (match->sk == NULL) {
9959                 match->sk = cmd->sk;
9960                 sock_hold(match->sk);
9961         }
9962 }
9963
9964 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9965                                     u8 status)
9966 {
9967         struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9968
9969         mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9970         mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9971         mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9972
9973         if (!status) {
9974                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9975                                    3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9976                 ext_info_changed(hdev, NULL);
9977         }
9978
9979         if (match.sk)
9980                 sock_put(match.sk);
9981 }
9982
9983 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9984 {
9985         struct mgmt_cp_set_local_name ev;
9986         struct mgmt_pending_cmd *cmd;
9987
9988         if (status)
9989                 return;
9990
9991         memset(&ev, 0, sizeof(ev));
9992         memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9993         memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9994
9995         cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9996         if (!cmd) {
9997                 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9998
9999                 /* If this is an HCI command related to powering on the
10000                  * HCI dev, don't send any mgmt signals.
10001                  */
10002                 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10003                         return;
10004         }
10005
10006         mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10007                            HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10008         ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10009 }
10010
10011 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10012 {
10013         int i;
10014
10015         for (i = 0; i < uuid_count; i++) {
10016                 if (!memcmp(uuid, uuids[i], 16))
10017                         return true;
10018         }
10019
10020         return false;
10021 }
10022
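/* Walk the EIR/advertising data structures and report whether any listed
 * 16-, 32- or 128-bit service UUID matches one of the given 128-bit UUIDs.
 * 16- and 32-bit UUIDs are expanded against the Bluetooth Base UUID before
 * comparison.
 */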
10023 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10024 {
10025         u16 parsed = 0;
10026
10027         while (parsed < eir_len) {
10028                 u8 field_len = eir[0];
10029                 u8 uuid[16];
10030                 int i;
10031
10032                 if (field_len == 0)
10033                         break;
10034
10035                 if (eir_len - parsed < field_len + 1)
10036                         break;
10037
10038                 switch (eir[1]) {
10039                 case EIR_UUID16_ALL:
10040                 case EIR_UUID16_SOME:
10041                         for (i = 0; i + 3 <= field_len; i += 2) {
10042                                 memcpy(uuid, bluetooth_base_uuid, 16);
10043                                 uuid[13] = eir[i + 3];
10044                                 uuid[12] = eir[i + 2];
10045                                 if (has_uuid(uuid, uuid_count, uuids))
10046                                         return true;
10047                         }
10048                         break;
10049                 case EIR_UUID32_ALL:
10050                 case EIR_UUID32_SOME:
10051                         for (i = 0; i + 5 <= field_len; i += 4) {
10052                                 memcpy(uuid, bluetooth_base_uuid, 16);
10053                                 uuid[15] = eir[i + 5];
10054                                 uuid[14] = eir[i + 4];
10055                                 uuid[13] = eir[i + 3];
10056                                 uuid[12] = eir[i + 2];
10057                                 if (has_uuid(uuid, uuid_count, uuids))
10058                                         return true;
10059                         }
10060                         break;
10061                 case EIR_UUID128_ALL:
10062                 case EIR_UUID128_SOME:
10063                         for (i = 0; i + 17 <= field_len; i += 16) {
10064                                 memcpy(uuid, eir + i + 2, 16);
10065                                 if (has_uuid(uuid, uuid_count, uuids))
10066                                         return true;
10067                         }
10068                         break;
10069                 }
10070
10071                 parsed += field_len + 1;
10072                 eir += field_len + 1;
10073         }
10074
10075         return false;
10076 }
10077
10078 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10079                             u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10080 {
10081         /* If an RSSI threshold has been specified, and
10082          * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10083          * an RSSI below the threshold will be dropped. If the quirk is set,
10084          * let the result through for further processing, as we might need to
10085          * restart the scan.
10086          *
10087          * Results from BR/EDR devices (pre 1.2) that provide no RSSI during
10088          * inquiry are also dropped.
10089          */
10090         if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10091             (rssi == HCI_RSSI_INVALID ||
10092             (rssi < hdev->discovery.rssi &&
10093              !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10094                 return false;
10095
10096         if (hdev->discovery.uuid_count != 0) {
10097                 /* If a list of UUIDs is provided in filter, results with no
10098                  * matching UUID should be dropped.
10099                  */
10100                 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10101                                    hdev->discovery.uuids) &&
10102                     !eir_has_uuids(scan_rsp, scan_rsp_len,
10103                                    hdev->discovery.uuid_count,
10104                                    hdev->discovery.uuids))
10105                         return false;
10106         }
10107
10108         /* If duplicate filtering does not report RSSI changes, then restart
10109          * scanning to ensure updated results with updated RSSI values.
10110          */
10111         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10112                 /* Validate RSSI value against the RSSI threshold once more. */
10113                 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10114                     rssi < hdev->discovery.rssi)
10115                         return false;
10116         }
10117
10118         return true;
10119 }
10120
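      /* Notify userspace via MGMT_EV_ADV_MONITOR_DEVICE_LOST that a device
       * previously matched by the given Advertisement Monitor handle is no
       * longer being tracked.
       */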
10121 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10122                                   bdaddr_t *bdaddr, u8 addr_type)
10123 {
10124         struct mgmt_ev_adv_monitor_device_lost ev;
10125
10126         ev.monitor_handle = cpu_to_le16(handle);
10127         bacpy(&ev.addr.bdaddr, bdaddr);
10128         ev.addr.type = addr_type;
10129
10130         mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10131                    NULL);
10132 }
10133
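      /* Build an MGMT_EV_ADV_MONITOR_DEVICE_FOUND event by prepending the
       * matched monitor handle to a copy of the DEVICE_FOUND payload in @skb
       * and send it to every management socket except @skip_sk.
       */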
10134 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10135                                                struct sk_buff *skb,
10136                                                struct sock *skip_sk,
10137                                                u16 handle)
10138 {
10139         struct sk_buff *advmon_skb;
10140         size_t advmon_skb_len;
10141         __le16 *monitor_handle;
10142
10143         if (!skb)
10144                 return;
10145
10146         advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10147                           sizeof(struct mgmt_ev_device_found)) + skb->len;
10148         advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10149                                     advmon_skb_len);
10150         if (!advmon_skb)
10151                 return;
10152
10153         /* ADV_MONITOR_DEVICE_FOUND is similar to the DEVICE_FOUND event except
10154          * that it also carries 'monitor_handle'. Make a copy of DEVICE_FOUND and
10155          * store the monitor_handle of the matched monitor.
10156          */
10157         monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10158         *monitor_handle = cpu_to_le16(handle);
10159         skb_put_data(advmon_skb, skb->data, skb->len);
10160
10161         mgmt_event_skb(advmon_skb, skip_sk);
10162 }
10163
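      /* Route an advertisement report to MGMT_EV_DEVICE_FOUND and/or
       * MGMT_EV_ADV_MONITOR_DEVICE_FOUND, depending on whether it resulted
       * from kernel-initiated discovery, passive scanning or a matched
       * Advertisement Monitor. Consumes @skb.
       */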
10164 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10165                                           bdaddr_t *bdaddr, bool report_device,
10166                                           struct sk_buff *skb,
10167                                           struct sock *skip_sk)
10168 {
10169         struct monitored_device *dev, *tmp;
10170         bool matched = false;
10171         bool notified = false;
10172
10173         /* We have received the Advertisement Report because:
10174          * 1. the kernel has initiated active discovery
10175          * 2. if not, we have pend_le_reports > 0 in which case we are doing
10176          *    passive scanning
10177          * 3. if none of the above is true, we have one or more active
10178          *    Advertisement Monitors
10179          *
10180          * For cases 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10181          * and report ONLY one advertisement per device for the matched Monitor
10182          * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10183          *
10184          * For case 3, since we are not active scanning and all advertisements
10185          * received are due to a matched Advertisement Monitor, report all
10186          * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10187          */
10188         if (report_device && !hdev->advmon_pend_notify) {
10189                 mgmt_event_skb(skb, skip_sk);
10190                 return;
10191         }
10192
10193         hdev->advmon_pend_notify = false;
10194
10195         list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10196                 if (!bacmp(&dev->bdaddr, bdaddr)) {
10197                         matched = true;
10198
10199                         if (!dev->notified) {
10200                                 mgmt_send_adv_monitor_device_found(hdev, skb,
10201                                                                    skip_sk,
10202                                                                    dev->handle);
10203                                 notified = true;
10204                                 dev->notified = true;
10205                         }
10206                 }
10207
10208                 if (!dev->notified)
10209                         hdev->advmon_pend_notify = true;
10210         }
10211
10212         if (!report_device &&
10213             ((matched && !notified) || !msft_monitor_supported(hdev))) {
10214                 /* Handle 0 indicates that we are not actively scanning and this
10215                  * is either a subsequent advertisement report for an already
10216                  * matched Advertisement Monitor, or that controller offloading
10217                  * support is not available.
10218                  */
10219                 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10220         }
10221
10222         if (report_device)
10223                 mgmt_event_skb(skb, skip_sk);
10224         else
10225                 kfree_skb(skb);
10226 }
10227
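      /* Send MGMT_EV_MESH_DEVICE_FOUND for an LE advertisement, but only if
       * the advertising or scan response data contains at least one of the
       * AD types requested in hdev->mesh_ad_types (an empty list accepts
       * all advertisements).
       */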
10228 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10229                               u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10230                               u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10231                               u64 instant)
10232 {
10233         struct sk_buff *skb;
10234         struct mgmt_ev_mesh_device_found *ev;
10235         int i, j;
10236
10237         if (!hdev->mesh_ad_types[0])
10238                 goto accepted;
10239
10240         /* Scan for requested AD types */
10241         if (eir_len > 0) {
10242                 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10243                         for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10244                                 if (!hdev->mesh_ad_types[j])
10245                                         break;
10246
10247                                 if (hdev->mesh_ad_types[j] == eir[i + 1])
10248                                         goto accepted;
10249                         }
10250                 }
10251         }
10252
10253         if (scan_rsp_len > 0) {
10254                 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10255                         for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10256                                 if (!hdev->mesh_ad_types[j])
10257                                         break;
10258
10259                                 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10260                                         goto accepted;
10261                         }
10262                 }
10263         }
10264
10265         return;
10266
10267 accepted:
10268         skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10269                              sizeof(*ev) + eir_len + scan_rsp_len);
10270         if (!skb)
10271                 return;
10272
10273         ev = skb_put(skb, sizeof(*ev));
10274
10275         bacpy(&ev->addr.bdaddr, bdaddr);
10276         ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10277         ev->rssi = rssi;
10278         ev->flags = cpu_to_le32(flags);
10279         ev->instant = cpu_to_le64(instant);
10280
10281         if (eir_len > 0)
10282                 /* Copy EIR or advertising data into event */
10283                 skb_put_data(skb, eir, eir_len);
10284
10285         if (scan_rsp_len > 0)
10286                 /* Append scan response data to event */
10287                 skb_put_data(skb, scan_rsp, scan_rsp_len);
10288
10289         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10290
10291         mgmt_event_skb(skb, NULL);
10292 }
10293
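      /* Report a discovered device to userspace via MGMT_EV_DEVICE_FOUND,
       * applying the RSSI, UUID and limited-discoverable filters of the
       * current discovery and handing LE reports to the mesh and
       * Advertisement Monitor paths where appropriate.
       */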
10294 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10295                        u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10296                        u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10297                        u64 instant)
10298 {
10299         struct sk_buff *skb;
10300         struct mgmt_ev_device_found *ev;
10301         bool report_device = hci_discovery_active(hdev);
10302
10303         if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10304                 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10305                                   eir, eir_len, scan_rsp, scan_rsp_len,
10306                                   instant);
10307
10308         /* Don't send events for a non-kernel-initiated discovery. With
10309          * LE, the one exception is if we have pend_le_reports > 0, in
10310          * which case we're doing passive scanning and want these events.
10311          */
10312         if (!hci_discovery_active(hdev)) {
10313                 if (link_type == ACL_LINK)
10314                         return;
10315                 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10316                         report_device = true;
10317                 else if (!hci_is_adv_monitoring(hdev))
10318                         return;
10319         }
10320
10321         if (hdev->discovery.result_filtering) {
10322                 /* We are using service discovery */
10323                 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10324                                      scan_rsp_len))
10325                         return;
10326         }
10327
10328         if (hdev->discovery.limited) {
10329                 /* Check for limited discoverable bit */
10330                 if (dev_class) {
10331                         if (!(dev_class[1] & 0x20))
10332                                 return;
10333                 } else {
10334                         u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10335                         if (!flags || !(flags[0] & LE_AD_LIMITED))
10336                                 return;
10337                 }
10338         }
10339
10340         /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10341         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10342                              sizeof(*ev) + eir_len + scan_rsp_len + 5);
10343         if (!skb)
10344                 return;
10345
10346         ev = skb_put(skb, sizeof(*ev));
10347
10348         /* For device discovery with BR/EDR devices (pre 1.2), the RSSI
10349          * value was reported as 0 when not available. This behavior is
10350          * kept for device discovery, as required for full backwards
10351          * compatibility with the API.
10352          *
10353          * However, when using service discovery, the value 127 is
10354          * returned when the RSSI is not available.
10355          */
10356         if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10357             link_type == ACL_LINK)
10358                 rssi = 0;
10359
10360         bacpy(&ev->addr.bdaddr, bdaddr);
10361         ev->addr.type = link_to_bdaddr(link_type, addr_type);
10362         ev->rssi = rssi;
10363         ev->flags = cpu_to_le32(flags);
10364
10365         if (eir_len > 0)
10366                 /* Copy EIR or advertising data into event */
10367                 skb_put_data(skb, eir, eir_len);
10368
10369         if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10370                 u8 eir_cod[5];
10371
10372                 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10373                                            dev_class, 3);
10374                 skb_put_data(skb, eir_cod, sizeof(eir_cod));
10375         }
10376
10377         if (scan_rsp_len > 0)
10378                 /* Append scan response data to event */
10379                 skb_put_data(skb, scan_rsp, scan_rsp_len);
10380
10381         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10382
10383         mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10384 }
10385
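      /* Send MGMT_EV_DEVICE_FOUND carrying the resolved remote name as an
       * EIR_NAME_COMPLETE field, or the NAME_REQUEST_FAILED flag if no name
       * could be obtained.
       */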
10386 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10387                       u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10388 {
10389         struct sk_buff *skb;
10390         struct mgmt_ev_device_found *ev;
10391         u16 eir_len = 0;
10392         u32 flags = 0;
10393
10394         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10395                              sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
              if (!skb)
                      return;
10396
10397         ev = skb_put(skb, sizeof(*ev));
10398         bacpy(&ev->addr.bdaddr, bdaddr);
10399         ev->addr.type = link_to_bdaddr(link_type, addr_type);
10400         ev->rssi = rssi;
10401
10402         if (name)
10403                 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10404         else
10405                 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10406
10407         ev->eir_len = cpu_to_le16(eir_len);
10408         ev->flags = cpu_to_le32(flags);
10409
10410         mgmt_event_skb(skb, NULL);
10411 }
10412
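      /* Notify userspace of a discovery state change via
       * MGMT_EV_DISCOVERING.
       */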
10413 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10414 {
10415         struct mgmt_ev_discovering ev;
10416
10417         bt_dev_dbg(hdev, "discovering %u", discovering);
10418
10419         memset(&ev, 0, sizeof(ev));
10420         ev.type = hdev->discovery.type;
10421         ev.discovering = discovering;
10422
10423         mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10424 }
10425
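      /* Notify userspace of the controller suspend state via
       * MGMT_EV_CONTROLLER_SUSPEND.
       */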
10426 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10427 {
10428         struct mgmt_ev_controller_suspend ev;
10429
10430         ev.suspend_state = state;
10431         mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10432 }
10433
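      /* Notify userspace that the controller resumed, including the wake
       * reason and, when available, the address of the device that caused
       * the wakeup.
       */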
10434 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10435                    u8 addr_type)
10436 {
10437         struct mgmt_ev_controller_resume ev;
10438
10439         ev.wake_reason = reason;
10440         if (bdaddr) {
10441                 bacpy(&ev.addr.bdaddr, bdaddr);
10442                 ev.addr.type = addr_type;
10443         } else {
10444                 memset(&ev.addr, 0, sizeof(ev.addr));
10445         }
10446
10447         mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10448 }
10449
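      /* Control channel definition registered with the HCI core; it maps
       * HCI_CHANNEL_CONTROL sockets to the mgmt command handlers above.
       */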
10450 static struct hci_mgmt_chan chan = {
10451         .channel        = HCI_CHANNEL_CONTROL,
10452         .handler_count  = ARRAY_SIZE(mgmt_handlers),
10453         .handlers       = mgmt_handlers,
10454         .hdev_init      = mgmt_init_hdev,
10455 };
10456
10457 int mgmt_init(void)
10458 {
10459         return hci_mgmt_chan_register(&chan);
10460 }
10461
10462 void mgmt_exit(void)
10463 {
10464         hci_mgmt_chan_unregister(&chan);
10465 }
10466
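      /* Flush any mesh transmissions still pending for the closing
       * management socket across all registered controllers.
       */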
10467 void mgmt_cleanup(struct sock *sk)
10468 {
10469         struct mgmt_mesh_tx *mesh_tx;
10470         struct hci_dev *hdev;
10471
10472         read_lock(&hci_dev_list_lock);
10473
10474         list_for_each_entry(hdev, &hci_dev_list, list) {
10475                 do {
10476                         mesh_tx = mgmt_mesh_next(hdev, sk);
10477
10478                         if (mesh_tx)
10479                                 mesh_send_complete(hdev, mesh_tx, true);
10480                 } while (mesh_tx);
10481         }
10482
10483         read_unlock(&hci_dev_list_lock);
10484 }