2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 22
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
55 MGMT_OP_SET_LINK_SECURITY,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
113 MGMT_OP_GET_PHY_CONFIGURATION,
114 MGMT_OP_SET_PHY_CONFIGURATION,
115 MGMT_OP_SET_BLOCKED_KEYS,
116 MGMT_OP_SET_WIDEBAND_SPEECH,
117 MGMT_OP_READ_CONTROLLER_CAP,
118 MGMT_OP_READ_EXP_FEATURES_INFO,
119 MGMT_OP_SET_EXP_FEATURE,
120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 MGMT_OP_GET_DEVICE_FLAGS,
125 MGMT_OP_SET_DEVICE_FLAGS,
126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 MGMT_OP_REMOVE_ADV_MONITOR,
129 MGMT_OP_ADD_EXT_ADV_PARAMS,
130 MGMT_OP_ADD_EXT_ADV_DATA,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 MGMT_OP_SET_MESH_RECEIVER,
133 MGMT_OP_MESH_READ_FEATURES,
135 MGMT_OP_MESH_SEND_CANCEL,
138 static const u16 mgmt_events[] = {
139 MGMT_EV_CONTROLLER_ERROR,
141 MGMT_EV_INDEX_REMOVED,
142 MGMT_EV_NEW_SETTINGS,
143 MGMT_EV_CLASS_OF_DEV_CHANGED,
144 MGMT_EV_LOCAL_NAME_CHANGED,
145 MGMT_EV_NEW_LINK_KEY,
146 MGMT_EV_NEW_LONG_TERM_KEY,
147 MGMT_EV_DEVICE_CONNECTED,
148 MGMT_EV_DEVICE_DISCONNECTED,
149 MGMT_EV_CONNECT_FAILED,
150 MGMT_EV_PIN_CODE_REQUEST,
151 MGMT_EV_USER_CONFIRM_REQUEST,
152 MGMT_EV_USER_PASSKEY_REQUEST,
154 MGMT_EV_DEVICE_FOUND,
156 MGMT_EV_DEVICE_BLOCKED,
157 MGMT_EV_DEVICE_UNBLOCKED,
158 MGMT_EV_DEVICE_UNPAIRED,
159 MGMT_EV_PASSKEY_NOTIFY,
162 MGMT_EV_DEVICE_ADDED,
163 MGMT_EV_DEVICE_REMOVED,
164 MGMT_EV_NEW_CONN_PARAM,
165 MGMT_EV_UNCONF_INDEX_ADDED,
166 MGMT_EV_UNCONF_INDEX_REMOVED,
167 MGMT_EV_NEW_CONFIG_OPTIONS,
168 MGMT_EV_EXT_INDEX_ADDED,
169 MGMT_EV_EXT_INDEX_REMOVED,
170 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 MGMT_EV_ADVERTISING_ADDED,
172 MGMT_EV_ADVERTISING_REMOVED,
173 MGMT_EV_EXT_INFO_CHANGED,
174 MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 MGMT_EV_EXP_FEATURE_CHANGED,
176 MGMT_EV_DEVICE_FLAGS_CHANGED,
177 MGMT_EV_ADV_MONITOR_ADDED,
178 MGMT_EV_ADV_MONITOR_REMOVED,
179 MGMT_EV_CONTROLLER_SUSPEND,
180 MGMT_EV_CONTROLLER_RESUME,
181 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
185 static const u16 mgmt_untrusted_commands[] = {
186 MGMT_OP_READ_INDEX_LIST,
188 MGMT_OP_READ_UNCONF_INDEX_LIST,
189 MGMT_OP_READ_CONFIG_INFO,
190 MGMT_OP_READ_EXT_INDEX_LIST,
191 MGMT_OP_READ_EXT_INFO,
192 MGMT_OP_READ_CONTROLLER_CAP,
193 MGMT_OP_READ_EXP_FEATURES_INFO,
194 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
198 static const u16 mgmt_untrusted_events[] = {
200 MGMT_EV_INDEX_REMOVED,
201 MGMT_EV_NEW_SETTINGS,
202 MGMT_EV_CLASS_OF_DEV_CHANGED,
203 MGMT_EV_LOCAL_NAME_CHANGED,
204 MGMT_EV_UNCONF_INDEX_ADDED,
205 MGMT_EV_UNCONF_INDEX_REMOVED,
206 MGMT_EV_NEW_CONFIG_OPTIONS,
207 MGMT_EV_EXT_INDEX_ADDED,
208 MGMT_EV_EXT_INDEX_REMOVED,
209 MGMT_EV_EXT_INFO_CHANGED,
210 MGMT_EV_EXP_FEATURE_CHANGED,
213 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
218 /* HCI to MGMT error code conversion table */
219 static const u8 mgmt_status_table[] = {
221 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
222 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
223 MGMT_STATUS_FAILED, /* Hardware Failure */
224 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
225 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
226 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
227 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
228 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
229 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
230 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
231 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
232 MGMT_STATUS_BUSY, /* Command Disallowed */
233 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
234 MGMT_STATUS_REJECTED, /* Rejected Security */
235 MGMT_STATUS_REJECTED, /* Rejected Personal */
236 MGMT_STATUS_TIMEOUT, /* Host Timeout */
237 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
238 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
239 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
240 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
241 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
242 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
243 MGMT_STATUS_BUSY, /* Repeated Attempts */
244 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
245 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
246 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
247 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
248 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
249 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
250 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
251 MGMT_STATUS_FAILED, /* Unspecified Error */
252 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
253 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
254 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
255 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
256 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
257 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
258 MGMT_STATUS_FAILED, /* Unit Link Key Used */
259 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
260 MGMT_STATUS_TIMEOUT, /* Instant Passed */
261 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
262 MGMT_STATUS_FAILED, /* Transaction Collision */
263 MGMT_STATUS_FAILED, /* Reserved for future use */
264 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
265 MGMT_STATUS_REJECTED, /* QoS Rejected */
266 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
267 MGMT_STATUS_REJECTED, /* Insufficient Security */
268 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
269 MGMT_STATUS_FAILED, /* Reserved for future use */
270 MGMT_STATUS_BUSY, /* Role Switch Pending */
271 MGMT_STATUS_FAILED, /* Reserved for future use */
272 MGMT_STATUS_FAILED, /* Slot Violation */
273 MGMT_STATUS_FAILED, /* Role Switch Failed */
274 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
275 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
276 MGMT_STATUS_BUSY, /* Host Busy Pairing */
277 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
278 MGMT_STATUS_BUSY, /* Controller Busy */
279 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
280 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
281 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
282 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
283 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
286 static u8 mgmt_errno_status(int err)
290 return MGMT_STATUS_SUCCESS;
292 return MGMT_STATUS_REJECTED;
294 return MGMT_STATUS_INVALID_PARAMS;
296 return MGMT_STATUS_NOT_SUPPORTED;
298 return MGMT_STATUS_BUSY;
300 return MGMT_STATUS_AUTH_FAILED;
302 return MGMT_STATUS_NO_RESOURCES;
304 return MGMT_STATUS_ALREADY_CONNECTED;
306 return MGMT_STATUS_DISCONNECTED;
309 return MGMT_STATUS_FAILED;
312 static u8 mgmt_status(int err)
315 return mgmt_errno_status(err);
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
320 return MGMT_STATUS_FAILED;
323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
326 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331 u16 len, int flag, struct sock *skip_sk)
333 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338 struct sock *skip_sk)
340 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 HCI_SOCK_TRUSTED, skip_sk);
344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
346 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
350 static u8 le_addr_type(u8 mgmt_addr_type)
352 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 return ADDR_LE_DEV_PUBLIC;
355 return ADDR_LE_DEV_RANDOM;
358 void mgmt_fill_version_info(void *ver)
360 struct mgmt_rp_read_version *rp = ver;
362 rp->version = MGMT_VERSION;
363 rp->revision = cpu_to_le16(MGMT_REVISION);
366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
369 struct mgmt_rp_read_version rp;
371 bt_dev_dbg(hdev, "sock %p", sk);
373 mgmt_fill_version_info(&rp);
375 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
382 struct mgmt_rp_read_commands *rp;
383 u16 num_commands, num_events;
387 bt_dev_dbg(hdev, "sock %p", sk);
389 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 num_commands = ARRAY_SIZE(mgmt_commands);
391 num_events = ARRAY_SIZE(mgmt_events);
393 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 num_events = ARRAY_SIZE(mgmt_untrusted_events);
397 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
399 rp = kmalloc(rp_size, GFP_KERNEL);
403 rp->num_commands = cpu_to_le16(num_commands);
404 rp->num_events = cpu_to_le16(num_events);
406 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 __le16 *opcode = rp->opcodes;
409 for (i = 0; i < num_commands; i++, opcode++)
410 put_unaligned_le16(mgmt_commands[i], opcode);
412 for (i = 0; i < num_events; i++, opcode++)
413 put_unaligned_le16(mgmt_events[i], opcode);
415 __le16 *opcode = rp->opcodes;
417 for (i = 0; i < num_commands; i++, opcode++)
418 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
420 for (i = 0; i < num_events; i++, opcode++)
421 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
434 struct mgmt_rp_read_index_list *rp;
440 bt_dev_dbg(hdev, "sock %p", sk);
442 read_lock(&hci_dev_list_lock);
445 list_for_each_entry(d, &hci_dev_list, list) {
446 if (d->dev_type == HCI_PRIMARY &&
447 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
451 rp_len = sizeof(*rp) + (2 * count);
452 rp = kmalloc(rp_len, GFP_ATOMIC);
454 read_unlock(&hci_dev_list_lock);
459 list_for_each_entry(d, &hci_dev_list, list) {
460 if (hci_dev_test_flag(d, HCI_SETUP) ||
461 hci_dev_test_flag(d, HCI_CONFIG) ||
462 hci_dev_test_flag(d, HCI_USER_CHANNEL))
465 /* Devices marked as raw-only are neither configured
466 * nor unconfigured controllers.
468 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
471 if (d->dev_type == HCI_PRIMARY &&
472 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
473 rp->index[count++] = cpu_to_le16(d->id);
474 bt_dev_dbg(hdev, "Added hci%u", d->id);
478 rp->num_controllers = cpu_to_le16(count);
479 rp_len = sizeof(*rp) + (2 * count);
481 read_unlock(&hci_dev_list_lock);
483 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
491 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
492 void *data, u16 data_len)
494 struct mgmt_rp_read_unconf_index_list *rp;
500 bt_dev_dbg(hdev, "sock %p", sk);
502 read_lock(&hci_dev_list_lock);
505 list_for_each_entry(d, &hci_dev_list, list) {
506 if (d->dev_type == HCI_PRIMARY &&
507 hci_dev_test_flag(d, HCI_UNCONFIGURED))
511 rp_len = sizeof(*rp) + (2 * count);
512 rp = kmalloc(rp_len, GFP_ATOMIC);
514 read_unlock(&hci_dev_list_lock);
519 list_for_each_entry(d, &hci_dev_list, list) {
520 if (hci_dev_test_flag(d, HCI_SETUP) ||
521 hci_dev_test_flag(d, HCI_CONFIG) ||
522 hci_dev_test_flag(d, HCI_USER_CHANNEL))
525 /* Devices marked as raw-only are neither configured
526 * nor unconfigured controllers.
528 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
531 if (d->dev_type == HCI_PRIMARY &&
532 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
533 rp->index[count++] = cpu_to_le16(d->id);
534 bt_dev_dbg(hdev, "Added hci%u", d->id);
538 rp->num_controllers = cpu_to_le16(count);
539 rp_len = sizeof(*rp) + (2 * count);
541 read_unlock(&hci_dev_list_lock);
543 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
544 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
551 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
552 void *data, u16 data_len)
554 struct mgmt_rp_read_ext_index_list *rp;
559 bt_dev_dbg(hdev, "sock %p", sk);
561 read_lock(&hci_dev_list_lock);
564 list_for_each_entry(d, &hci_dev_list, list) {
565 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
569 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
571 read_unlock(&hci_dev_list_lock);
576 list_for_each_entry(d, &hci_dev_list, list) {
577 if (hci_dev_test_flag(d, HCI_SETUP) ||
578 hci_dev_test_flag(d, HCI_CONFIG) ||
579 hci_dev_test_flag(d, HCI_USER_CHANNEL))
582 /* Devices marked as raw-only are neither configured
583 * nor unconfigured controllers.
585 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
588 if (d->dev_type == HCI_PRIMARY) {
589 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
590 rp->entry[count].type = 0x01;
592 rp->entry[count].type = 0x00;
593 } else if (d->dev_type == HCI_AMP) {
594 rp->entry[count].type = 0x02;
599 rp->entry[count].bus = d->bus;
600 rp->entry[count++].index = cpu_to_le16(d->id);
601 bt_dev_dbg(hdev, "Added hci%u", d->id);
604 rp->num_controllers = cpu_to_le16(count);
606 read_unlock(&hci_dev_list_lock);
608 /* If this command is called at least once, then all the
609 * default index and unconfigured index events are disabled
610 * and from now on only extended index events are used.
612 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
613 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
614 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
616 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
617 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
618 struct_size(rp, entry, count));
625 static bool is_configured(struct hci_dev *hdev)
627 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
631 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 !bacmp(&hdev->public_addr, BDADDR_ANY))
639 static __le32 get_missing_options(struct hci_dev *hdev)
643 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645 options |= MGMT_OPTION_EXTERNAL_CONFIG;
647 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649 !bacmp(&hdev->public_addr, BDADDR_ANY))
650 options |= MGMT_OPTION_PUBLIC_ADDRESS;
652 return cpu_to_le32(options);
655 static int new_options(struct hci_dev *hdev, struct sock *skip)
657 __le32 options = get_missing_options(hdev);
659 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
660 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
663 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
665 __le32 options = get_missing_options(hdev);
667 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
671 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
672 void *data, u16 data_len)
674 struct mgmt_rp_read_config_info rp;
677 bt_dev_dbg(hdev, "sock %p", sk);
681 memset(&rp, 0, sizeof(rp));
682 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
684 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
685 options |= MGMT_OPTION_EXTERNAL_CONFIG;
687 if (hdev->set_bdaddr)
688 options |= MGMT_OPTION_PUBLIC_ADDRESS;
690 rp.supported_options = cpu_to_le32(options);
691 rp.missing_options = get_missing_options(hdev);
693 hci_dev_unlock(hdev);
695 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
699 static u32 get_supported_phys(struct hci_dev *hdev)
701 u32 supported_phys = 0;
703 if (lmp_bredr_capable(hdev)) {
704 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
706 if (hdev->features[0][0] & LMP_3SLOT)
707 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
709 if (hdev->features[0][0] & LMP_5SLOT)
710 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
712 if (lmp_edr_2m_capable(hdev)) {
713 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
715 if (lmp_edr_3slot_capable(hdev))
716 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
718 if (lmp_edr_5slot_capable(hdev))
719 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
721 if (lmp_edr_3m_capable(hdev)) {
722 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
724 if (lmp_edr_3slot_capable(hdev))
725 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
727 if (lmp_edr_5slot_capable(hdev))
728 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
733 if (lmp_le_capable(hdev)) {
734 supported_phys |= MGMT_PHY_LE_1M_TX;
735 supported_phys |= MGMT_PHY_LE_1M_RX;
737 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
738 supported_phys |= MGMT_PHY_LE_2M_TX;
739 supported_phys |= MGMT_PHY_LE_2M_RX;
742 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
743 supported_phys |= MGMT_PHY_LE_CODED_TX;
744 supported_phys |= MGMT_PHY_LE_CODED_RX;
748 return supported_phys;
751 static u32 get_selected_phys(struct hci_dev *hdev)
753 u32 selected_phys = 0;
755 if (lmp_bredr_capable(hdev)) {
756 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
758 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
759 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
761 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
762 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
764 if (lmp_edr_2m_capable(hdev)) {
765 if (!(hdev->pkt_type & HCI_2DH1))
766 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
768 if (lmp_edr_3slot_capable(hdev) &&
769 !(hdev->pkt_type & HCI_2DH3))
770 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
772 if (lmp_edr_5slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH5))
774 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
776 if (lmp_edr_3m_capable(hdev)) {
777 if (!(hdev->pkt_type & HCI_3DH1))
778 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
780 if (lmp_edr_3slot_capable(hdev) &&
781 !(hdev->pkt_type & HCI_3DH3))
782 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
784 if (lmp_edr_5slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH5))
786 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
791 if (lmp_le_capable(hdev)) {
792 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
793 selected_phys |= MGMT_PHY_LE_1M_TX;
795 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
796 selected_phys |= MGMT_PHY_LE_1M_RX;
798 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
799 selected_phys |= MGMT_PHY_LE_2M_TX;
801 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
802 selected_phys |= MGMT_PHY_LE_2M_RX;
804 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
805 selected_phys |= MGMT_PHY_LE_CODED_TX;
807 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
808 selected_phys |= MGMT_PHY_LE_CODED_RX;
811 return selected_phys;
814 static u32 get_configurable_phys(struct hci_dev *hdev)
816 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
820 static u32 get_supported_settings(struct hci_dev *hdev)
824 settings |= MGMT_SETTING_POWERED;
825 settings |= MGMT_SETTING_BONDABLE;
826 settings |= MGMT_SETTING_DEBUG_KEYS;
827 settings |= MGMT_SETTING_CONNECTABLE;
828 settings |= MGMT_SETTING_DISCOVERABLE;
830 if (lmp_bredr_capable(hdev)) {
831 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832 settings |= MGMT_SETTING_FAST_CONNECTABLE;
833 settings |= MGMT_SETTING_BREDR;
834 settings |= MGMT_SETTING_LINK_SECURITY;
836 if (lmp_ssp_capable(hdev)) {
837 settings |= MGMT_SETTING_SSP;
840 if (lmp_sc_capable(hdev))
841 settings |= MGMT_SETTING_SECURE_CONN;
843 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
845 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
848 if (lmp_le_capable(hdev)) {
849 settings |= MGMT_SETTING_LE;
850 settings |= MGMT_SETTING_SECURE_CONN;
851 settings |= MGMT_SETTING_PRIVACY;
852 settings |= MGMT_SETTING_STATIC_ADDRESS;
853 settings |= MGMT_SETTING_ADVERTISING;
856 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
858 settings |= MGMT_SETTING_CONFIGURATION;
860 if (cis_central_capable(hdev))
861 settings |= MGMT_SETTING_CIS_CENTRAL;
863 if (cis_peripheral_capable(hdev))
864 settings |= MGMT_SETTING_CIS_PERIPHERAL;
866 settings |= MGMT_SETTING_PHY_CONFIGURATION;
871 static u32 get_current_settings(struct hci_dev *hdev)
875 if (hdev_is_powered(hdev))
876 settings |= MGMT_SETTING_POWERED;
878 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
879 settings |= MGMT_SETTING_CONNECTABLE;
881 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
882 settings |= MGMT_SETTING_FAST_CONNECTABLE;
884 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
885 settings |= MGMT_SETTING_DISCOVERABLE;
887 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
888 settings |= MGMT_SETTING_BONDABLE;
890 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
891 settings |= MGMT_SETTING_BREDR;
893 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
894 settings |= MGMT_SETTING_LE;
896 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
897 settings |= MGMT_SETTING_LINK_SECURITY;
899 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
900 settings |= MGMT_SETTING_SSP;
902 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
903 settings |= MGMT_SETTING_ADVERTISING;
905 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
906 settings |= MGMT_SETTING_SECURE_CONN;
908 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
909 settings |= MGMT_SETTING_DEBUG_KEYS;
911 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
912 settings |= MGMT_SETTING_PRIVACY;
914 /* The current setting for static address has two purposes. The
915 * first is to indicate if the static address will be used and
916 * the second is to indicate if it is actually set.
918 * This means if the static address is not configured, this flag
919 * will never be set. If the address is configured, then if the
920 * address is actually used decides if the flag is set or not.
922 * For single mode LE only controllers and dual-mode controllers
923 * with BR/EDR disabled, the existence of the static address will
926 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
927 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
928 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
929 if (bacmp(&hdev->static_addr, BDADDR_ANY))
930 settings |= MGMT_SETTING_STATIC_ADDRESS;
933 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
934 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
936 if (cis_central_capable(hdev))
937 settings |= MGMT_SETTING_CIS_CENTRAL;
939 if (cis_peripheral_capable(hdev))
940 settings |= MGMT_SETTING_CIS_PERIPHERAL;
942 if (bis_capable(hdev))
943 settings |= MGMT_SETTING_ISO_BROADCASTER;
945 if (sync_recv_capable(hdev))
946 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
951 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
953 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
956 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
958 struct mgmt_pending_cmd *cmd;
960 /* If there's a pending mgmt command the flags will not yet have
961 * their final values, so check for this first.
963 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
965 struct mgmt_mode *cp = cmd->param;
967 return LE_AD_GENERAL;
968 else if (cp->val == 0x02)
969 return LE_AD_LIMITED;
971 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
972 return LE_AD_LIMITED;
973 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
974 return LE_AD_GENERAL;
980 bool mgmt_get_connectable(struct hci_dev *hdev)
982 struct mgmt_pending_cmd *cmd;
984 /* If there's a pending mgmt command the flag will not yet have
985 * it's final value, so check for this first.
987 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
989 struct mgmt_mode *cp = cmd->param;
994 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
997 static int service_cache_sync(struct hci_dev *hdev, void *data)
999 hci_update_eir_sync(hdev);
1000 hci_update_class_sync(hdev);
1005 static void service_cache_off(struct work_struct *work)
1007 struct hci_dev *hdev = container_of(work, struct hci_dev,
1008 service_cache.work);
1010 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1013 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1016 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1018 /* The generation of a new RPA and programming it into the
1019 * controller happens in the hci_req_enable_advertising()
1022 if (ext_adv_capable(hdev))
1023 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1025 return hci_enable_advertising_sync(hdev);
1028 static void rpa_expired(struct work_struct *work)
1030 struct hci_dev *hdev = container_of(work, struct hci_dev,
1033 bt_dev_dbg(hdev, "");
1035 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1037 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1040 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1043 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1045 static void discov_off(struct work_struct *work)
1047 struct hci_dev *hdev = container_of(work, struct hci_dev,
1050 bt_dev_dbg(hdev, "");
1054 /* When discoverable timeout triggers, then just make sure
1055 * the limited discoverable flag is cleared. Even in the case
1056 * of a timeout triggered from general discoverable, it is
1057 * safe to unconditionally clear the flag.
1059 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1060 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1061 hdev->discov_timeout = 0;
1063 hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
1065 mgmt_new_settings(hdev);
1067 hci_dev_unlock(hdev);
1070 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1072 static void mesh_send_complete(struct hci_dev *hdev,
1073 struct mgmt_mesh_tx *mesh_tx, bool silent)
1075 u8 handle = mesh_tx->handle;
1078 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1079 sizeof(handle), NULL);
1081 mgmt_mesh_remove(mesh_tx);
1084 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1086 struct mgmt_mesh_tx *mesh_tx;
1088 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1089 hci_disable_advertising_sync(hdev);
1090 mesh_tx = mgmt_mesh_next(hdev, NULL);
1093 mesh_send_complete(hdev, mesh_tx, false);
1098 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1099 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1100 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1102 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1107 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1108 mesh_send_start_complete);
1111 mesh_send_complete(hdev, mesh_tx, false);
1113 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1116 static void mesh_send_done(struct work_struct *work)
1118 struct hci_dev *hdev = container_of(work, struct hci_dev,
1119 mesh_send_done.work);
1121 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1124 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1127 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1129 if (hci_dev_test_flag(hdev, HCI_MGMT))
1132 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1134 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1135 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1136 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1137 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1139 /* Non-mgmt controlled devices get this bit set
1140 * implicitly so that pairing works for them, however
1141 * for mgmt we require user-space to explicitly enable
1144 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1146 hci_dev_set_flag(hdev, HCI_MGMT);
1149 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1150 void *data, u16 data_len)
1152 struct mgmt_rp_read_info rp;
1154 bt_dev_dbg(hdev, "sock %p", sk);
1158 memset(&rp, 0, sizeof(rp));
1160 bacpy(&rp.bdaddr, &hdev->bdaddr);
1162 rp.version = hdev->hci_ver;
1163 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1165 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1166 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1168 memcpy(rp.dev_class, hdev->dev_class, 3);
1170 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1171 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1173 hci_dev_unlock(hdev);
1175 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1179 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1184 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1185 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1186 hdev->dev_class, 3);
1188 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1189 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1192 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1193 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1194 hdev->dev_name, name_len);
1196 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1197 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1198 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info but with a
 * variable-length EIR blob instead of fixed class/name fields. Calling
 * this once switches the socket to extended-info events (flags below).
 */
1203 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1204 void *data, u16 data_len)
/* rp aliases a local buf[] declared on an elided line. */
1207 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1210 bt_dev_dbg(hdev, "sock %p", sk);
1212 memset(&buf, 0, sizeof(buf));
1216 bacpy(&rp->bdaddr, &hdev->bdaddr);
1218 rp->version = hdev->hci_ver;
1219 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1221 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1222 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1225 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1226 rp->eir_len = cpu_to_le16(eir_len);
1228 hci_dev_unlock(hdev);
1230 /* If this command is called at least once, then the events
1231 * for class of device and local name changes are disabled
1232 * and only the new extended controller information event
1235 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1236 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1237 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
/* Reply size = fixed header plus however much EIR was appended. */
1239 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1240 sizeof(*rp) + eir_len);
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (fresh EIR blob) to all sockets
 * that opted into extended-info events, except @skip.
 */
1243 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
/* ev aliases a local buf[] declared on an elided line. */
1246 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1249 memset(buf, 0, sizeof(buf));
1251 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1252 ev->eir_len = cpu_to_le16(eir_len);
1254 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1255 sizeof(*ev) + eir_len,
1256 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Reply to @opcode with the current settings bitmask (little endian). */
1259 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1261 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1263 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Emit MGMT_EV_ADVERTISING_ADDED for @instance, skipping @sk (the
 * originator already gets a command reply).
 */
1267 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1269 struct mgmt_ev_advertising_added ev;
1271 ev.instance = instance;
1273 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADVERTISING_REMOVED for @instance, skipping @sk. */
1276 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1279 struct mgmt_ev_advertising_removed ev;
1281 ev.instance = instance;
1283 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Stop a pending advertising-instance expiry: clear the recorded
 * timeout before cancelling the delayed work.
 */
1286 static void cancel_adv_timeout(struct hci_dev *hdev)
1288 if (hdev->adv_instance_timeout) {
1289 hdev->adv_instance_timeout = 0;
1290 cancel_delayed_work(&hdev->adv_instance_expire);
1294 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry into the pend_le_conns /
 * pend_le_reports lists according to its auto_connect policy, after
 * first unlinking it from whichever list it was on.
 */
1295 static void restart_le_actions(struct hci_dev *hdev)
1297 struct hci_conn_params *p;
1299 list_for_each_entry(p, &hdev->le_conn_params, list) {
1300 /* Needed for AUTO_OFF case where might not "really"
1301 * have been powered off.
1303 hci_pend_le_list_del_init(p);
1305 switch (p->auto_connect) {
1306 case HCI_AUTO_CONN_DIRECT:
1307 case HCI_AUTO_CONN_ALWAYS:
1308 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1310 case HCI_AUTO_CONN_REPORT:
1311 hci_pend_le_list_add(p, &hdev->pend_le_reports);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * sockets subscribed to setting events, except @skip.
 */
1319 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1321 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1323 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1324 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Completion callback for set_powered_sync. On success: re-arm LE
 * auto-connect actions, refresh passive scan, reply with settings and
 * (for power-on) broadcast NEW_SETTINGS; on error: return a status.
 */
1327 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1329 struct mgmt_pending_cmd *cmd = data;
1330 struct mgmt_mode *cp;
1332 /* Make sure cmd still outstanding. */
/* Bail out if the pending entry was already consumed elsewhere. */
1333 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1338 bt_dev_dbg(hdev, "err %d", err);
1343 restart_le_actions(hdev);
1344 hci_update_passive_scan(hdev);
1345 hci_dev_unlock(hdev);
1348 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1350 /* Only call new_setting for power on as power off is deferred
1351 * to hdev->power_off work which does call hci_dev_do_close.
1354 new_settings(hdev, cmd->sk);
1356 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1360 mgmt_pending_remove(cmd);
/* hci_sync work: apply the requested power state from cmd->param. */
1363 static int set_powered_sync(struct hci_dev *hdev, void *data)
1365 struct mgmt_pending_cmd *cmd = data;
1366 struct mgmt_mode *cp = cmd->param;
1368 BT_DBG("%s", hdev->name);
1370 return hci_set_powered_sync(hdev, cp->val);
/* MGMT_OP_SET_POWERED handler: validate the on/off value, reject a
 * duplicate pending command, short-circuit when already in the target
 * state, otherwise queue set_powered_sync. Power-off cancels any
 * blocking sync op first and must use hci_cmd_sync_queue; power-on uses
 * hci_cmd_sync_submit because hdev may not be running yet.
 */
1373 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1376 struct mgmt_mode *cp = data;
1377 struct mgmt_pending_cmd *cmd;
1380 bt_dev_dbg(hdev, "sock %p", sk);
1382 if (cp->val != 0x00 && cp->val != 0x01)
1383 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1384 MGMT_STATUS_INVALID_PARAMS);
1388 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1389 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Already in requested state: just echo current settings. */
1394 if (!!cp->val == hdev_is_powered(hdev)) {
1395 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1399 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1405 /* Cancel potentially blocking sync operation before power off */
1406 if (cp->val == 0x00) {
1407 hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
1408 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1409 mgmt_set_powered_complete);
1411 /* Use hci_cmd_sync_submit since hdev might not be running */
1412 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1413 mgmt_set_powered_complete);
1417 mgmt_pending_remove(cmd);
1420 hci_dev_unlock(hdev);
/* Public wrapper: broadcast NEW_SETTINGS to all subscribers. */
1424 int mgmt_new_settings(struct hci_dev *hdev)
1426 return new_settings(hdev, NULL);
1431 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer each pending command with the
 * current settings, remember the first socket in the cmd_lookup match
 * (with a reference) so the caller can skip it when broadcasting, and
 * free the pending entry.
 */
1435 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1437 struct cmd_lookup *match = data;
1439 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1441 list_del(&cmd->list);
1443 if (match->sk == NULL) {
1444 match->sk = cmd->sk;
1445 sock_hold(match->sk);
1448 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail the command with *status (a u8
 * passed via @data) and drop the pending entry.
 */
1451 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1455 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1456 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own
 * cmd_complete handler; fall back to a plain status response.
 */
1459 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1461 if (cmd->cmd_complete) {
1464 cmd->cmd_complete(cmd, *status);
1465 mgmt_pending_remove(cmd);
1470 cmd_status_rsp(cmd, data);
/* cmd_complete helper: echo the full request parameters back. */
1473 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1475 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1476 cmd->param, cmd->param_len);
/* cmd_complete helper: echo only the leading mgmt_addr_info of the
 * request parameters back.
 */
1479 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1481 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1482 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when it is disabled, else SUCCESS.
 */
1485 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1487 if (!lmp_bredr_capable(hdev))
1488 return MGMT_STATUS_NOT_SUPPORTED;
1489 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1490 return MGMT_STATUS_REJECTED;
1492 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an MGMT status, same scheme as
 * mgmt_bredr_support().
 */
1495 static u8 mgmt_le_support(struct hci_dev *hdev)
1497 if (!lmp_le_capable(hdev))
1498 return MGMT_STATUS_NOT_SUPPORTED;
1499 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1500 return MGMT_STATUS_REJECTED;
1502 return MGMT_STATUS_SUCCESS;
/* Completion callback for set_discoverable_sync. On error, report the
 * status and clear HCI_LIMITED_DISCOVERABLE; on success, (re)arm the
 * discoverable timeout when one is configured, then reply and
 * broadcast NEW_SETTINGS.
 */
1505 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1508 struct mgmt_pending_cmd *cmd = data;
1510 bt_dev_dbg(hdev, "err %d", err);
1512 /* Make sure cmd still outstanding. */
1513 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1519 u8 mgmt_err = mgmt_status(err);
1520 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1521 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1525 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1526 hdev->discov_timeout > 0) {
/* discov_timeout is in seconds; delayed work wants jiffies. */
1527 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1528 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1531 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1532 new_settings(hdev, cmd->sk);
1535 mgmt_pending_remove(cmd);
1536 hci_dev_unlock(hdev);
/* hci_sync work: push the discoverable state to the controller. */
1539 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1541 BT_DBG("%s", hdev->name);
1543 return hci_update_discoverable_sync(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable. Validates the val/timeout combination
 * (off forbids a timeout, limited requires one), rejects when not
 * connectable, when a conflicting command is pending, or while
 * advertising is paused. Powered-off and timeout-only changes are
 * handled without HCI traffic; otherwise the flags are updated and
 * set_discoverable_sync is queued.
 */
1546 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1549 struct mgmt_cp_set_discoverable *cp = data;
1550 struct mgmt_pending_cmd *cmd;
1554 bt_dev_dbg(hdev, "sock %p", sk);
1556 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1557 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1558 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1559 MGMT_STATUS_REJECTED);
1561 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1562 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1563 MGMT_STATUS_INVALID_PARAMS);
1565 timeout = __le16_to_cpu(cp->timeout);
1567 /* Disabling discoverable requires that no timeout is set,
1568 * and enabling limited discoverable requires a timeout.
1570 if ((cp->val == 0x00 && timeout > 0) ||
1571 (cp->val == 0x02 && timeout == 0))
1572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1573 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while powered off. */
1577 if (!hdev_is_powered(hdev) && timeout > 0) {
1578 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1579 MGMT_STATUS_NOT_POWERED);
1583 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1584 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable. */
1590 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1591 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1592 MGMT_STATUS_REJECTED);
1596 if (hdev->advertising_paused) {
1597 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1602 if (!hdev_is_powered(hdev)) {
1603 bool changed = false;
1605 /* Setting limited discoverable when powered off is
1606 * not a valid operation since it requires a timeout
1607 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1609 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1610 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1614 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1619 err = new_settings(hdev, sk);
1624 /* If the current mode is the same, then just update the timeout
1625 * value with the new value. And if only the timeout gets updated,
1626 * then no need for any HCI transactions.
1628 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1629 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1630 HCI_LIMITED_DISCOVERABLE)) {
1631 cancel_delayed_work(&hdev->discov_off);
1632 hdev->discov_timeout = timeout;
1634 if (cp->val && hdev->discov_timeout > 0) {
1635 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1636 queue_delayed_work(hdev->req_workqueue,
1637 &hdev->discov_off, to);
1640 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1644 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1650 /* Cancel any potential discoverable timeout that might be
1651 * still active and store new timeout value. The arming of
1652 * the timeout happens in the complete handler.
1654 cancel_delayed_work(&hdev->discov_off);
1655 hdev->discov_timeout = timeout;
1658 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1660 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1662 /* Limited discoverable mode */
1663 if (cp->val == 0x02)
1664 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1666 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1668 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1669 mgmt_set_discoverable_complete);
1672 mgmt_pending_remove(cmd);
1675 hci_dev_unlock(hdev);
/* Completion callback for set_connectable_sync: report an error status
 * or reply with settings plus a NEW_SETTINGS broadcast.
 */
1679 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1682 struct mgmt_pending_cmd *cmd = data;
1684 bt_dev_dbg(hdev, "err %d", err);
1686 /* Make sure cmd still outstanding. */
1687 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1693 u8 mgmt_err = mgmt_status(err);
1694 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1698 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1699 new_settings(hdev, cmd->sk);
1703 mgmt_pending_remove(cmd);
1705 hci_dev_unlock(hdev);
/* Flag-only connectable change (used when powered off): toggle
 * HCI_CONNECTABLE — clearing it also clears HCI_DISCOVERABLE — reply,
 * refresh scan state and broadcast NEW_SETTINGS when changed.
 */
1708 static int set_connectable_update_settings(struct hci_dev *hdev,
1709 struct sock *sk, u8 val)
1711 bool changed = false;
1714 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1718 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1720 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1721 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1724 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1729 hci_update_scan(hdev);
1730 hci_update_passive_scan(hdev);
1731 return new_settings(hdev, sk);
/* hci_sync work: push the connectable state to the controller. */
1737 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1739 BT_DBG("%s", hdev->name);
1741 return hci_update_connectable_sync(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: validate 0x00/0x01, handle the
 * powered-off case via set_connectable_update_settings, reject when a
 * conflicting command is pending, then update the flags (disabling also
 * drops discoverable state and its timeout) and queue
 * set_connectable_sync.
 */
1744 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1747 struct mgmt_mode *cp = data;
1748 struct mgmt_pending_cmd *cmd;
1751 bt_dev_dbg(hdev, "sock %p", sk);
1753 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1754 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1756 MGMT_STATUS_REJECTED);
1758 if (cp->val != 0x00 && cp->val != 0x01)
1759 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1760 MGMT_STATUS_INVALID_PARAMS);
1764 if (!hdev_is_powered(hdev)) {
1765 err = set_connectable_update_settings(hdev, sk, cp->val);
1769 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1770 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1771 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1776 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1783 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1785 if (hdev->discov_timeout > 0)
1786 cancel_delayed_work(&hdev->discov_off);
1788 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1789 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1790 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1793 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1794 mgmt_set_connectable_complete);
1797 mgmt_pending_remove(cmd);
1800 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: purely a flag change, no HCI command.
 * On change, refresh discoverable state (bondable can affect the local
 * advertising address in limited privacy mode) and broadcast settings.
 */
1804 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1807 struct mgmt_mode *cp = data;
1811 bt_dev_dbg(hdev, "sock %p", sk);
1813 if (cp->val != 0x00 && cp->val != 0x01)
1814 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1815 MGMT_STATUS_INVALID_PARAMS);
/* test_and_set/clear tell us whether the flag actually flipped. */
1820 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1822 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1824 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1829 /* In limited privacy mode the change of bondable mode
1830 * may affect the local advertising address.
1832 hci_update_discoverable(hdev);
1834 err = new_settings(hdev, sk);
1838 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler (BR/EDR authentication). Powered
 * off: flag-only change. Powered on: skip if HCI_AUTH already matches,
 * otherwise send HCI_OP_WRITE_AUTH_ENABLE directly (legacy-style, not
 * via hci_sync).
 */
1842 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1845 struct mgmt_mode *cp = data;
1846 struct mgmt_pending_cmd *cmd;
1850 bt_dev_dbg(hdev, "sock %p", sk);
1852 status = mgmt_bredr_support(hdev);
1854 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1857 if (cp->val != 0x00 && cp->val != 0x01)
1858 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1859 MGMT_STATUS_INVALID_PARAMS);
1863 if (!hdev_is_powered(hdev)) {
1864 bool changed = false;
1866 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1867 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1871 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1876 err = new_settings(hdev, sk);
1881 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1882 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1889 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1890 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1894 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1900 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1902 mgmt_pending_remove(cmd);
1907 hci_dev_unlock(hdev);
/* Completion callback for set_ssp_sync. On error: roll back the SSP
 * flag when enabling failed and fail every pending SET_SSP command.
 * On success: reconcile the flag, answer all pending SET_SSP commands,
 * broadcast NEW_SETTINGS on change and refresh the EIR.
 */
1911 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1913 struct cmd_lookup match = { NULL, hdev };
1914 struct mgmt_pending_cmd *cmd = data;
1915 struct mgmt_mode *cp = cmd->param;
1916 u8 enable = cp->val;
1919 /* Make sure cmd still outstanding. */
1920 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1924 u8 mgmt_err = mgmt_status(err);
1926 if (enable && hci_dev_test_and_clear_flag(hdev,
1928 new_settings(hdev, NULL);
1931 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1937 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1939 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1942 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1945 new_settings(hdev, match.sk);
1950 hci_update_eir_sync(hdev);
/* hci_sync work: set HCI_SSP_ENABLED optimistically, write the SSP mode
 * to the controller, and undo the flag if the write failed.
 */
1953 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1955 struct mgmt_pending_cmd *cmd = data;
1956 struct mgmt_mode *cp = cmd->param;
1957 bool changed = false;
1961 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1963 err = hci_write_ssp_mode_sync(hdev, cp->val);
1965 if (!err && changed)
1966 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
/* MGMT_OP_SET_SSP handler: require BR/EDR + SSP capability, validate
 * 0x00/0x01, do a flag-only change when powered off, skip when already
 * in the requested state, otherwise queue set_ssp_sync.
 */
1971 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1973 struct mgmt_mode *cp = data;
1974 struct mgmt_pending_cmd *cmd;
1978 bt_dev_dbg(hdev, "sock %p", sk);
1980 status = mgmt_bredr_support(hdev);
1982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1984 if (!lmp_ssp_capable(hdev))
1985 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1986 MGMT_STATUS_NOT_SUPPORTED);
1988 if (cp->val != 0x00 && cp->val != 0x01)
1989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1990 MGMT_STATUS_INVALID_PARAMS);
1994 if (!hdev_is_powered(hdev)) {
1998 changed = !hci_dev_test_and_set_flag(hdev,
2001 changed = hci_dev_test_and_clear_flag(hdev,
2005 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2010 err = new_settings(hdev, sk);
2015 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2016 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2021 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2022 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2026 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2030 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2034 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2035 MGMT_STATUS_FAILED);
2038 mgmt_pending_remove(cmd);
2042 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: High Speed (AMP) support was removed, so the
 * command is always answered with NOT_SUPPORTED.
 */
2046 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2048 bt_dev_dbg(hdev, "sock %p", sk);
2050 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2051 MGMT_STATUS_NOT_SUPPORTED);
/* Completion callback for set_le_sync: on error, fail all pending
 * SET_LE commands; on success, answer them with current settings and
 * broadcast NEW_SETTINGS.
 */
2054 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2056 struct cmd_lookup match = { NULL, hdev };
2057 u8 status = mgmt_status(err);
2059 bt_dev_dbg(hdev, "err %d", err);
2062 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2067 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2069 new_settings(hdev, match.sk);
/* hci_sync work for SET_LE. Disabling: tear down advertising instances
 * first. Then write LE Host Support and, when LE ended up enabled,
 * refresh default advertising/scan-response data (ext-adv aware) and
 * the passive scan.
 */
2075 static int set_le_sync(struct hci_dev *hdev, void *data)
2077 struct mgmt_pending_cmd *cmd = data;
2078 struct mgmt_mode *cp = cmd->param;
2083 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2085 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2086 hci_disable_advertising_sync(hdev);
2088 if (ext_adv_capable(hdev))
2089 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2091 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2094 err = hci_write_le_host_supported_sync(hdev, val, 0);
2096 /* Make sure the controller has a good default for
2097 * advertising data. Restrict the update to when LE
2098 * has actually been enabled. During power on, the
2099 * update in powered_update_hci will take care of it.
2101 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2102 if (ext_adv_capable(hdev)) {
2105 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2107 hci_update_scan_rsp_data_sync(hdev, 0x00);
2109 hci_update_adv_data_sync(hdev, 0x00);
2110 hci_update_scan_rsp_data_sync(hdev, 0x00);
2113 hci_update_passive_scan(hdev);
/* Completion callback for set_mesh_sync: fail all pending
 * SET_MESH_RECEIVER commands on error, otherwise drop the pending entry
 * and reply with an empty cmd_complete.
 */
2119 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2121 struct mgmt_pending_cmd *cmd = data;
2122 u8 status = mgmt_status(err);
2123 struct sock *sk = cmd->sk;
2126 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2127 cmd_status_rsp, &status);
2131 mgmt_pending_remove(cmd);
2132 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
/* hci_sync work for SET_MESH_RECEIVER: toggle HCI_MESH, store the
 * requested AD-type filter list (or leave it empty = forward all
 * advertising packets when it would not fit), then refresh passive
 * scanning.
 */
2135 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2137 struct mgmt_pending_cmd *cmd = data;
2138 struct mgmt_cp_set_mesh *cp = cmd->param;
2139 size_t len = cmd->param_len;
2141 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2144 hci_dev_set_flag(hdev, HCI_MESH);
2146 hci_dev_clear_flag(hdev, HCI_MESH);
/* NOTE(review): elided lines appear to adjust 'len' to the ad_types
 * portion before this copy — confirm against full source. */
2150 /* If filters don't fit, forward all adv pkts */
2151 if (len <= sizeof(hdev->mesh_ad_types))
2152 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2154 hci_update_passive_scan_sync(hdev);
/* MGMT_OP_SET_MESH_RECEIVER handler: requires LE capability and the
 * mesh experimental flag; validates enable as 0x00/0x01 and queues
 * set_mesh_sync.
 */
2158 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2160 struct mgmt_cp_set_mesh *cp = data;
2161 struct mgmt_pending_cmd *cmd;
2164 bt_dev_dbg(hdev, "sock %p", sk);
2166 if (!lmp_le_capable(hdev) ||
2167 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2168 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2169 MGMT_STATUS_NOT_SUPPORTED);
2171 if (cp->enable != 0x00 && cp->enable != 0x01)
2172 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2173 MGMT_STATUS_INVALID_PARAMS);
2177 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2181 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2185 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2186 MGMT_STATUS_FAILED);
2189 mgmt_pending_remove(cmd);
2192 hci_dev_unlock(hdev);
/* Completion callback for mesh_send_sync: on error, clear the sending
 * flag and report a failed send; on success, schedule the send-done
 * work after cnt * 25 ms.
 */
2196 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2198 struct mgmt_mesh_tx *mesh_tx = data;
2199 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2200 unsigned long mesh_send_interval;
2201 u8 mgmt_err = mgmt_status(err);
2203 /* Report any errors here, but don't report completion */
2206 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2207 /* Send Complete Error Code for handle */
2208 mesh_send_complete(hdev, mesh_tx, false);
2212 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2213 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2214 mesh_send_interval);
/* hci_sync work for MESH_SEND: create a temporary advertising instance
 * (index le_num_of_adv_sets + 1) carrying the mesh payload and schedule
 * it — immediately when it becomes the current instance, otherwise via
 * the normal advertising rotation.
 */
2217 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2219 struct mgmt_mesh_tx *mesh_tx = data;
2220 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2221 struct adv_info *adv, *next_instance;
/* One slot past the configured adv sets is reserved for mesh TX. */
2222 u8 instance = hdev->le_num_of_adv_sets + 1;
2223 u16 timeout, duration;
2226 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2227 return MGMT_STATUS_BUSY;
2230 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2231 adv = hci_add_adv_instance(hdev, instance, 0,
2232 send->adv_data_len, send->adv_data,
2235 HCI_ADV_TX_POWER_NO_PREFERENCE,
2236 hdev->le_adv_min_interval,
2237 hdev->le_adv_max_interval,
2241 mesh_tx->instance = instance;
2245 if (hdev->cur_adv_instance == instance) {
2246 /* If the currently advertised instance is being changed then
2247 * cancel the current advertising and schedule the next
2248 * instance. If there is only one instance then the overridden
2249 * advertising data will be visible right away.
2251 cancel_adv_timeout(hdev);
2253 next_instance = hci_get_next_instance(hdev, instance);
2255 instance = next_instance->instance;
2258 } else if (hdev->adv_instance_timeout) {
2259 /* Immediately advertise the new instance if no other, or
2260 * let it go naturally from queue if ADV is already happening
2266 return hci_schedule_adv_instance_sync(hdev, instance, true);
/* Per-mesh-tx iterator: append the handle to the read_features reply
 * until max_handles is reached.
 */
2271 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2273 struct mgmt_rp_mesh_read_features *rp = data;
2275 if (rp->used_handles >= rp->max_handles)
2278 rp->handles[rp->used_handles++] = mesh_tx->handle;
/* MGMT_OP_MESH_READ_FEATURES handler: report the index, the handle
 * capacity (when LE is enabled) and the handles of this socket's
 * outstanding mesh transmissions. Reply is trimmed to used_handles.
 */
2281 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2282 void *data, u16 len)
2284 struct mgmt_rp_mesh_read_features rp;
2286 if (!lmp_le_capable(hdev) ||
2287 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2288 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2289 MGMT_STATUS_NOT_SUPPORTED);
2291 memset(&rp, 0, sizeof(rp));
2292 rp.index = cpu_to_le16(hdev->id);
2293 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2294 rp.max_handles = MESH_HANDLES_MAX;
2299 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
/* Shrink the reply to only the handles actually filled in. */
2301 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2302 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2304 hci_dev_unlock(hdev);
/* hci_sync work for MESH_SEND_CANCEL: handle 0 cancels every
 * outstanding transmission owned by the requesting socket; a non-zero
 * handle cancels just that one (ownership checked). Always completes
 * the command and frees the pending entry.
 */
2308 static int send_cancel(struct hci_dev *hdev, void *data)
2310 struct mgmt_pending_cmd *cmd = data;
2311 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2312 struct mgmt_mesh_tx *mesh_tx;
2314 if (!cancel->handle) {
2316 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2319 mesh_send_complete(hdev, mesh_tx, false);
2322 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2324 if (mesh_tx && mesh_tx->sk == cmd->sk)
2325 mesh_send_complete(hdev, mesh_tx, false);
2328 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2330 mgmt_pending_free(cmd);
/* MGMT_OP_MESH_SEND_CANCEL handler: gate on LE + mesh experimental and
 * LE being enabled, then queue send_cancel. Uses mgmt_pending_new (not
 * _add) so the entry is not on the pending list.
 */
2335 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2336 void *data, u16 len)
2338 struct mgmt_pending_cmd *cmd;
2341 if (!lmp_le_capable(hdev) ||
2342 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2343 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2344 MGMT_STATUS_NOT_SUPPORTED);
2346 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2347 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2348 MGMT_STATUS_REJECTED);
2351 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2355 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2358 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2359 MGMT_STATUS_FAILED);
2362 mgmt_pending_free(cmd);
2365 hci_dev_unlock(hdev);
/* MGMT_OP_MESH_SEND handler: validate capability and payload length
 * (header < len <= header + 31 octets of adv data), refuse when all
 * handles are in use, register the transmission and queue
 * mesh_send_sync; on success set HCI_MESH_SENDING and return the new
 * handle to the caller.
 */
2369 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2371 struct mgmt_mesh_tx *mesh_tx;
2372 struct mgmt_cp_mesh_send *send = data;
2373 struct mgmt_rp_mesh_read_features rp;
2377 if (!lmp_le_capable(hdev) ||
2378 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2379 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2380 MGMT_STATUS_NOT_SUPPORTED);
2381 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2382 len <= MGMT_MESH_SEND_SIZE ||
2383 len > (MGMT_MESH_SEND_SIZE + 31))
2384 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2385 MGMT_STATUS_REJECTED);
/* Reuse the read_features reply struct just to count used handles. */
2389 memset(&rp, 0, sizeof(rp));
2390 rp.max_handles = MESH_HANDLES_MAX;
2392 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2394 if (rp.max_handles <= rp.used_handles) {
2395 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2400 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2401 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2406 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2407 mesh_send_start_complete);
2410 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2411 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2412 MGMT_STATUS_FAILED);
2416 mgmt_mesh_remove(mesh_tx);
2419 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2421 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2422 &mesh_tx->handle, 1);
2426 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: validate 0x00/0x01; on LE-only controllers
 * disabling LE is rejected (enabling is a no-op success). Powered-off
 * or no-op changes are flag-only; otherwise set_le_sync is queued,
 * with conflicting SET_LE/SET_ADVERTISING commands rejected as busy.
 */
2430 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2432 struct mgmt_mode *cp = data;
2433 struct mgmt_pending_cmd *cmd;
2437 bt_dev_dbg(hdev, "sock %p", sk);
2439 if (!lmp_le_capable(hdev))
2440 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2441 MGMT_STATUS_NOT_SUPPORTED);
2443 if (cp->val != 0x00 && cp->val != 0x01)
2444 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2445 MGMT_STATUS_INVALID_PARAMS);
2447 /* Bluetooth single mode LE only controllers or dual-mode
2448 * controllers configured as LE only devices, do not allow
2449 * switching LE off. These have either LE enabled explicitly
2450 * or BR/EDR has been previously switched off.
2452 * When trying to enable an already enabled LE, then gracefully
2453 * send a positive response. Trying to disable it however will
2454 * result into rejection.
2456 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2457 if (cp->val == 0x01)
2458 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2460 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2461 MGMT_STATUS_REJECTED);
2467 enabled = lmp_host_le_capable(hdev);
2469 if (!hdev_is_powered(hdev) || val == enabled) {
2470 bool changed = false;
2472 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2473 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE also drops the advertising flag. */
2477 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2478 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2482 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2487 err = new_settings(hdev, sk);
2492 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2493 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2494 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2499 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2503 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2507 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2508 MGMT_STATUS_FAILED);
2511 mgmt_pending_remove(cmd);
2515 hci_dev_unlock(hdev);
2519 /* This is a helper function to test for pending mgmt commands that can
2520 * cause CoD or EIR HCI commands. We can only allow one such pending
2521 * mgmt command at a time since otherwise we cannot easily track what
2522 * the current values are, will be, and based on that calculate if a new
2523 * HCI command needs to be sent and if yes with what value.
2525 static bool pending_eir_or_class(struct hci_dev *hdev)
2527 struct mgmt_pending_cmd *cmd;
2529 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2530 switch (cmd->opcode) {
2531 case MGMT_OP_ADD_UUID:
2532 case MGMT_OP_REMOVE_UUID:
2533 case MGMT_OP_SET_DEV_CLASS:
2534 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs share its trailing 12
 * bytes.
 */
2542 static const u8 bluetooth_base_uuid[] = {
2543 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2544 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID: if it is not derived from the
 * Bluetooth Base UUID it stays 128-bit; otherwise the top 32 bits
 * decide between the 16- and 32-bit short forms (decision lines
 * elided in this extract).
 */
2547 static u8 get_uuid_size(const u8 *uuid)
2551 if (memcmp(uuid, bluetooth_base_uuid, 12))
2554 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for UUID/class commands: reply with the (possibly
 * updated) 3-byte class of device and free the pending entry.
 */
2561 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2563 struct mgmt_pending_cmd *cmd = data;
2565 bt_dev_dbg(hdev, "err %d", err);
2567 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2568 mgmt_status(err), hdev->dev_class, 3);
2570 mgmt_pending_free(cmd);
/* hci_sync work for ADD_UUID: refresh class of device, then EIR. */
2573 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2577 err = hci_update_class_sync(hdev);
2581 return hci_update_eir_sync(hdev);
/* MGMT_OP_ADD_UUID handler: serialize against other EIR/class-touching
 * commands, append the UUID (with service hint and computed size) to
 * hdev->uuids and queue add_uuid_sync.
 */
2584 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2586 struct mgmt_cp_add_uuid *cp = data;
2587 struct mgmt_pending_cmd *cmd;
2588 struct bt_uuid *uuid;
2591 bt_dev_dbg(hdev, "sock %p", sk);
2595 if (pending_eir_or_class(hdev)) {
2596 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2601 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2607 memcpy(uuid->uuid, cp->uuid, 16);
2608 uuid->svc_hint = cp->svc_hint;
2609 uuid->size = get_uuid_size(cp->uuid);
2611 list_add_tail(&uuid->list, &hdev->uuids);
2613 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2619 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2621 mgmt_pending_free(cmd);
2626 hci_dev_unlock(hdev);
/* Arm the service-cache delayed work if the device is powered and the
 * cache was not already enabled; return value indicates whether the
 * caller can defer the class/EIR update (tail elided in this extract).
 */
2630 static bool enable_service_cache(struct hci_dev *hdev)
2632 if (!hdev_is_powered(hdev))
2635 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2636 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_sync work for REMOVE_UUID: refresh class of device, then EIR. */
2644 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2648 err = hci_update_class_sync(hdev);
2652 return hci_update_eir_sync(hdev);
/* MGMT_OP_REMOVE_UUID handler: an all-zero UUID clears the whole list
 * (possibly deferring the HCI update via the service cache); otherwise
 * remove every matching entry, failing with INVALID_PARAMS when none
 * matched, then queue remove_uuid_sync.
 */
2655 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2658 struct mgmt_cp_remove_uuid *cp = data;
2659 struct mgmt_pending_cmd *cmd;
2660 struct bt_uuid *match, *tmp;
2661 static const u8 bt_uuid_any[] = {
2662 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2666 bt_dev_dbg(hdev, "sock %p", sk);
2670 if (pending_eir_or_class(hdev)) {
2671 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2676 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2677 hci_uuids_clear(hdev);
2679 if (enable_service_cache(hdev)) {
2680 err = mgmt_cmd_complete(sk, hdev->id,
2681 MGMT_OP_REMOVE_UUID,
2682 0, hdev->dev_class, 3);
2691 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2692 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2695 list_del(&match->list);
2701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2702 MGMT_STATUS_INVALID_PARAMS);
2707 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2713 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2714 mgmt_class_complete);
2716 mgmt_pending_free(cmd);
2719 hci_dev_unlock(hdev);
/* hci_sync work for SET_DEV_CLASS: flush a pending service-cache
 * expiry (including its EIR update) before writing the new class.
 */
2723 static int set_class_sync(struct hci_dev *hdev, void *data)
2727 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2728 cancel_delayed_work_sync(&hdev->service_cache);
2729 err = hci_update_eir_sync(hdev);
2735 return hci_update_class_sync(hdev);
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the major/minor class
 * of device and, when powered, queue a controller update.
 */
2738 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2741 struct mgmt_cp_set_dev_class *cp = data;
2742 struct mgmt_pending_cmd *cmd;
2745 bt_dev_dbg(hdev, "sock %p", sk);
/* Class of device is a BR/EDR-only concept. */
2747 if (!lmp_bredr_capable(hdev))
2748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2749 MGMT_STATUS_NOT_SUPPORTED);
2753 if (pending_eir_or_class(hdev)) {
2754 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* The low two bits of minor and high three bits of major are reserved
 * in the Class of Device format and must be zero. */
2759 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2760 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2761 MGMT_STATUS_INVALID_PARAMS);
2765 hdev->major_class = cp->major;
2766 hdev->minor_class = cp->minor;
/* Not powered: just remember the values and reply with the stored
 * 3-byte class; the controller gets it on power-up. */
2768 if (!hdev_is_powered(hdev)) {
2769 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2770 hdev->dev_class, 3);
2774 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2780 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2781 mgmt_class_complete);
/* Queueing failed: release the pending command ourselves. */
2783 mgmt_pending_free(cmd);
2786 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys with
 * the list supplied by userspace and update the debug-keys policy flag.
 */
2790 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2793 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound on key_count such that the total message length still
 * fits in the u16 length field - guards the struct_size() check below. */
2794 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2795 sizeof(struct mgmt_link_key_info));
2796 u16 key_count, expected_len;
2800 bt_dev_dbg(hdev, "sock %p", sk);
2802 if (!lmp_bredr_capable(hdev))
2803 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2804 MGMT_STATUS_NOT_SUPPORTED);
2806 key_count = __le16_to_cpu(cp->key_count);
2807 if (key_count > max_key_count) {
2808 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2810 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2811 MGMT_STATUS_INVALID_PARAMS);
/* The message must be exactly header + key_count entries. */
2814 expected_len = struct_size(cp, keys, key_count);
2815 if (expected_len != len) {
2816 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2818 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2819 MGMT_STATUS_INVALID_PARAMS);
2822 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2823 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2824 MGMT_STATUS_INVALID_PARAMS);
2826 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: validate every entry before touching any state. */
2829 for (i = 0; i < key_count; i++) {
2830 struct mgmt_link_key_info *key = &cp->keys[i];
2832 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2833 if (key->type > 0x08)
2834 return mgmt_cmd_status(sk, hdev->id,
2835 MGMT_OP_LOAD_LINK_KEYS,
2836 MGMT_STATUS_INVALID_PARAMS);
/* Validation passed: drop all previously stored link keys. */
2841 hci_link_keys_clear(hdev);
/* Track whether the debug-keys policy flag actually changed so we only
 * emit New Settings when needed (branch structure partly elided). */
2844 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2846 changed = hci_dev_test_and_clear_flag(hdev,
2847 HCI_KEEP_DEBUG_KEYS);
2850 new_settings(hdev, NULL);
/* Second pass: store the keys, skipping blocked and debug keys. */
2852 for (i = 0; i < key_count; i++) {
2853 struct mgmt_link_key_info *key = &cp->keys[i];
2855 if (hci_is_blocked_key(hdev,
2856 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2858 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2863 /* Always ignore debug keys and require a new pairing if
2864 * the user wants to use them.
2866 if (key->type == HCI_LK_DEBUG_COMBINATION)
2869 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2870 key->type, key->pin_len, NULL);
2873 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2875 hci_dev_unlock(hdev);
/* Broadcast an MGMT Device Unpaired event for bdaddr/addr_type.  The
 * skip_sk socket is presumably excluded from the broadcast (it receives a
 * command reply instead) - the mgmt_event() trailing argument is elided
 * in this excerpt.
 */
2880 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2881 u8 addr_type, struct sock *skip_sk)
2883 struct mgmt_ev_device_unpaired ev;
2885 bacpy(&ev.addr.bdaddr, bdaddr);
2886 ev.addr.type = addr_type;
2888 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Completion callback for the queued unpair_device_sync() work: emit the
 * Device Unpaired event, finish the pending MGMT command and free it.
 */
2892 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2894 struct mgmt_pending_cmd *cmd = data;
2895 struct mgmt_cp_unpair_device *cp = cmd->param;
/* Notify all other mgmt sockets; the originator gets the reply below. */
2898 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2900 cmd->cmd_complete(cmd, err);
2901 mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT Unpair Device: if the device is still
 * connected, terminate the link; otherwise there is nothing left to do
 * (the no-connection early return is elided in this excerpt).
 */
2904 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2906 struct mgmt_pending_cmd *cmd = data;
2907 struct mgmt_cp_unpair_device *cp = cmd->param;
2908 struct hci_conn *conn;
/* Look up the live connection by transport type. */
2910 if (cp->addr.type == BDADDR_BREDR)
2911 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2914 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2915 le_addr_type(cp->addr.type));
2920 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored keys for a device
 * (link key for BR/EDR, SMP LTK/IRK for LE), optionally disconnect it,
 * and emit Device Unpaired.  Replies carry the address back in rp.
 */
2923 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2926 struct mgmt_cp_unpair_device *cp = data;
2927 struct mgmt_rp_unpair_device rp;
2928 struct hci_conn_params *params;
2929 struct mgmt_pending_cmd *cmd;
2930 struct hci_conn *conn;
2934 memset(&rp, 0, sizeof(rp));
2935 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2936 rp.addr.type = cp->addr.type;
2938 if (!bdaddr_type_is_valid(cp->addr.type))
2939 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2940 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a boolean flag; reject anything but 0x00/0x01. */
2943 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2944 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2945 MGMT_STATUS_INVALID_PARAMS,
2950 if (!hdev_is_powered(hdev)) {
2951 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2952 MGMT_STATUS_NOT_POWERED, &rp,
2957 if (cp->addr.type == BDADDR_BREDR) {
2958 /* If disconnection is requested, then look up the
2959 * connection. If the remote device is connected, it
2960 * will be later used to terminate the link.
2962 * Setting it to NULL explicitly will cause no
2963 * termination of the link.
2966 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
/* BR/EDR: removing the link key is the actual "unpair". */
2971 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2973 err = mgmt_cmd_complete(sk, hdev->id,
2974 MGMT_OP_UNPAIR_DEVICE,
2975 MGMT_STATUS_NOT_PAIRED, &rp,
2983 /* LE address type */
2984 addr_type = le_addr_type(cp->addr.type);
2986 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2987 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2989 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2990 MGMT_STATUS_NOT_PAIRED, &rp,
2995 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
/* No live LE connection: drop the stored connection parameters now
 * (branch structure partly elided in this excerpt). */
2997 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3002 /* Defer clearing up the connection parameters until closing to
3003 * give a chance of keeping them if a repairing happens.
3005 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3007 /* Disable auto-connection parameters if present */
3008 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3010 if (params->explicit_connect)
3011 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3013 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3016 /* If disconnection is not requested, then clear the connection
3017 * variable so that the link is not terminated.
3019 if (!cp->disconnect)
3023 /* If the connection variable is set, then termination of the
3024 * link is requested.
3027 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3029 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnection requested and connection exists: queue the abort and
 * defer the reply to unpair_device_complete(). */
3033 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3040 cmd->cmd_complete = addr_cmd_complete;
3042 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3043 unpair_device_complete);
3045 mgmt_pending_free(cmd);
3048 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validate the request, look up the live
 * connection and issue an HCI disconnect.  The reply is deferred until
 * the disconnect completes (via the pending command).
 */
3052 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3055 struct mgmt_cp_disconnect *cp = data;
3056 struct mgmt_rp_disconnect rp;
3057 struct mgmt_pending_cmd *cmd;
3058 struct hci_conn *conn;
3061 bt_dev_dbg(hdev, "sock %p", sk);
3063 memset(&rp, 0, sizeof(rp));
3064 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3065 rp.addr.type = cp->addr.type;
3067 if (!bdaddr_type_is_valid(cp->addr.type))
3068 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3069 MGMT_STATUS_INVALID_PARAMS,
3074 if (!test_bit(HCI_UP, &hdev->flags)) {
3075 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3076 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one Disconnect command may be outstanding at a time. */
3081 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3082 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3083 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Resolve the connection by transport type. */
3087 if (cp->addr.type == BDADDR_BREDR)
3088 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3091 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3092 le_addr_type(cp->addr.type));
/* BT_OPEN/BT_CLOSED connections are not established links. */
3094 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3095 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3096 MGMT_STATUS_NOT_CONNECTED, &rp,
3101 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3107 cmd->cmd_complete = generic_cmd_complete;
3109 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
/* hci_disconnect() failed: drop the pending command again. */
3111 mgmt_pending_remove(cmd);
3114 hci_dev_unlock(hdev);
/* Map an HCI link type plus HCI address type to the corresponding MGMT
 * BDADDR_* address type.  NOTE(review): the switch case labels are elided
 * in this excerpt; only the LE and BR/EDR result branches are visible.
 */
3118 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3120 switch (link_type) {
3123 switch (addr_type) {
3124 case ADDR_LE_DEV_PUBLIC:
3125 return BDADDR_LE_PUBLIC;
3128 /* Fallback to LE Random address type */
3129 return BDADDR_LE_RANDOM;
3133 /* Fallback to BR/EDR type */
3134 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections.  SCO/eSCO links are skipped since they share
 * the address of their ACL link.
 */
3138 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3141 struct mgmt_rp_get_connections *rp;
3146 bt_dev_dbg(hdev, "sock %p", sk);
3150 if (!hdev_is_powered(hdev)) {
3151 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3152 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the reply allocation. */
3157 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3158 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3162 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in the address entries. */
3169 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3170 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3172 bacpy(&rp->addr[i].bdaddr, &c->dst);
3173 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* SCO/eSCO entries are filtered out (continue elided here). */
3174 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3179 rp->conn_count = cpu_to_le16(i);
3181 /* Recalculate length in case of filtered SCO connections, etc */
3182 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3183 struct_size(rp, addr, i));
3188 hci_dev_unlock(hdev);
/* Helper: queue a pending PIN Code Neg Reply command and send the
 * corresponding HCI command to the controller.
 */
3192 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3193 struct mgmt_cp_pin_code_neg_reply *cp)
3195 struct mgmt_pending_cmd *cmd;
3198 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3203 cmd->cmd_complete = addr_cmd_complete;
/* The HCI negative reply carries only the remote address. */
3205 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3206 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
/* Send failure: undo the pending command registration. */
3208 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to the
 * controller for the connected BR/EDR device.  A 16-byte PIN is enforced
 * when the pending security level is high.
 */
3213 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3216 struct hci_conn *conn;
3217 struct mgmt_cp_pin_code_reply *cp = data;
3218 struct hci_cp_pin_code_reply reply;
3219 struct mgmt_pending_cmd *cmd;
3222 bt_dev_dbg(hdev, "sock %p", sk);
3226 if (!hdev_is_powered(hdev)) {
3227 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3228 MGMT_STATUS_NOT_POWERED);
3232 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3234 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3235 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-digit PIN; otherwise actively
 * reject the pairing with a negative reply. */
3239 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3240 struct mgmt_cp_pin_code_neg_reply ncp;
3242 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3244 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3246 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3248 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3249 MGMT_STATUS_INVALID_PARAMS);
3254 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3260 cmd->cmd_complete = addr_cmd_complete;
3262 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3263 reply.pin_len = cp->pin_len;
3264 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3266 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
/* Send failure: undo the pending command registration. */
3268 mgmt_pending_remove(cmd);
3271 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairing attempts.  Purely local state - no controller command.
 */
3275 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3278 struct mgmt_cp_set_io_capability *cp = data;
3280 bt_dev_dbg(hdev, "sock %p", sk);
/* SMP_IO_KEYBOARD_DISPLAY is the highest defined IO capability value. */
3282 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3284 MGMT_STATUS_INVALID_PARAMS);
3288 hdev->io_capability = cp->io_capability;
3290 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3292 hci_dev_unlock(hdev);
3294 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT Pair Device command whose user_data is this
 * connection, or NULL if none (the return statements are elided in this
 * excerpt).
 */
3298 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3300 struct hci_dev *hdev = conn->hdev;
3301 struct mgmt_pending_cmd *cmd;
3303 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3304 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3307 if (cmd->user_data != conn)
/* cmd_complete handler for Pair Device: send the final reply, detach the
 * pairing callbacks from the connection and drop the command's reference.
 */
3316 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3318 struct mgmt_rp_pair_device rp;
3319 struct hci_conn *conn = cmd->user_data;
3322 bacpy(&rp.addr.bdaddr, &conn->dst);
3323 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3325 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3326 status, &rp, sizeof(rp));
3328 /* So we don't get further callbacks for this connection */
3329 conn->connect_cfm_cb = NULL;
3330 conn->security_cfm_cb = NULL;
3331 conn->disconn_cfm_cb = NULL;
/* Releases the reference taken when the pairing was initiated. */
3333 hci_conn_drop(conn);
3335 /* The device is paired so there is no need to remove
3336 * its connection parameters anymore.
3338 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing finishes: complete any pending
 * MGMT Pair Device command for this connection with success or failure.
 */
3345 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3347 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3348 struct mgmt_pending_cmd *cmd;
3350 cmd = find_pairing(conn);
3352 cmd->cmd_complete(cmd, status);
3353 mgmt_pending_remove(cmd);
/* hci_conn callback for BR/EDR pairing events: translate the HCI status
 * and complete the pending Pair Device command, if any.
 */
3357 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3359 struct mgmt_pending_cmd *cmd;
3361 BT_DBG("status %u", status);
3363 cmd = find_pairing(conn);
3365 BT_DBG("Unable to find a pending command");
3369 cmd->cmd_complete(cmd, mgmt_status(status));
3370 mgmt_pending_remove(cmd);
/* hci_conn callback for LE pairing events.  For LE only failures are
 * reported here; success is signalled via mgmt_smp_complete() (the
 * zero-status early return is elided in this excerpt).
 */
3373 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3375 struct mgmt_pending_cmd *cmd;
3377 BT_DBG("status %u", status);
3382 cmd = find_pairing(conn);
3384 BT_DBG("Unable to find a pending command");
3388 cmd->cmd_complete(cmd, mgmt_status(status));
3389 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate a connection to the given device
 * (ACL for BR/EDR, connect-by-scan for LE), register pairing callbacks
 * and defer the reply until pairing completes.
 */
3392 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3395 struct mgmt_cp_pair_device *cp = data;
3396 struct mgmt_rp_pair_device rp;
3397 struct mgmt_pending_cmd *cmd;
3398 u8 sec_level, auth_type;
3399 struct hci_conn *conn;
3402 bt_dev_dbg(hdev, "sock %p", sk);
3404 memset(&rp, 0, sizeof(rp));
3405 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3406 rp.addr.type = cp->addr.type;
3408 if (!bdaddr_type_is_valid(cp->addr.type))
3409 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3410 MGMT_STATUS_INVALID_PARAMS,
3413 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3414 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3415 MGMT_STATUS_INVALID_PARAMS,
3420 if (!hdev_is_powered(hdev)) {
3421 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3422 MGMT_STATUS_NOT_POWERED, &rp,
3427 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3428 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3429 MGMT_STATUS_ALREADY_PAIRED, &rp,
3434 sec_level = BT_SECURITY_MEDIUM;
3435 auth_type = HCI_AT_DEDICATED_BONDING;
3437 if (cp->addr.type == BDADDR_BREDR) {
3438 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3439 auth_type, CONN_REASON_PAIR_DEVICE);
3441 u8 addr_type = le_addr_type(cp->addr.type);
3442 struct hci_conn_params *p;
3444 /* When pairing a new device, it is expected to remember
3445 * this device for future connections. Adding the connection
3446 * parameter information ahead of time allows tracking
3447 * of the peripheral preferred values and will speed up any
3448 * further connection establishment.
3450 * If connection parameters already exist, then they
3451 * will be kept and this function does nothing.
3453 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
/* A leftover explicit-connect entry would auto-trigger connects;
 * downgrade it to disabled before pairing. */
3455 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3456 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3458 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3459 sec_level, HCI_LE_CONN_TIMEOUT,
3460 CONN_REASON_PAIR_DEVICE);
/* Map connect errors onto MGMT status codes (IS_ERR check elided). */
3466 if (PTR_ERR(conn) == -EBUSY)
3467 status = MGMT_STATUS_BUSY;
3468 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3469 status = MGMT_STATUS_NOT_SUPPORTED;
3470 else if (PTR_ERR(conn) == -ECONNREFUSED)
3471 status = MGMT_STATUS_REJECTED;
3473 status = MGMT_STATUS_CONNECT_FAILED;
3475 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3476 status, &rp, sizeof(rp));
/* Connection already has pairing callbacks: another pairing is in
 * progress on it, so refuse with BUSY. */
3480 if (conn->connect_cfm_cb) {
3481 hci_conn_drop(conn);
3482 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3483 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3487 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3490 hci_conn_drop(conn);
3494 cmd->cmd_complete = pairing_complete;
3496 /* For LE, just connecting isn't a proof that the pairing finished */
3497 if (cp->addr.type == BDADDR_BREDR) {
3498 conn->connect_cfm_cb = pairing_complete_cb;
3499 conn->security_cfm_cb = pairing_complete_cb;
3500 conn->disconn_cfm_cb = pairing_complete_cb;
3502 conn->connect_cfm_cb = le_pairing_complete_cb;
3503 conn->security_cfm_cb = le_pairing_complete_cb;
3504 conn->disconn_cfm_cb = le_pairing_complete_cb;
3507 conn->io_capability = cp->io_cap;
/* Keep a reference on the connection for the pending command. */
3508 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: complete synchronously. */
3510 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3511 hci_conn_security(conn, sec_level, auth_type, true)) {
3512 cmd->cmd_complete(cmd, 0);
3513 mgmt_pending_remove(cmd);
3519 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command for the given address, remove any partial keys and tear down a
 * link that was only created for pairing.
 */
3523 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3526 struct mgmt_addr_info *addr = data;
3527 struct mgmt_pending_cmd *cmd;
3528 struct hci_conn *conn;
3531 bt_dev_dbg(hdev, "sock %p", sk);
3535 if (!hdev_is_powered(hdev)) {
3536 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3537 MGMT_STATUS_NOT_POWERED);
/* There must be a pending Pair Device command to cancel. */
3541 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3543 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3544 MGMT_STATUS_INVALID_PARAMS);
3548 conn = cmd->user_data;
/* The cancel request must target the device being paired. */
3550 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3551 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3552 MGMT_STATUS_INVALID_PARAMS);
3556 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3557 mgmt_pending_remove(cmd);
3559 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3560 addr, sizeof(*addr));
3562 /* Since user doesn't want to proceed with the connection, abort any
3563 * ongoing pairing and then terminate the link if it was created
3564 * because of the pair device action.
3566 if (addr->type == BDADDR_BREDR)
3567 hci_remove_link_key(hdev, &addr->bdaddr);
3569 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3570 le_addr_type(addr->type));
3572 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3573 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3576 hci_dev_unlock(hdev);
/* Shared backend for all user confirmation / passkey (neg) reply MGMT
 * commands: route the response to SMP for LE pairing, or send the
 * corresponding HCI command (hci_op) for BR/EDR.
 */
3580 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3581 struct mgmt_addr_info *addr, u16 mgmt_op,
3582 u16 hci_op, __le32 passkey)
3584 struct mgmt_pending_cmd *cmd;
3585 struct hci_conn *conn;
3590 if (!hdev_is_powered(hdev)) {
3591 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3592 MGMT_STATUS_NOT_POWERED, addr,
3597 if (addr->type == BDADDR_BREDR)
3598 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3600 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3601 le_addr_type(addr->type));
3604 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3605 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go to SMP, not to the controller. */
3610 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3611 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3613 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3614 MGMT_STATUS_SUCCESS, addr,
3617 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3618 MGMT_STATUS_FAILED, addr,
3624 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3630 cmd->cmd_complete = addr_cmd_complete;
3632 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey; all other ops only the address. */
3633 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3634 struct hci_cp_user_passkey_reply cp;
3636 bacpy(&cp.bdaddr, &addr->bdaddr);
3637 cp.passkey = passkey;
3638 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3640 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* Send failure: undo the pending command registration. */
3644 mgmt_pending_remove(cmd);
3647 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the matching MGMT/HCI opcodes.
 */
3651 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3652 void *data, u16 len)
3654 struct mgmt_cp_pin_code_neg_reply *cp = data;
3656 bt_dev_dbg(hdev, "sock %p", sk);
3658 return user_pairing_resp(sk, hdev, &cp->addr,
3659 MGMT_OP_PIN_CODE_NEG_REPLY,
3660 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the exact payload size,
 * then delegate to user_pairing_resp().
 */
3663 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3666 struct mgmt_cp_user_confirm_reply *cp = data;
3668 bt_dev_dbg(hdev, "sock %p", sk);
/* Fixed-size command: any other length is malformed. */
3670 if (len != sizeof(*cp))
3671 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3672 MGMT_STATUS_INVALID_PARAMS);
3674 return user_pairing_resp(sk, hdev, &cp->addr,
3675 MGMT_OP_USER_CONFIRM_REPLY,
3676 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the matching MGMT/HCI opcodes.
 */
3679 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3680 void *data, u16 len)
3682 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3684 bt_dev_dbg(hdev, "sock %p", sk);
3686 return user_pairing_resp(sk, hdev, &cp->addr,
3687 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3688 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user-entered passkey
 * through user_pairing_resp().
 */
3691 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3694 struct mgmt_cp_user_passkey_reply *cp = data;
3696 bt_dev_dbg(hdev, "sock %p", sk);
3698 return user_pairing_resp(sk, hdev, &cp->addr,
3699 MGMT_OP_USER_PASSKEY_REPLY,
3700 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the matching MGMT/HCI opcodes.
 */
3703 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3704 void *data, u16 len)
3706 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3708 bt_dev_dbg(hdev, "sock %p", sk);
3710 return user_pairing_resp(sk, hdev, &cp->addr,
3711 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3712 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the current advertising instance depends on data that just changed
 * (indicated by flags, e.g. local name or appearance), cancel its timeout
 * and reschedule so the new data is advertised.
 */
3715 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3717 struct adv_info *adv_instance;
3719 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3723 /* stop if current instance doesn't need to be changed */
3724 if (!(adv_instance->flags & flags))
3727 cancel_adv_timeout(hdev);
/* Move on to the next instance in rotation (may be the same one). */
3729 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3733 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
/* hci_cmd_sync work: refresh advertising after a local name change. */
3738 static int name_changed_sync(struct hci_dev *hdev, void *data)
3740 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
/* Completion callback for set_name_sync(): reply to the Set Local Name
 * command and, if advertising, refresh instances that carry the name.
 */
3743 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3745 struct mgmt_pending_cmd *cmd = data;
3746 struct mgmt_cp_set_local_name *cp = cmd->param;
3747 u8 status = mgmt_status(err);
3749 bt_dev_dbg(hdev, "err %d", err);
/* Bail if this command is no longer the pending one (it may already
 * have been cancelled or superseded). */
3751 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3755 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3758 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Advertising may embed the local name; schedule a refresh. */
3761 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3762 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL)
3765 mgmt_pending_remove(cmd);
/* hci_cmd_sync work for Set Local Name: write the name to the controller
 * (BR/EDR name + EIR) and refresh LE scan response data when advertising.
 */
3768 static int set_name_sync(struct hci_dev *hdev, void *data)
3770 if (lmp_bredr_capable(hdev)) {
3771 hci_update_name_sync(hdev);
3772 hci_update_eir_sync(hdev);
3775 /* The name is stored in the scan response data and so
3776 * no need to update the advertising data here.
3778 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3779 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_LOCAL_NAME handler: store the new long/short local name
 * and, when powered, queue the controller update.
 */
3784 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3787 struct mgmt_cp_set_local_name *cp = data;
3788 struct mgmt_pending_cmd *cmd;
3791 bt_dev_dbg(hdev, "sock %p", sk);
3795 /* If the old values are the same as the new ones just return a
3796 * direct command complete event.
3798 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3799 !memcmp(hdev->short_name, cp->short_name,
3800 sizeof(hdev->short_name))) {
3801 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is host-side only, so it can be stored right away. */
3806 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Not powered: store the name and notify listeners; the controller
 * picks it up on power-on. */
3808 if (!hdev_is_powered(hdev)) {
3809 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3811 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3816 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3817 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3818 ext_info_changed(hdev, sk);
3823 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3827 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
/* Queueing failed: report failure and drop the pending command. */
3831 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3832 MGMT_STATUS_FAILED);
3835 mgmt_pending_remove(cmd);
/* Commit the main name only once the update is queued. */
3840 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3843 hci_dev_unlock(hdev);
/* hci_cmd_sync work: refresh advertising after an appearance change. */
3847 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3849 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
/* MGMT_OP_SET_APPEARANCE handler: store the GAP appearance value (LE
 * only) and refresh advertising instances that carry it.
 */
3852 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3855 struct mgmt_cp_set_appearance *cp = data;
3859 bt_dev_dbg(hdev, "sock %p", sk);
/* Appearance is an LE GAP characteristic. */
3861 if (!lmp_le_capable(hdev))
3862 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3863 MGMT_STATUS_NOT_SUPPORTED);
3865 appearance = le16_to_cpu(cp->appearance);
/* Only act on an actual change. */
3869 if (hdev->appearance != appearance) {
3870 hdev->appearance = appearance;
3872 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3873 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3876 ext_info_changed(hdev, sk);
3879 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3882 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, configurable
 * and currently selected PHYs.
 */
3887 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3888 void *data, u16 len)
3890 struct mgmt_rp_get_phy_configuration rp;
3892 bt_dev_dbg(hdev, "sock %p", sk);
3896 memset(&rp, 0, sizeof(rp));
3898 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3899 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3900 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3902 hci_dev_unlock(hdev);
3904 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event with the currently
 * selected PHYs; skip is presumably the socket that triggered the change.
 */
3908 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3910 struct mgmt_ev_phy_configuration_changed ev;
3912 memset(&ev, 0, sizeof(ev));
3914 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3916 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion callback for set_default_phy_sync(): derive the final status
 * (from err, the skb error, or the HCI status byte), reply to the caller
 * and broadcast the PHY change on success.
 */
3920 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3922 struct mgmt_pending_cmd *cmd = data;
3923 struct sk_buff *skb = cmd->skb;
3924 u8 status = mgmt_status(err);
/* Bail if this command is no longer the pending one. */
3926 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
/* Status resolution: no skb -> failed; error skb -> mapped error;
 * otherwise the first response byte is the HCI status. */
3931 status = MGMT_STATUS_FAILED;
3932 else if (IS_ERR(skb))
3933 status = mgmt_status(PTR_ERR(skb));
3935 status = mgmt_status(skb->data[0]);
3938 bt_dev_dbg(hdev, "status %d", status);
3941 mgmt_cmd_status(cmd->sk, hdev->id,
3942 MGMT_OP_SET_PHY_CONFIGURATION, status);
3944 mgmt_cmd_complete(cmd->sk, hdev->id,
3945 MGMT_OP_SET_PHY_CONFIGURATION, 0,
/* Notify all other mgmt sockets about the new PHY selection. */
3948 mgmt_phy_configuration_changed(hdev, cmd->sk);
3951 if (skb && !IS_ERR(skb))
3954 mgmt_pending_remove(cmd);
/* hci_cmd_sync work for Set PHY Configuration: translate the MGMT PHY
 * selection bits into an HCI LE Set Default PHY command and send it
 * synchronously, storing the response skb on the pending command.
 */
3957 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3959 struct mgmt_pending_cmd *cmd = data;
3960 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3961 struct hci_cp_le_set_default_phy cp_phy;
3962 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3964 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bit 0/1 = "no preference" for TX/RX respectively, used
 * when the selection contains no TX/RX PHY bits at all. */
3966 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3967 cp_phy.all_phys |= 0x01;
3969 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3970 cp_phy.all_phys |= 0x02;
/* Map each selected MGMT PHY bit onto its HCI tx_phys/rx_phys bit. */
3972 if (selected_phys & MGMT_PHY_LE_1M_TX)
3973 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3975 if (selected_phys & MGMT_PHY_LE_2M_TX)
3976 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3978 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3979 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3981 if (selected_phys & MGMT_PHY_LE_1M_RX)
3982 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3984 if (selected_phys & MGMT_PHY_LE_2M_RX)
3985 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3987 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3988 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
/* Response skb is kept for set_default_phy_complete() to inspect. */
3990 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3991 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
/* MGMT_OP_SET_PHY_CONFIGURATION handler: apply the requested BR/EDR
 * packet-type selection immediately and queue an LE Set Default PHY
 * command for the LE part.
 */
3996 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3997 void *data, u16 len)
3999 struct mgmt_cp_set_phy_configuration *cp = data;
4000 struct mgmt_pending_cmd *cmd;
4001 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
/* DH1/DM1 are always allowed; the other bits are derived below. */
4002 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4003 bool changed = false;
4006 bt_dev_dbg(hdev, "sock %p", sk);
4008 configurable_phys = get_configurable_phys(hdev);
4009 supported_phys = get_supported_phys(hdev);
4010 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject selections outside the supported set. */
4012 if (selected_phys & ~supported_phys)
4013 return mgmt_cmd_status(sk, hdev->id,
4014 MGMT_OP_SET_PHY_CONFIGURATION,
4015 MGMT_STATUS_INVALID_PARAMS);
/* PHYs that are supported but not configurable must stay selected. */
4017 unconfigure_phys = supported_phys & ~configurable_phys;
4019 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4020 return mgmt_cmd_status(sk, hdev->id,
4021 MGMT_OP_SET_PHY_CONFIGURATION,
4022 MGMT_STATUS_INVALID_PARAMS);
/* No change requested: reply immediately. */
4024 if (selected_phys == get_selected_phys(hdev))
4025 return mgmt_cmd_complete(sk, hdev->id,
4026 MGMT_OP_SET_PHY_CONFIGURATION,
4031 if (!hdev_is_powered(hdev)) {
4032 err = mgmt_cmd_status(sk, hdev->id,
4033 MGMT_OP_SET_PHY_CONFIGURATION,
4034 MGMT_STATUS_REJECTED);
4038 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4039 err = mgmt_cmd_status(sk, hdev->id,
4040 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR 1M multi-slot packets: straightforward enable bits. */
4045 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4046 pkt_type |= (HCI_DH3 | HCI_DM3);
4048 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4050 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4051 pkt_type |= (HCI_DH5 | HCI_DM5);
4053 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* EDR bits in the HCI packet type are "shall not be used" bits, so
 * the logic is inverted: clearing the bit enables the packet type. */
4055 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4056 pkt_type &= ~HCI_2DH1;
4058 pkt_type |= HCI_2DH1;
4060 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4061 pkt_type &= ~HCI_2DH3;
4063 pkt_type |= HCI_2DH3;
4065 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4066 pkt_type &= ~HCI_2DH5;
4068 pkt_type |= HCI_2DH5;
4070 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4071 pkt_type &= ~HCI_3DH1;
4073 pkt_type |= HCI_3DH1;
4075 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4076 pkt_type &= ~HCI_3DH3;
4078 pkt_type |= HCI_3DH3;
4080 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4081 pkt_type &= ~HCI_3DH5;
4083 pkt_type |= HCI_3DH5;
4085 if (pkt_type != hdev->pkt_type) {
4086 hdev->pkt_type = pkt_type;
/* If the LE selection is unchanged, only the BR/EDR packet types
 * were affected; no HCI command is needed. */
4090 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4091 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4093 mgmt_phy_configuration_changed(hdev, sk);
4095 err = mgmt_cmd_complete(sk, hdev->id,
4096 MGMT_OP_SET_PHY_CONFIGURATION,
4102 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4107 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4108 set_default_phy_complete);
4111 err = mgmt_cmd_status(sk, hdev->id,
4112 MGMT_OP_SET_PHY_CONFIGURATION,
4113 MGMT_STATUS_FAILED);
/* Queueing failed: drop the pending command again. */
4116 mgmt_pending_remove(cmd);
4120 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the list of keys the kernel
 * must refuse to use/store (e.g. known-compromised debug keys).
 */
4125 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4128 int err = MGMT_STATUS_SUCCESS;
4129 struct mgmt_cp_set_blocked_keys *keys = data;
/* Upper bound so header + entries still fits in the u16 length. */
4130 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4131 sizeof(struct mgmt_blocked_key_info));
4132 u16 key_count, expected_len;
4135 bt_dev_dbg(hdev, "sock %p", sk);
4137 key_count = __le16_to_cpu(keys->key_count);
4138 if (key_count > max_key_count) {
4139 bt_dev_err(hdev, "too big key_count value %u", key_count);
4140 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4141 MGMT_STATUS_INVALID_PARAMS);
/* The message must be exactly header + key_count entries. */
4144 expected_len = struct_size(keys, keys, key_count);
4145 if (expected_len != len) {
4146 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4148 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4149 MGMT_STATUS_INVALID_PARAMS);
/* Replace semantics: clear the old list before adding new entries. */
4154 hci_blocked_keys_clear(hdev);
4156 for (i = 0; i < key_count; ++i) {
4157 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
/* Allocation failure: report NO_RESOURCES (loop exit elided). */
4160 err = MGMT_STATUS_NO_RESOURCES;
4164 b->type = keys->keys[i].type;
4165 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
/* RCU insertion: readers check this list without the dev lock. */
4166 list_add_rcu(&b->list, &hdev->blocked_keys);
4168 hci_dev_unlock(hdev);
4170 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the HCI_WIDEBAND_SPEECH_ENABLED
 * flag.  Only allowed when the driver declares the quirk, only boolean
 * parameters are accepted, and the setting cannot be flipped while the
 * controller is powered (REJECTED in that case).
 * NOTE(review): extract is missing interleaved lines (locking, else
 * branches, final return) per the gaps in the embedded numbering.
 */
4174 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4175 void *data, u16 len)
4177 struct mgmt_mode *cp = data;
4179 bool changed = false;
4181 bt_dev_dbg(hdev, "sock %p", sk);
/* Wideband speech requires explicit driver opt-in via quirk. */
4183 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4184 return mgmt_cmd_status(sk, hdev->id,
4185 MGMT_OP_SET_WIDEBAND_SPEECH,
4186 MGMT_STATUS_NOT_SUPPORTED);
4188 if (cp->val != 0x00 && cp->val != 0x01)
4189 return mgmt_cmd_status(sk, hdev->id,
4190 MGMT_OP_SET_WIDEBAND_SPEECH,
4191 MGMT_STATUS_INVALID_PARAMS);
/* Changing the value while powered is rejected; only a no-op passes. */
4195 if (hdev_is_powered(hdev) &&
4196 !!cp->val != hci_dev_test_flag(hdev,
4197 HCI_WIDEBAND_SPEECH_ENABLED)) {
4198 err = mgmt_cmd_status(sk, hdev->id,
4199 MGMT_OP_SET_WIDEBAND_SPEECH,
4200 MGMT_STATUS_REJECTED);
/* test_and_set/clear report whether the flag actually changed. */
4205 changed = !hci_dev_test_and_set_flag(hdev,
4206 HCI_WIDEBAND_SPEECH_ENABLED);
4208 changed = hci_dev_test_and_clear_flag(hdev,
4209 HCI_WIDEBAND_SPEECH_ENABLED);
4211 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4216 err = new_settings(hdev, sk);
4219 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: build an EIR-style capability
 * blob (security flags, max encryption key sizes, LE TX power range)
 * into a stack buffer and return it to userspace.
 * NOTE(review): declarations of buf/flags/cap_len and the hci_dev_lock()
 * are among the lines missing from this extract.
 */
4223 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4224 void *data, u16 data_len)
4227 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4230 u8 tx_power_range[2];
4232 bt_dev_dbg(hdev, "sock %p", sk);
4234 memset(&buf, 0, sizeof(buf));
4238 /* When the Read Simple Pairing Options command is supported, then
4239 * the remote public key validation is supported.
4241 * Alternatively, when Microsoft extensions are available, they can
4242 * indicate support for public key validation as well.
4244 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4245 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4247 flags |= 0x02; /* Remote public key validation (LE) */
4249 /* When the Read Encryption Key Size command is supported, then the
4250 * encryption key size is enforced.
4252 if (hdev->commands[20] & 0x10)
4253 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4255 flags |= 0x08; /* Encryption key size enforcement (LE) */
4257 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4260 /* When the Read Simple Pairing Options command is supported, then
4261 * also max encryption key size information is provided.
4263 if (hdev->commands[41] & 0x08)
4264 cap_len = eir_append_le16(rp->cap, cap_len,
4265 MGMT_CAP_MAX_ENC_KEY_SIZE,
4266 hdev->max_enc_key_size)\u003b
4268 cap_len = eir_append_le16(rp->cap, cap_len,
4269 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4270 SMP_MAX_ENC_KEY_SIZE);
4272 /* Append the min/max LE tx power parameters if we were able to fetch
4273 * it from the controller
4275 if (hdev->commands[38] & 0x80) {
/* Single-byte copies: min/max_le_tx_power are stored as s8 values. */
4276 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4277 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4278 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4282 rp->cap_len = cpu_to_le16(cap_len);
4284 hci_dev_unlock(hdev);
4286 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4287 rp, sizeof(*rp) + cap_len);
/* 128-bit UUIDs identifying the experimental (exp) features exposed via
 * MGMT_OP_READ_EXP_FEATURES_INFO / MGMT_OP_SET_EXP_FEATURE.  Each array
 * stores the UUID in little-endian byte order (reverse of the string in
 * the comment above it).
 * NOTE(review): the #endif matching the CONFIG_BT_FEATURE_DEBUG guard
 * and the closing braces of these arrays are among the lines missing
 * from this extract.
 */
4290 #ifdef CONFIG_BT_FEATURE_DEBUG
4291 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4292 static const u8 debug_uuid[16] = {
4293 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4294 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4298 /* 330859bc-7506-492d-9370-9a6f0614037f */
4299 static const u8 quality_report_uuid[16] = {
4300 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4301 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4304 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4305 static const u8 offload_codecs_uuid[16] = {
4306 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4307 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4310 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4311 static const u8 le_simultaneous_roles_uuid[16] = {
4312 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4313 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4316 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4317 static const u8 rpa_resolution_uuid[16] = {
4318 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4319 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4322 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4323 static const u8 iso_socket_uuid[16] = {
4324 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4325 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4328 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4329 static const u8 mgmt_mesh_uuid[16] = {
4330 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4331 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: enumerate the experimental
 * features applicable to this controller (or globally when hdev is
 * NULL), reporting each feature's UUID plus an enabled/supported flags
 * word, then subscribe the socket to future exp-feature change events.
 * NOTE(review): idx declaration/increments and #endif lines are among
 * the lines missing from this extract.
 */
4334 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4335 void *data, u16 data_len)
4337 struct mgmt_rp_read_exp_features_info *rp;
4343 bt_dev_dbg(hdev, "sock %p", sk);
4345 /* Enough space for 7 features */
4346 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4347 rp = kzalloc(len, GFP_KERNEL);
4351 #ifdef CONFIG_BT_FEATURE_DEBUG
/* BIT(0) in flags means "feature currently enabled" throughout. */
4353 flags = bt_dbg_get() ? BIT(0) : 0;
4355 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4356 rp->features[idx].flags = cpu_to_le32(flags);
4361 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4362 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4367 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4368 rp->features[idx].flags = cpu_to_le32(flags);
4372 if (hdev && ll_privacy_capable(hdev)) {
4373 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
/* BIT(1) additionally signals "changing this alters supported settings". */
4374 flags = BIT(0) | BIT(1);
4378 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4379 rp->features[idx].flags = cpu_to_le32(flags);
4383 if (hdev && (aosp_has_quality_report(hdev) ||
4384 hdev->set_quality_report)) {
4385 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4390 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4391 rp->features[idx].flags = cpu_to_le32(flags);
4395 if (hdev && hdev->get_data_path_id) {
4396 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4401 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4402 rp->features[idx].flags = cpu_to_le32(flags);
4406 if (IS_ENABLED(CONFIG_BT_LE)) {
4407 flags = iso_enabled() ? BIT(0) : 0;
4408 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4409 rp->features[idx].flags = cpu_to_le32(flags);
4413 if (hdev && lmp_le_capable(hdev)) {
4414 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4419 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4420 rp->features[idx].flags = cpu_to_le32(flags);
4424 rp->feature_count = cpu_to_le16(idx);
4426 /* After reading the experimental features information, enable
4427 * the events to update client on any future change.
4429 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
/* MGMT_INDEX_NONE answers the controller-less (global) form of the query. */
4431 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4432 MGMT_OP_READ_EXP_FEATURES_INFO,
4433 0, rp, sizeof(*rp) + (20 * idx));
/* Broadcast an EXP_FEATURE_CHANGED event for the LL-privacy (RPA
 * resolution) feature and keep hdev->conn_flags in sync: the
 * DEVICE_PRIVACY connection flag is only offered while the feature is
 * enabled and the controller supports privacy mode.
 */
4439 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4442 struct mgmt_ev_exp_feature_changed ev;
4444 memset(&ev, 0, sizeof(ev));
4445 memcpy(ev.uuid, rpa_resolution_uuid, 16);
/* BIT(1) marks that supported settings changed alongside the toggle. */
4446 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4448 // Do we need to be atomic with the conn_flags?
4449 if (enabled && privacy_mode_capable(hdev))
4450 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4452 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
/* Delivered only to sockets that opted in via EXP_FEATURE_EVENTS. */
4454 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4456 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Generic EXP_FEATURE_CHANGED broadcast for a feature identified by
 * @uuid, sent to all sockets subscribed to experimental-feature events
 * except @skip (normally the socket that issued the change).
 */
4460 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4461 bool enabled, struct sock *skip)
4463 struct mgmt_ev_exp_feature_changed ev;
4465 memset(&ev, 0, sizeof(ev));
4466 memcpy(ev.uuid, uuid, 16);
4467 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4469 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4471 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Table-entry initializer for exp_features[]; pairs a feature UUID with
 * its set handler.  NOTE(review): the ".uuid = _uuid" member line of
 * this macro appears to be missing from this extract.
 */
4474 #define EXP_FEAT(_uuid, _set_func) \
4477 .set_func = _set_func, \
4480 /* The zero key uuid is special. Multiple exp features are set through it. */
/* SET_EXP_FEATURE with the all-zero UUID: a "disable everything" knob.
 * Turns off kernel debug tracing (when built in) and LL privacy (when
 * the controller is unpowered), emitting change events for each.
 * NOTE(review): #endif and several conditional lines are missing from
 * this extract per the gaps in the embedded numbering.
 */
4481 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4482 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4484 struct mgmt_rp_set_exp_feature rp;
4486 memset(rp.uuid, 0, 16);
4487 rp.flags = cpu_to_le32(0);
4489 #ifdef CONFIG_BT_FEATURE_DEBUG
4491 bool changed = bt_dbg_get();
4496 exp_feature_changed(NULL, ZERO_KEY, false, sk);
/* LL privacy can only be cleared while the controller is powered off. */
4500 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4503 changed = hci_dev_test_and_clear_flag(hdev,
4504 HCI_ENABLE_LL_PRIVACY);
4506 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4510 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4512 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4513 MGMT_OP_SET_EXP_FEATURE, 0,
/* SET_EXP_FEATURE handler for the kernel debug-trace feature
 * (debug_uuid).  Only valid on the non-controller index; accepts a
 * single boolean octet and toggles bt_dbg, notifying other subscribed
 * sockets when the value actually changed.
 */
4517 #ifdef CONFIG_BT_FEATURE_DEBUG
4518 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4519 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4521 struct mgmt_rp_set_exp_feature rp;
4526 /* Command requires to use the non-controller index */
4528 return mgmt_cmd_status(sk, hdev->id,
4529 MGMT_OP_SET_EXP_FEATURE,
4530 MGMT_STATUS_INVALID_INDEX);
4532 /* Parameters are limited to a single octet */
4533 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4534 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4535 MGMT_OP_SET_EXP_FEATURE,
4536 MGMT_STATUS_INVALID_PARAMS);
4538 /* Only boolean on/off is supported */
4539 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4540 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4541 MGMT_OP_SET_EXP_FEATURE,
4542 MGMT_STATUS_INVALID_PARAMS);
4544 val = !!cp->param[0];
/* changed == true iff the requested value differs from current state. */
4545 changed = val ? !bt_dbg_get() : bt_dbg_get();
4548 memcpy(rp.uuid, debug_uuid, 16);
4549 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4551 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4553 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4554 MGMT_OP_SET_EXP_FEATURE, 0,
4558 exp_feature_changed(hdev, debug_uuid, val, sk);
/* SET_EXP_FEATURE handler for experimental mesh support
 * (mgmt_mesh_uuid).  Requires a controller index and a boolean octet;
 * toggles HCI_MESH_EXPERIMENTAL (clearing HCI_MESH when disabling) and
 * broadcasts the change to other subscribed sockets.
 */
4564 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4565 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4567 struct mgmt_rp_set_exp_feature rp;
4571 /* Command requires to use the controller index */
4573 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4574 MGMT_OP_SET_EXP_FEATURE,
4575 MGMT_STATUS_INVALID_INDEX);
4577 /* Parameters are limited to a single octet */
4578 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4579 return mgmt_cmd_status(sk, hdev->id,
4580 MGMT_OP_SET_EXP_FEATURE,
4581 MGMT_STATUS_INVALID_PARAMS);
4583 /* Only boolean on/off is supported */
4584 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4585 return mgmt_cmd_status(sk, hdev->id,
4586 MGMT_OP_SET_EXP_FEATURE,
4587 MGMT_STATUS_INVALID_PARAMS);
4589 val = !!cp->param[0];
4592 changed = !hci_dev_test_and_set_flag(hdev,
4593 HCI_MESH_EXPERIMENTAL);
/* Disabling the experiment also turns off any active mesh mode. */
4595 hci_dev_clear_flag(hdev, HCI_MESH);
4596 changed = hci_dev_test_and_clear_flag(hdev,
4597 HCI_MESH_EXPERIMENTAL);
4600 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4601 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4603 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4605 err = mgmt_cmd_complete(sk, hdev->id,
4606 MGMT_OP_SET_EXP_FEATURE, 0,
4610 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
/* SET_EXP_FEATURE handler for LL privacy / RPA resolution
 * (rpa_resolution_uuid).  Requires a controller index, a powered-down
 * controller, and a boolean octet; toggles HCI_ENABLE_LL_PRIVACY
 * (disabling advertising when enabling) and reports BIT(1) because the
 * supported-settings mask changes with this feature.
 */
4615 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4616 struct mgmt_cp_set_exp_feature *cp,
4619 struct mgmt_rp_set_exp_feature rp;
4624 /* Command requires to use the controller index */
4626 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4627 MGMT_OP_SET_EXP_FEATURE,
4628 MGMT_STATUS_INVALID_INDEX);
4630 /* Changes can only be made when controller is powered down */
4631 if (hdev_is_powered(hdev))
4632 return mgmt_cmd_status(sk, hdev->id,
4633 MGMT_OP_SET_EXP_FEATURE,
4634 MGMT_STATUS_REJECTED);
4636 /* Parameters are limited to a single octet */
4637 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4638 return mgmt_cmd_status(sk, hdev->id,
4639 MGMT_OP_SET_EXP_FEATURE,
4640 MGMT_STATUS_INVALID_PARAMS);
4642 /* Only boolean on/off is supported */
4643 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4644 return mgmt_cmd_status(sk, hdev->id,
4645 MGMT_OP_SET_EXP_FEATURE,
4646 MGMT_STATUS_INVALID_PARAMS);
4648 val = !!cp->param[0];
4651 changed = !hci_dev_test_and_set_flag(hdev,
4652 HCI_ENABLE_LL_PRIVACY);
/* LL privacy conflicts with software-based advertising; drop it. */
4653 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4655 /* Enable LL privacy + supported settings changed */
4656 flags = BIT(0) | BIT(1);
4658 changed = hci_dev_test_and_clear_flag(hdev,
4659 HCI_ENABLE_LL_PRIVACY);
4661 /* Disable LL privacy + supported settings changed */
4665 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4666 rp.flags = cpu_to_le32(flags);
4668 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4670 err = mgmt_cmd_complete(sk, hdev->id,
4671 MGMT_OP_SET_EXP_FEATURE, 0,
4675 exp_ll_privacy_feature_changed(val, hdev, sk);
/* SET_EXP_FEATURE handler for controller quality reports
 * (quality_report_uuid).  Requires a controller index and a boolean
 * octet; dispatches to the driver's set_quality_report hook when
 * present, otherwise to the AOSP vendor extension, under the request
 * sync lock.
 */
4680 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4681 struct mgmt_cp_set_exp_feature *cp,
4684 struct mgmt_rp_set_exp_feature rp;
4688 /* Command requires to use a valid controller index */
4690 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4691 MGMT_OP_SET_EXP_FEATURE,
4692 MGMT_STATUS_INVALID_INDEX);
4694 /* Parameters are limited to a single octet */
4695 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4696 return mgmt_cmd_status(sk, hdev->id,
4697 MGMT_OP_SET_EXP_FEATURE,
4698 MGMT_STATUS_INVALID_PARAMS);
4700 /* Only boolean on/off is supported */
4701 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4702 return mgmt_cmd_status(sk, hdev->id,
4703 MGMT_OP_SET_EXP_FEATURE,
4704 MGMT_STATUS_INVALID_PARAMS);
/* req_sync lock serializes against other HCI request activity. */
4706 hci_req_sync_lock(hdev);
4708 val = !!cp->param[0];
4709 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4711 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4712 err = mgmt_cmd_status(sk, hdev->id,
4713 MGMT_OP_SET_EXP_FEATURE,
4714 MGMT_STATUS_NOT_SUPPORTED);
4715 goto unlock_quality_report;
/* Driver hook takes precedence over the AOSP vendor command. */
4719 if (hdev->set_quality_report)
4720 err = hdev->set_quality_report(hdev, val);
4722 err = aosp_set_quality_report(hdev, val);
4725 err = mgmt_cmd_status(sk, hdev->id,
4726 MGMT_OP_SET_EXP_FEATURE,
4727 MGMT_STATUS_FAILED);
4728 goto unlock_quality_report;
4732 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4734 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4737 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4739 memcpy(rp.uuid, quality_report_uuid, 16);
4740 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4741 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4743 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4747 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4749 unlock_quality_report:
4750 hci_req_sync_unlock(hdev);
/* SET_EXP_FEATURE handler for offloaded codecs (offload_codecs_uuid).
 * Requires a controller index, a boolean octet, and a driver that
 * implements get_data_path_id; merely toggles
 * HCI_OFFLOAD_CODECS_ENABLED and notifies subscribers.
 */
4754 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4755 struct mgmt_cp_set_exp_feature *cp,
4760 struct mgmt_rp_set_exp_feature rp;
4762 /* Command requires to use a valid controller index */
4764 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4765 MGMT_OP_SET_EXP_FEATURE,
4766 MGMT_STATUS_INVALID_INDEX);
4768 /* Parameters are limited to a single octet */
4769 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4770 return mgmt_cmd_status(sk, hdev->id,
4771 MGMT_OP_SET_EXP_FEATURE,
4772 MGMT_STATUS_INVALID_PARAMS);
4774 /* Only boolean on/off is supported */
4775 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4776 return mgmt_cmd_status(sk, hdev->id,
4777 MGMT_OP_SET_EXP_FEATURE,
4778 MGMT_STATUS_INVALID_PARAMS);
4780 val = !!cp->param[0];
4781 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
/* get_data_path_id is the driver capability gate for codec offload. */
4783 if (!hdev->get_data_path_id) {
4784 return mgmt_cmd_status(sk, hdev->id,
4785 MGMT_OP_SET_EXP_FEATURE,
4786 MGMT_STATUS_NOT_SUPPORTED);
4791 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4793 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4796 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4799 memcpy(rp.uuid, offload_codecs_uuid, 16);
4800 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4801 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4802 err = mgmt_cmd_complete(sk, hdev->id,
4803 MGMT_OP_SET_EXP_FEATURE, 0,
4807 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
/* SET_EXP_FEATURE handler for concurrent LE central+peripheral roles
 * (le_simultaneous_roles_uuid).  Requires a controller index, a boolean
 * octet, and controller support for simultaneous LE states; toggles
 * HCI_LE_SIMULTANEOUS_ROLES and notifies subscribers.
 */
4812 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4813 struct mgmt_cp_set_exp_feature *cp,
4818 struct mgmt_rp_set_exp_feature rp;
4820 /* Command requires to use a valid controller index */
4822 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4823 MGMT_OP_SET_EXP_FEATURE,
4824 MGMT_STATUS_INVALID_INDEX);
4826 /* Parameters are limited to a single octet */
4827 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4828 return mgmt_cmd_status(sk, hdev->id,
4829 MGMT_OP_SET_EXP_FEATURE,
4830 MGMT_STATUS_INVALID_PARAMS);
4832 /* Only boolean on/off is supported */
4833 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4834 return mgmt_cmd_status(sk, hdev->id,
4835 MGMT_OP_SET_EXP_FEATURE,
4836 MGMT_STATUS_INVALID_PARAMS);
4838 val = !!cp->param[0];
4839 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4841 if (!hci_dev_le_state_simultaneous(hdev)) {
4842 return mgmt_cmd_status(sk, hdev->id,
4843 MGMT_OP_SET_EXP_FEATURE,
4844 MGMT_STATUS_NOT_SUPPORTED);
4849 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4851 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4854 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4857 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4858 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4859 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4860 err = mgmt_cmd_complete(sk, hdev->id,
4861 MGMT_OP_SET_EXP_FEATURE, 0,
4865 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
/* SET_EXP_FEATURE handler for the ISO socket transport
 * (iso_socket_uuid).  Uses the non-controller index since ISO support
 * is global; accepts a boolean octet.
 * NOTE(review): the lines that actually register/unregister the ISO
 * socket layer (~4897-4904) are missing from this extract.
 */
4871 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4872 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4874 struct mgmt_rp_set_exp_feature rp;
4875 bool val, changed = false;
4878 /* Command requires to use the non-controller index */
4880 return mgmt_cmd_status(sk, hdev->id,
4881 MGMT_OP_SET_EXP_FEATURE,
4882 MGMT_STATUS_INVALID_INDEX);
4884 /* Parameters are limited to a single octet */
4885 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4886 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4887 MGMT_OP_SET_EXP_FEATURE,
4888 MGMT_STATUS_INVALID_PARAMS);
4890 /* Only boolean on/off is supported */
4891 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4892 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4893 MGMT_OP_SET_EXP_FEATURE,
4894 MGMT_STATUS_INVALID_PARAMS);
4896 val = cp->param[0] ? true : false;
4905 memcpy(rp.uuid, iso_socket_uuid, 16);
4906 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4908 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4910 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4911 MGMT_OP_SET_EXP_FEATURE, 0,
4915 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
/* Dispatch table mapping experimental-feature UUIDs to their set
 * handlers; terminated by a NULL entry.  set_exp_feature() scans this
 * linearly.
 */
4921 static const struct mgmt_exp_feature {
4923 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4924 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4925 } exp_features[] = {
4926 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4927 #ifdef CONFIG_BT_FEATURE_DEBUG
4928 EXP_FEAT(debug_uuid, set_debug_func),
4930 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4931 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4932 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4933 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4934 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4936 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4939 /* end with a null feature */
4940 EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE entry point: match the request UUID against
 * exp_features[] and delegate to that feature's handler; unknown UUIDs
 * are answered with NOT_SUPPORTED.
 */
4943 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4944 void *data, u16 data_len)
4946 struct mgmt_cp_set_exp_feature *cp = data;
4949 bt_dev_dbg(hdev, "sock %p", sk);
4951 for (i = 0; exp_features[i].uuid; i++) {
4952 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4953 return exp_features[i].set_func(sk, hdev, cp, data_len);
/* hdev may be NULL for non-controller-index commands. */
4956 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4957 MGMT_OP_SET_EXP_FEATURE,
4958 MGMT_STATUS_NOT_SUPPORTED);
/* Compute the per-device supported connection-flag mask: start from the
 * controller-wide mask and drop REMOTE_WAKEUP for RPA-using devices
 * when LL privacy is off, since such devices cannot be programmed into
 * the accept list.
 */
4961 static u32 get_params_flags(struct hci_dev *hdev,
4962 struct hci_conn_params *params)
4964 u32 flags = hdev->conn_flags;
4966 /* Devices using RPAs can only be programmed in the acceptlist if
4967 * LL Privacy has been enable otherwise they cannot mark
4968 * HCI_CONN_FLAG_REMOTE_WAKEUP.
4970 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
4971 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
4972 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
/* MGMT_OP_GET_DEVICE_FLAGS handler: look up the device (accept list for
 * BR/EDR, conn params for LE) and report its supported and current
 * connection flags; unknown devices answer INVALID_PARAMS.
 */
4977 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4980 struct mgmt_cp_get_device_flags *cp = data;
4981 struct mgmt_rp_get_device_flags rp;
4982 struct bdaddr_list_with_flags *br_params;
4983 struct hci_conn_params *params;
4984 u32 supported_flags;
4985 u32 current_flags = 0;
/* Default status; overwritten on a successful lookup below. */
4986 u8 status = MGMT_STATUS_INVALID_PARAMS;
4988 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4989 &cp->addr.bdaddr, cp->addr.type);
4993 supported_flags = hdev->conn_flags;
4995 memset(&rp, 0, sizeof(rp));
4997 if (cp->addr.type == BDADDR_BREDR) {
4998 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5004 current_flags = br_params->flags;
5006 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5007 le_addr_type(cp->addr.type));
/* LE devices can have a narrower supported mask than the controller. */
5011 supported_flags = get_params_flags(hdev, params);
5012 current_flags = params->flags;
5015 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5016 rp.addr.type = cp->addr.type;
5017 rp.supported_flags = cpu_to_le32(supported_flags);
5018 rp.current_flags = cpu_to_le32(current_flags);
5020 status = MGMT_STATUS_SUCCESS;
5023 hci_dev_unlock(hdev);
5025 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast MGMT_EV_DEVICE_FLAGS_CHANGED to all management sockets
 * except @sk (the one that made the change).
 */
5029 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5030 bdaddr_t *bdaddr, u8 bdaddr_type,
5031 u32 supported_flags, u32 current_flags)
5033 struct mgmt_ev_device_flags_changed ev;
5035 bacpy(&ev.addr.bdaddr, bdaddr);
5036 ev.addr.type = bdaddr_type;
5037 ev.supported_flags = cpu_to_le32(supported_flags);
5038 ev.current_flags = cpu_to_le32(current_flags);
5040 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: validate the requested flags
 * against the supported mask (controller-wide first, then per-device
 * for LE) and store them on the matching accept-list entry or conn
 * params, refreshing passive scan when DEVICE_PRIVACY is involved.
 */
5043 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5046 struct mgmt_cp_set_device_flags *cp = data;
5047 struct bdaddr_list_with_flags *br_params;
5048 struct hci_conn_params *params;
5049 u8 status = MGMT_STATUS_INVALID_PARAMS;
5050 u32 supported_flags;
5051 u32 current_flags = __le32_to_cpu(cp->current_flags);
5053 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5054 &cp->addr.bdaddr, cp->addr.type, current_flags);
5056 // We should take hci_dev_lock() early, I think.. conn_flags can change
5057 supported_flags = hdev->conn_flags;
/* Reject any flag bit outside the supported mask. */
5059 if ((supported_flags | current_flags) != supported_flags) {
5060 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5061 current_flags, supported_flags);
5067 if (cp->addr.type == BDADDR_BREDR) {
5068 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5073 br_params->flags = current_flags;
5074 status = MGMT_STATUS_SUCCESS;
5076 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5077 &cp->addr.bdaddr, cp->addr.type);
5083 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5084 le_addr_type(cp->addr.type));
5086 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5087 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
/* Re-check against the tighter per-device mask for LE. */
5091 supported_flags = get_params_flags(hdev, params);
5093 if ((supported_flags | current_flags) != supported_flags) {
5094 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5095 current_flags, supported_flags);
/* WRITE_ONCE: flags may be read concurrently without the dev lock. */
5099 WRITE_ONCE(params->flags, current_flags);
5100 status = MGMT_STATUS_SUCCESS;
5102 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5105 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5106 hci_update_passive_scan(hdev);
5109 hci_dev_unlock(hdev);
5112 if (status == MGMT_STATUS_SUCCESS)
5113 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5114 supported_flags, current_flags);
5116 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5117 &cp->addr, sizeof(cp->addr));
/* Broadcast MGMT_EV_ADV_MONITOR_ADDED for @handle to all management
 * sockets except @sk.
 */
5120 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5123 struct mgmt_ev_adv_monitor_added ev;
5125 ev.monitor_handle = cpu_to_le16(handle);
5127 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle.  If a
 * REMOVE_ADV_MONITOR command for this handle is pending, the issuing
 * socket is skipped (it gets the command response instead).
 * NOTE(review): the lines assigning cp from the pending cmd and setting
 * sk_skip are missing from this extract.
 */
5130 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5132 struct mgmt_ev_adv_monitor_removed ev;
5133 struct mgmt_pending_cmd *cmd;
5134 struct sock *sk_skip = NULL;
5135 struct mgmt_cp_remove_adv_monitor *cp;
5137 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5141 if (cp->monitor_handle)
5145 ev.monitor_handle = cpu_to_le16(handle);
5147 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported monitor
 * features (MSFT OR-patterns when available), limits, and the handles
 * of all currently registered advertisement monitors.
 */
5150 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5151 void *data, u16 len)
5153 struct adv_monitor *monitor = NULL;
5154 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5157 __u32 supported = 0;
5159 __u16 num_handles = 0;
5160 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5162 BT_DBG("request for %s", hdev->name);
5166 if (msft_monitor_supported(hdev))
5167 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Snapshot registered handles under the dev lock, then drop it. */
5169 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5170 handles[num_handles++] = monitor->handle;
5172 hci_dev_unlock(hdev);
5174 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5175 rp = kmalloc(rp_size, GFP_KERNEL);
5179 /* All supported features are currently enabled */
5180 enabled = supported;
5182 rp->supported_features = cpu_to_le32(supported);
5183 rp->enabled_features = cpu_to_le32(enabled);
5184 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5185 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5186 rp->num_handles = cpu_to_le16(num_handles);
5188 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5190 err = mgmt_cmd_complete(sk, hdev->id,
5191 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5192 MGMT_STATUS_SUCCESS, rp, rp_size);
/* hci_cmd_sync completion for ADD_ADV_PATTERNS_MONITOR(_RSSI): on
 * success, announce the monitor, bump the count, mark it registered,
 * and refresh passive scanning; then answer the pending command and
 * free it.
 */
5199 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5200 void *data, int status)
5202 struct mgmt_rp_add_adv_patterns_monitor rp;
5203 struct mgmt_pending_cmd *cmd = data;
5204 struct adv_monitor *monitor = cmd->user_data;
5208 rp.monitor_handle = cpu_to_le16(monitor->handle);
5211 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5212 hdev->adv_monitors_cnt++;
5213 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5214 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5215 hci_update_passive_scan(hdev);
5218 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5219 mgmt_status(status), &rp, sizeof(rp));
5220 mgmt_pending_remove(cmd);
5222 hci_dev_unlock(hdev);
5223 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5224 rp.monitor_handle, status);
/* hci_cmd_sync work function: register the monitor stashed in the
 * pending command's user_data with the core/controller.
 */
5227 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5229 struct mgmt_pending_cmd *cmd = data;
5230 struct adv_monitor *monitor = cmd->user_data;
5232 return hci_add_adv_monitor(hdev, monitor);
/* Common tail of both ADD_ADV_PATTERNS_MONITOR variants: refuse while a
 * conflicting command is pending, create a pending command carrying the
 * parsed monitor, and queue the sync registration; on any failure the
 * monitor is freed and an error status returned.
 */
5235 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5236 struct adv_monitor *m, u8 status,
5237 void *data, u16 len, u16 op)
5239 struct mgmt_pending_cmd *cmd;
/* Monitor add/remove and SET_LE must not run concurrently. */
5247 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5248 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5249 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5250 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5251 status = MGMT_STATUS_BUSY;
5255 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5257 status = MGMT_STATUS_NO_RESOURCES;
5262 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5263 mgmt_add_adv_patterns_monitor_complete);
5266 status = MGMT_STATUS_NO_RESOURCES;
5268 status = MGMT_STATUS_FAILED;
5273 hci_dev_unlock(hdev);
/* Error path: the monitor is ours to free. */
5278 hci_free_adv_monitor(hdev, m);
5279 hci_dev_unlock(hdev);
5280 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy RSSI thresholds from the request into the monitor, or install
 * permissive defaults when the caller supplied none (the non-RSSI
 * command variant).
 * NOTE(review): the if/else lines around the two branches are missing
 * from this extract.
 */
5283 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5284 struct mgmt_adv_rssi_thresholds *rssi)
5287 m->rssi.low_threshold = rssi->low_threshold;
5288 m->rssi.low_threshold_timeout =
5289 __le16_to_cpu(rssi->low_threshold_timeout);
5290 m->rssi.high_threshold = rssi->high_threshold;
5291 m->rssi.high_threshold_timeout =
5292 __le16_to_cpu(rssi->high_threshold_timeout);
5293 m->rssi.sampling_period = rssi->sampling_period;
5295 /* Default values. These numbers are the least constricting
5296 * parameters for MSFT API to work, so it behaves as if there
5297 * are no rssi parameter to consider. May need to be changed
5298 * if other API are to be supported.
5300 m->rssi.low_threshold = -127;
5301 m->rssi.low_threshold_timeout = 60;
5302 m->rssi.high_threshold = -127;
5303 m->rssi.high_threshold_timeout = 0;
5304 m->rssi.sampling_period = 0;
/* Validate and copy @pattern_count advertisement patterns into the
 * monitor's pattern list.  Each pattern's offset+length must fit within
 * the maximum extended advertising data length.  Returns an MGMT status
 * code.
 */
5308 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5309 struct mgmt_adv_pattern *patterns)
5311 u8 offset = 0, length = 0;
5312 struct adv_pattern *p = NULL;
5315 for (i = 0; i < pattern_count; i++) {
5316 offset = patterns[i].offset;
5317 length = patterns[i].length;
/* Bounds check before the memcpy of p->value below. */
5318 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5319 length > HCI_MAX_EXT_AD_LENGTH ||
5320 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5321 return MGMT_STATUS_INVALID_PARAMS;
5323 p = kmalloc(sizeof(*p), GFP_KERNEL);
5325 return MGMT_STATUS_NO_RESOURCES;
5327 p->ad_type = patterns[i].ad_type;
5328 p->offset = patterns[i].offset;
5329 p->length = patterns[i].length;
5330 memcpy(p->value, patterns[i].value, p->length);
5332 INIT_LIST_HEAD(&p->list);
5333 list_add(&p->list, &m->patterns);
5336 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds):
 * validate the variable-length pattern payload, build an adv_monitor
 * with default RSSI parameters, and hand off to
 * __add_adv_patterns_monitor() which owns the monitor from then on.
 */
5339 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5340 void *data, u16 len)
5342 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5343 struct adv_monitor *m = NULL;
5344 u8 status = MGMT_STATUS_SUCCESS;
5345 size_t expected_size = sizeof(*cp);
5347 BT_DBG("request for %s", hdev->name);
/* Must contain at least one pattern beyond the fixed header. */
5349 if (len <= sizeof(*cp)) {
5350 status = MGMT_STATUS_INVALID_PARAMS;
5354 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5355 if (len != expected_size) {
5356 status = MGMT_STATUS_INVALID_PARAMS;
5360 m = kzalloc(sizeof(*m), GFP_KERNEL);
5362 status = MGMT_STATUS_NO_RESOURCES;
5366 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi selects the permissive defaults. */
5368 parse_adv_monitor_rssi(m, NULL);
5369 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5372 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5373 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same as
 * add_adv_patterns_monitor() but the request carries explicit RSSI
 * thresholds which are copied into the monitor.
 */
5376 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5377 void *data, u16 len)
5379 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5380 struct adv_monitor *m = NULL;
5381 u8 status = MGMT_STATUS_SUCCESS;
5382 size_t expected_size = sizeof(*cp);
5384 BT_DBG("request for %s", hdev->name);
5386 if (len <= sizeof(*cp)) {
5387 status = MGMT_STATUS_INVALID_PARAMS;
5391 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5392 if (len != expected_size) {
5393 status = MGMT_STATUS_INVALID_PARAMS;
5397 m = kzalloc(sizeof(*m), GFP_KERNEL);
5399 status = MGMT_STATUS_NO_RESOURCES;
5403 INIT_LIST_HEAD(&m->patterns);
5405 parse_adv_monitor_rssi(m, &cp->rssi);
5406 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5409 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5410 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* hci_cmd_sync completion for REMOVE_ADV_MONITOR: refresh passive
 * scanning on success, then answer the pending command with the handle
 * it asked to remove and free it.
 */
5413 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5414 void *data, int status)
5416 struct mgmt_rp_remove_adv_monitor rp;
5417 struct mgmt_pending_cmd *cmd = data;
5418 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
/* Already little-endian in the request; echoed back as-is. */
5422 rp.monitor_handle = cp->monitor_handle;
5425 hci_update_passive_scan(hdev);
5427 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5428 mgmt_status(status), &rp, sizeof(rp));
5429 mgmt_pending_remove(cmd);
5431 hci_dev_unlock(hdev);
5432 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5433 rp.monitor_handle, status);
/* hci_cmd_sync work function for Remove Advertisement Monitor.
 * Handle 0 means "remove all monitors"; any other handle removes
 * just that one monitor.
 */
5436 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5438 struct mgmt_pending_cmd *cmd = data;
5439 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5440 u16 handle = __le16_to_cpu(cp->monitor_handle);
/* NOTE(review): the handle==0 test selecting this branch sits on an
 * elided line between these two returns.
 */
5443 return hci_remove_all_adv_monitor(hdev);
5445 return hci_remove_single_adv_monitor(hdev, handle);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: reject if a conflicting monitor
 * or LE operation is already pending, otherwise queue the removal on
 * the cmd_sync machinery with mgmt_remove_adv_monitor_complete() as
 * the completion callback.
 */
5448 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5449 void *data, u16 len)
5451 struct mgmt_pending_cmd *cmd;
/* Serialize against other monitor/LE state changes in flight. */
5456 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5457 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5458 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5459 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5460 status = MGMT_STATUS_BUSY;
5464 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5466 status = MGMT_STATUS_NO_RESOURCES;
5470 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5471 mgmt_remove_adv_monitor_complete);
/* Queueing failed: drop the pending entry and map the error. */
5474 mgmt_pending_remove(cmd);
5477 status = MGMT_STATUS_NO_RESOURCES;
5479 status = MGMT_STATUS_FAILED;
5484 hci_dev_unlock(hdev);
/* Error path: reply with a plain status (no response parameters). */
5489 hci_dev_unlock(hdev);
5490 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* Completion callback for Read Local OOB Data: translate the HCI reply
 * skb into a mgmt_rp_read_local_oob_data response.  Legacy (non-SC)
 * controllers only provide the P-192 hash/rand, in which case the
 * P-256 fields are trimmed from the reply size; SC-capable controllers
 * fill in both P-192 and P-256 values.
 */
5494 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5496 struct mgmt_rp_read_local_oob_data mgmt_rp;
5497 size_t rp_size = sizeof(mgmt_rp);
5498 struct mgmt_pending_cmd *cmd = data;
5499 struct sk_buff *skb = cmd->skb;
5500 u8 status = mgmt_status(err);
/* Derive a mgmt status from err, an ERR_PTR skb, or the HCI status
 * byte at the start of the reply, in that order of precedence.
 */
5504 status = MGMT_STATUS_FAILED;
5505 else if (IS_ERR(skb))
5506 status = mgmt_status(PTR_ERR(skb));
5508 status = mgmt_status(skb->data[0]);
5511 bt_dev_dbg(hdev, "status %d", status);
5514 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5518 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
/* Legacy path: controller returned hci_rp_read_local_oob_data
 * (P-192 only).
 */
5520 if (!bredr_sc_enabled(hdev)) {
5521 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short reply before touching rp fields. */
5523 if (skb->len < sizeof(*rp)) {
5524 mgmt_cmd_status(cmd->sk, hdev->id,
5525 MGMT_OP_READ_LOCAL_OOB_DATA,
5526 MGMT_STATUS_FAILED);
5530 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5531 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* No P-256 data available: shrink the reply accordingly. */
5533 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
/* Secure Connections path: extended reply with P-192 and P-256. */
5535 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5537 if (skb->len < sizeof(*rp)) {
5538 mgmt_cmd_status(cmd->sk, hdev->id,
5539 MGMT_OP_READ_LOCAL_OOB_DATA,
5540 MGMT_STATUS_FAILED);
5544 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5545 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5547 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5548 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5551 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5552 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
/* Free the HCI reply skb (if valid) and the pending command. */
5555 if (skb && !IS_ERR(skb))
5558 mgmt_pending_free(cmd);
/* hci_cmd_sync work function for Read Local OOB Data: issue the
 * extended (Secure Connections) or legacy HCI read depending on
 * controller capability, stashing the reply skb in cmd->skb for the
 * completion callback.
 */
5561 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5563 struct mgmt_pending_cmd *cmd = data;
5565 if (bredr_sc_enabled(hdev))
5566 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5568 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
/* Propagate a synchronous failure; the complete callback also
 * re-checks IS_ERR(cmd->skb).
 */
5570 if (IS_ERR(cmd->skb))
5571 return PTR_ERR(cmd->skb);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller; queues read_local_oob_data_sync() with
 * read_local_oob_data_complete() as the completion callback.
 */
5576 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5577 void *data, u16 data_len)
5579 struct mgmt_pending_cmd *cmd;
5582 bt_dev_dbg(hdev, "sock %p", sk);
5586 if (!hdev_is_powered(hdev)) {
5587 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5588 MGMT_STATUS_NOT_POWERED);
/* OOB pairing data only exists for SSP-capable controllers. */
5592 if (!lmp_ssp_capable(hdev)) {
5593 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5594 MGMT_STATUS_NOT_SUPPORTED);
/* mgmt_pending_new (not _add): this command is not looked up via
 * pending_find and is freed by the completion callback.
 */
5598 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5602 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5603 read_local_oob_data_complete);
5606 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5607 MGMT_STATUS_FAILED);
5610 mgmt_pending_free(cmd);
5614 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Accepts two wire formats,
 * distinguished purely by length: the short form carries only P-192
 * hash/rand (BR/EDR only), the extended form carries both P-192 and
 * P-256 values and is also valid for LE addresses.  Zero-valued key
 * halves disable the corresponding OOB data set.
 */
5618 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5619 void *data, u16 len)
5621 struct mgmt_addr_info *addr = data;
5624 bt_dev_dbg(hdev, "sock %p", sk);
5626 if (!bdaddr_type_is_valid(addr->type))
5627 return mgmt_cmd_complete(sk, hdev->id,
5628 MGMT_OP_ADD_REMOTE_OOB_DATA,
5629 MGMT_STATUS_INVALID_PARAMS,
5630 addr, sizeof(*addr));
/* Short (P-192 only) form. */
5634 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5635 struct mgmt_cp_add_remote_oob_data *cp = data;
/* Legacy P-192 OOB data is a BR/EDR-only concept. */
5638 if (cp->addr.type != BDADDR_BREDR) {
5639 err = mgmt_cmd_complete(sk, hdev->id,
5640 MGMT_OP_ADD_REMOTE_OOB_DATA,
5641 MGMT_STATUS_INVALID_PARAMS,
5642 &cp->addr, sizeof(cp->addr));
5646 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5647 cp->addr.type, cp->hash,
5648 cp->rand, NULL, NULL);
5650 status = MGMT_STATUS_FAILED;
5652 status = MGMT_STATUS_SUCCESS;
5654 err = mgmt_cmd_complete(sk, hdev->id,
5655 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5656 &cp->addr, sizeof(cp->addr));
/* Extended (P-192 + P-256) form. */
5657 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5658 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5659 u8 *rand192, *hash192, *rand256, *hash256;
5662 if (bdaddr_type_is_le(cp->addr.type)) {
5663 /* Enforce zero-valued 192-bit parameters as
5664 * long as legacy SMP OOB isn't implemented.
5666 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5667 memcmp(cp->hash192, ZERO_KEY, 16)) {
5668 err = mgmt_cmd_complete(sk, hdev->id,
5669 MGMT_OP_ADD_REMOTE_OOB_DATA,
5670 MGMT_STATUS_INVALID_PARAMS,
5671 addr, sizeof(*addr));
5678 /* In case one of the P-192 values is set to zero,
5679 * then just disable OOB data for P-192.
5681 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5682 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5686 rand192 = cp->rand192;
5687 hash192 = cp->hash192;
5691 /* In case one of the P-256 values is set to zero, then just
5692 * disable OOB data for P-256.
5694 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5695 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5699 rand256 = cp->rand256;
5700 hash256 = cp->hash256;
5703 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5704 cp->addr.type, hash192, rand192,
5707 status = MGMT_STATUS_FAILED;
5709 status = MGMT_STATUS_SUCCESS;
5711 err = mgmt_cmd_complete(sk, hdev->id,
5712 MGMT_OP_ADD_REMOTE_OOB_DATA,
5713 status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed command. */
5715 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5717 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5718 MGMT_STATUS_INVALID_PARAMS);
5722 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BDADDR_ANY clears all stored
 * remote OOB data; a specific BR/EDR address removes just that entry.
 */
5726 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5727 void *data, u16 len)
5729 struct mgmt_cp_remove_remote_oob_data *cp = data;
5733 bt_dev_dbg(hdev, "sock %p", sk);
/* Stored remote OOB data is keyed on BR/EDR addresses only. */
5735 if (cp->addr.type != BDADDR_BREDR)
5736 return mgmt_cmd_complete(sk, hdev->id,
5737 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5738 MGMT_STATUS_INVALID_PARAMS,
5739 &cp->addr, sizeof(cp->addr));
/* Wildcard address: wipe the whole OOB data store. */
5743 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5744 hci_remote_oob_data_clear(hdev);
5745 status = MGMT_STATUS_SUCCESS;
5749 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
/* A failed lookup means the caller named an unknown entry. */
5751 status = MGMT_STATUS_INVALID_PARAMS;
5753 status = MGMT_STATUS_SUCCESS;
5756 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5757 status, &cp->addr, sizeof(cp->addr));
5759 hci_dev_unlock(hdev);
/* Notify whichever Start Discovery variant is pending (regular,
 * service, or limited) that discovery start has finished, then drop
 * the pending command.  Called from hci event processing.
 */
5763 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5765 struct mgmt_pending_cmd *cmd;
5767 bt_dev_dbg(hdev, "status %u", status);
/* Only one of the three discovery opcodes can be pending; try each
 * in turn.
 */
5771 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5773 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5776 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5779 cmd->cmd_complete(cmd, mgmt_status(status));
5780 mgmt_pending_remove(cmd);
5783 hci_dev_unlock(hdev);
/* Check whether a requested discovery type is supported by this
 * controller.  On an unsupported type, *mgmt_status is set to the
 * status code the caller should return to user space.
 */
5786 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5787 uint8_t *mgmt_status)
5790 case DISCOV_TYPE_LE:
5791 *mgmt_status = mgmt_le_support(hdev);
/* Interleaved discovery needs LE; BR/EDR support is checked on an
 * elided line of this case — see the full file.
 */
5795 case DISCOV_TYPE_INTERLEAVED:
5796 *mgmt_status = mgmt_le_support(hdev);
5800 case DISCOV_TYPE_BREDR:
5801 *mgmt_status = mgmt_bredr_support(hdev);
/* Unknown type: always invalid. */
5806 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* cmd_sync completion for any Start Discovery variant: verify the
 * pending command is still the one we queued, reply to user space,
 * and move discovery state to FINDING on success or STOPPED on error.
 */
5813 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5815 struct mgmt_pending_cmd *cmd = data;
/* Bail if the pending entry was already replaced or cancelled. */
5817 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5818 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5819 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5822 bt_dev_dbg(hdev, "err %d", err);
5824 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5826 mgmt_pending_remove(cmd);
5828 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
/* cmd_sync work function: thin wrapper around
 * hci_start_discovery_sync(); 'data' is unused.
 */
5832 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5834 return hci_start_discovery_sync(hdev);
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY ('op' selects which).  Validates
 * power/busy/type/pause preconditions, resets the discovery filter,
 * records the requested type, and queues start_discovery_sync().
 */
5837 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5838 u16 op, void *data, u16 len)
5840 struct mgmt_cp_start_discovery *cp = data;
5841 struct mgmt_pending_cmd *cmd;
5845 bt_dev_dbg(hdev, "sock %p", sk);
5849 if (!hdev_is_powered(hdev)) {
5850 err = mgmt_cmd_complete(sk, hdev->id, op,
5851 MGMT_STATUS_NOT_POWERED,
5852 &cp->type, sizeof(cp->type));
/* Busy if discovery is already active/starting or periodic inquiry
 * is running.
 */
5856 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5857 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5858 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5859 &cp->type, sizeof(cp->type));
5863 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5864 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5865 &cp->type, sizeof(cp->type));
5869 /* Can't start discovery when it is paused */
5870 if (hdev->discovery_paused) {
5871 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5872 &cp->type, sizeof(cp->type));
5876 /* Clear the discovery filter first to free any previously
5877 * allocated memory for the UUID list.
5879 hci_discovery_filter_clear(hdev);
5881 hdev->discovery.type = cp->type;
5882 hdev->discovery.report_invalid_rssi = false;
/* Limited discovery restricts results to limited-discoverable
 * devices.
 */
5883 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5884 hdev->discovery.limited = true;
5886 hdev->discovery.limited = false;
5888 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5894 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5895 start_discovery_complete);
5897 mgmt_pending_remove(cmd);
/* Reflect that discovery start is now in flight. */
5901 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5904 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY entry point: delegates to the shared
 * implementation with the regular-discovery opcode.
 */
5908 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5909 void *data, u16 len)
5911 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY entry point: delegates to the shared
 * implementation with the limited-discovery opcode.
 */
5915 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5916 void *data, u16 len)
5918 return start_discovery_internal(sk, hdev,
5919 MGMT_OP_START_LIMITED_DISCOVERY,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * with result filtering — the command carries an RSSI threshold and a
 * variable-length list of 128-bit service UUIDs which are copied into
 * hdev->discovery before the scan is queued.
 */
5923 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5924 void *data, u16 len)
5926 struct mgmt_cp_start_service_discovery *cp = data;
5927 struct mgmt_pending_cmd *cmd;
/* Upper bound on uuid_count such that sizeof(*cp) + count*16 cannot
 * overflow the u16 length.
 */
5928 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5929 u16 uuid_count, expected_len;
5933 bt_dev_dbg(hdev, "sock %p", sk);
5937 if (!hdev_is_powered(hdev)) {
5938 err = mgmt_cmd_complete(sk, hdev->id,
5939 MGMT_OP_START_SERVICE_DISCOVERY,
5940 MGMT_STATUS_NOT_POWERED,
5941 &cp->type, sizeof(cp->type));
5945 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5946 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5947 err = mgmt_cmd_complete(sk, hdev->id,
5948 MGMT_OP_START_SERVICE_DISCOVERY,
5949 MGMT_STATUS_BUSY, &cp->type,
/* Discovery cannot start while suspended/paused. */
5954 if (hdev->discovery_paused) {
5955 err = mgmt_cmd_complete(sk, hdev->id,
5956 MGMT_OP_START_SERVICE_DISCOVERY,
5957 MGMT_STATUS_BUSY, &cp->type,
/* Validate the declared UUID count before using it in arithmetic. */
5962 uuid_count = __le16_to_cpu(cp->uuid_count);
5963 if (uuid_count > max_uuid_count) {
5964 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5966 err = mgmt_cmd_complete(sk, hdev->id,
5967 MGMT_OP_START_SERVICE_DISCOVERY,
5968 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Exact-length check: header plus uuid_count 16-byte UUIDs. */
5973 expected_len = sizeof(*cp) + uuid_count * 16;
5974 if (expected_len != len) {
5975 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5977 err = mgmt_cmd_complete(sk, hdev->id,
5978 MGMT_OP_START_SERVICE_DISCOVERY,
5979 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5984 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5985 err = mgmt_cmd_complete(sk, hdev->id,
5986 MGMT_OP_START_SERVICE_DISCOVERY,
5987 status, &cp->type, sizeof(cp->type));
5991 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5998 /* Clear the discovery filter first to free any previously
5999 * allocated memory for the UUID list.
6001 hci_discovery_filter_clear(hdev);
6003 hdev->discovery.result_filtering = true;
6004 hdev->discovery.type = cp->type;
6005 hdev->discovery.rssi = cp->rssi;
6006 hdev->discovery.uuid_count = uuid_count;
/* Take a private copy of the UUID list; freed later by
 * hci_discovery_filter_clear().
 */
6008 if (uuid_count > 0) {
6009 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6011 if (!hdev->discovery.uuids) {
6012 err = mgmt_cmd_complete(sk, hdev->id,
6013 MGMT_OP_START_SERVICE_DISCOVERY,
6015 &cp->type, sizeof(cp->type));
6016 mgmt_pending_remove(cmd);
6021 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6022 start_discovery_complete);
6024 mgmt_pending_remove(cmd);
6028 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6031 hci_dev_unlock(hdev);
/* Notify a pending Stop Discovery command of its result and remove it.
 * Called from hci event processing.
 */
6035 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6037 struct mgmt_pending_cmd *cmd;
6039 bt_dev_dbg(hdev, "status %u", status);
6043 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6045 cmd->cmd_complete(cmd, mgmt_status(status));
6046 mgmt_pending_remove(cmd);
6049 hci_dev_unlock(hdev);
/* cmd_sync completion for Stop Discovery: reply to user space, drop
 * the pending command, and mark discovery stopped.
 */
6052 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6054 struct mgmt_pending_cmd *cmd = data;
/* Skip if this pending entry was already cancelled/replaced. */
6056 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6059 bt_dev_dbg(hdev, "err %d", err);
6061 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6063 mgmt_pending_remove(cmd);
6066 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* cmd_sync work function: thin wrapper around
 * hci_stop_discovery_sync(); 'data' is unused.
 */
6069 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6071 return hci_stop_discovery_sync(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: only valid while discovery is
 * active and only for the same type that was started; queues
 * stop_discovery_sync() and moves state to STOPPING.
 */
6074 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6077 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6078 struct mgmt_pending_cmd *cmd;
6081 bt_dev_dbg(hdev, "sock %p", sk);
6085 if (!hci_discovery_active(hdev)) {
6086 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6087 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6088 sizeof(mgmt_cp->type));
/* The requested type must match what discovery was started with. */
6092 if (hdev->discovery.type != mgmt_cp->type) {
6093 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6094 MGMT_STATUS_INVALID_PARAMS,
6095 &mgmt_cp->type, sizeof(mgmt_cp->type));
6099 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6105 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6106 stop_discovery_complete);
6108 mgmt_pending_remove(cmd);
6112 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6115 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, user space confirms
 * whether a device's name is already known.  Known names skip remote
 * name resolution; unknown names are queued for it via
 * hci_inquiry_cache_update_resolve().
 */
6119 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6122 struct mgmt_cp_confirm_name *cp = data;
6123 struct inquiry_entry *e;
6126 bt_dev_dbg(hdev, "sock %p", sk);
6130 if (!hci_discovery_active(hdev)) {
6131 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6132 MGMT_STATUS_FAILED, &cp->addr,
/* Look up the address among entries whose name state is unknown. */
6137 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6139 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6140 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6145 if (cp->name_known) {
6146 e->name_state = NAME_KNOWN;
6149 e->name_state = NAME_NEEDED;
6150 hci_inquiry_cache_update_resolve(hdev, e);
6153 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6154 &cp->addr, sizeof(cp->addr));
6157 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the controller's
 * reject list and broadcast MGMT_EV_DEVICE_BLOCKED to other sockets.
 */
6161 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6164 struct mgmt_cp_block_device *cp = data;
6168 bt_dev_dbg(hdev, "sock %p", sk);
6170 if (!bdaddr_type_is_valid(cp->addr.type))
6171 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6172 MGMT_STATUS_INVALID_PARAMS,
6173 &cp->addr, sizeof(cp->addr));
6177 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6180 status = MGMT_STATUS_FAILED;
/* Broadcast the event, skipping the socket that issued the command
 * (it gets the command reply instead).
 */
6184 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6186 status = MGMT_STATUS_SUCCESS;
6189 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6190 &cp->addr, sizeof(cp->addr));
6192 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror image of block_device() —
 * remove the address from the reject list and broadcast
 * MGMT_EV_DEVICE_UNBLOCKED.  A failed removal (entry not found) maps
 * to INVALID_PARAMS rather than FAILED.
 */
6197 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6200 struct mgmt_cp_unblock_device *cp = data;
6204 bt_dev_dbg(hdev, "sock %p", sk);
6206 if (!bdaddr_type_is_valid(cp->addr.type))
6207 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6208 MGMT_STATUS_INVALID_PARAMS,
6209 &cp->addr, sizeof(cp->addr));
6213 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6216 status = MGMT_STATUS_INVALID_PARAMS;
6220 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6222 status = MGMT_STATUS_SUCCESS;
6225 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6226 &cp->addr, sizeof(cp->addr));
6228 hci_dev_unlock(hdev);
/* cmd_sync work function: regenerate the EIR data so it picks up the
 * new Device ID record; 'data' is unused.
 */
6233 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6235 return hci_update_eir_sync(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the DI record (source, vendor,
 * product, version) on the hdev and queue an EIR refresh.  Source
 * values above 0x0002 are not defined and are rejected.
 */
6238 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6241 struct mgmt_cp_set_device_id *cp = data;
6245 bt_dev_dbg(hdev, "sock %p", sk);
6247 source = __le16_to_cpu(cp->source);
6249 if (source > 0x0002)
6250 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6251 MGMT_STATUS_INVALID_PARAMS);
6255 hdev->devid_source = source;
6256 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6257 hdev->devid_product = __le16_to_cpu(cp->product);
6258 hdev->devid_version = __le16_to_cpu(cp->version);
6260 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Fire-and-forget EIR update; no completion callback needed. */
6263 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6265 hci_dev_unlock(hdev);
/* Helper used after re-enabling instance advertising: just log whether
 * the re-configuration succeeded or failed.
 */
6270 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6273 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6275 bt_dev_dbg(hdev, "status %d", err);
/* cmd_sync completion for Set Advertising: on error, fail all pending
 * SET_ADVERTISING commands; on success, sync HCI_ADVERTISING with the
 * actual HCI_LE_ADV state, answer the pending commands, emit New
 * Settings, and — if Set Advertising was just turned off while
 * advertising instances exist — re-schedule instance advertising.
 */
6278 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6280 struct cmd_lookup match = { NULL, hdev };
6282 struct adv_info *adv_instance;
6283 u8 status = mgmt_status(err);
/* Error path: send the failure status to every pending command. */
6286 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6287 cmd_status_rsp, &status);
6291 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6292 hci_dev_set_flag(hdev, HCI_ADVERTISING)
6294 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6296 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6299 new_settings(hdev, match.sk);
6304 /* If "Set Advertising" was just disabled and instance advertising was
6305 * set up earlier, then re-enable multi-instance advertising.
6307 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6308 list_empty(&hdev->adv_instances))
6311 instance = hdev->cur_adv_instance;
/* No current instance selected: fall back to the first one. */
6313 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6314 struct adv_info, list);
6318 instance = adv_instance->instance;
6321 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6323 enable_advertising_instance(hdev, err);
/* cmd_sync work function for Set Advertising: apply the connectable
 * sub-mode (val == 0x02), cancel any instance timeout, and either
 * start advertising on instance 0 (extended or legacy path) or
 * disable advertising, per the requested value.
 */
6326 static int set_adv_sync(struct hci_dev *hdev, void *data)
6328 struct mgmt_pending_cmd *cmd = data;
6329 struct mgmt_mode *cp = cmd->param;
/* 0x02 means "advertising, connectable". */
6332 if (cp->val == 0x02)
6333 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6335 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6337 cancel_adv_timeout(hdev);
6340 /* Switch to instance "0" for the Set Advertising setting.
6341 * We cannot use update_[adv|scan_rsp]_data() here as the
6342 * HCI_ADVERTISING flag is not yet set.
6344 hdev->cur_adv_instance = 0x00;
6346 if (ext_adv_capable(hdev)) {
6347 hci_start_ext_adv_sync(hdev, 0x00);
/* Legacy advertising: refresh AD and scan-response data before
 * enabling.
 */
6349 hci_update_adv_data_sync(hdev, 0x00);
6350 hci_update_scan_rsp_data_sync(hdev, 0x00);
6351 hci_enable_advertising_sync(hdev);
6354 hci_disable_advertising_sync(hdev);
/* MGMT_OP_SET_ADVERTISING handler (val 0x00 off, 0x01 on, 0x02 on +
 * connectable).  When no HCI traffic is needed — powered off, no state
 * change, mesh enabled, LE connections up, or active LE scan running —
 * the flags are toggled directly and the reply is sent immediately;
 * otherwise set_adv_sync() is queued.
 */
6360 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6363 struct mgmt_mode *cp = data;
6364 struct mgmt_pending_cmd *cmd;
6368 bt_dev_dbg(hdev, "sock %p", sk);
6370 status = mgmt_le_support(hdev);
6372 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6375 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6376 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6377 MGMT_STATUS_INVALID_PARAMS);
/* Advertising changes are rejected while suspend has paused it. */
6379 if (hdev->advertising_paused)
6380 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6387 /* The following conditions are ones which mean that we should
6388 * not do any HCI communication but directly send a mgmt
6389 * response to user space (after toggling the flag if
6392 if (!hdev_is_powered(hdev) ||
6393 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6394 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6395 hci_dev_test_flag(hdev, HCI_MESH) ||
6396 hci_conn_num(hdev, LE_LINK) > 0 ||
6397 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6398 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
/* Flag-only path: update HCI_ADVERTISING[_CONNECTABLE] and reply. */
6402 hdev->cur_adv_instance = 0x00;
6403 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6404 if (cp->val == 0x02)
6405 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6407 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6409 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6410 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6413 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Only emit New Settings if a flag actually changed. */
6418 err = new_settings(hdev, sk);
6423 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6424 pending_find(MGMT_OP_SET_LE, hdev)) {
6425 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6430 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6434 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6435 set_advertising_complete);
6438 mgmt_pending_remove(cmd);
6441 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed while powered off.
 * A non-ANY address must be a valid static random address (not
 * BDADDR_NONE, and with the two most significant bits set, per the
 * Core spec definition of static device addresses).
 */
6445 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6446 void *data, u16 len)
6448 struct mgmt_cp_set_static_address *cp = data;
6451 bt_dev_dbg(hdev, "sock %p", sk);
6453 if (!lmp_le_capable(hdev))
6454 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6455 MGMT_STATUS_NOT_SUPPORTED);
6457 if (hdev_is_powered(hdev))
6458 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6459 MGMT_STATUS_REJECTED);
/* BDADDR_ANY clears the static address; anything else is validated. */
6461 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6462 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6463 return mgmt_cmd_status(sk, hdev->id,
6464 MGMT_OP_SET_STATIC_ADDRESS,
6465 MGMT_STATUS_INVALID_PARAMS);
6467 /* Two most significant bits shall be set */
6468 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6469 return mgmt_cmd_status(sk, hdev->id,
6470 MGMT_OP_SET_STATIC_ADDRESS,
6471 MGMT_STATUS_INVALID_PARAMS);
6476 bacpy(&hdev->static_addr, &cp->bdaddr);
6478 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6482 err = new_settings(hdev, sk);
6485 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: store new LE scan interval/window.
 * Both must be within the HCI-defined 0x0004..0x4000 range and the
 * window must not exceed the interval.  A running background scan is
 * restarted so the new parameters take effect.
 */
6489 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6490 void *data, u16 len)
6492 struct mgmt_cp_set_scan_params *cp = data;
6493 __u16 interval, window;
6496 bt_dev_dbg(hdev, "sock %p", sk);
6498 if (!lmp_le_capable(hdev))
6499 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6500 MGMT_STATUS_NOT_SUPPORTED);
6502 interval = __le16_to_cpu(cp->interval);
6504 if (interval < 0x0004 || interval > 0x4000)
6505 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6506 MGMT_STATUS_INVALID_PARAMS);
6508 window = __le16_to_cpu(cp->window);
6510 if (window < 0x0004 || window > 0x4000)
6511 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6512 MGMT_STATUS_INVALID_PARAMS);
/* The scan window is the active portion of each interval. */
6514 if (window > interval)
6515 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6516 MGMT_STATUS_INVALID_PARAMS);
6520 hdev->le_scan_interval = interval;
6521 hdev->le_scan_window = window;
6523 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6526 /* If background scan is running, restart it so new parameters are
6529 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6530 hdev->discovery.state == DISCOVERY_STOPPED)
6531 hci_update_passive_scan(hdev);
6533 hci_dev_unlock(hdev);
/* cmd_sync completion for Set Fast Connectable: on error, report the
 * failure; on success, update HCI_FAST_CONNECTABLE to match the
 * requested value, reply, and emit New Settings.
 */
6538 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6540 struct mgmt_pending_cmd *cmd = data;
6542 bt_dev_dbg(hdev, "err %d", err);
6545 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6548 struct mgmt_mode *cp = cmd->param;
6551 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6553 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6555 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6556 new_settings(hdev, cmd->sk);
/* Allocated with mgmt_pending_new, so free (not remove). */
6559 mgmt_pending_free(cmd);
/* cmd_sync work function: write the requested fast-connectable page
 * scan setting to the controller.
 */
6562 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6564 struct mgmt_pending_cmd *cmd = data;
6565 struct mgmt_mode *cp = cmd->param;
6567 return hci_write_fast_connectable_sync(hdev, cp->val);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2 (interlaced page scan).  If the
 * setting is already in the requested state, or the device is powered
 * off, the flag is handled locally; otherwise the HCI write is queued.
 */
6570 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6571 void *data, u16 len)
6573 struct mgmt_mode *cp = data;
6574 struct mgmt_pending_cmd *cmd;
6577 bt_dev_dbg(hdev, "sock %p", sk);
6579 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6580 hdev->hci_ver < BLUETOOTH_VER_1_2)
6581 return mgmt_cmd_status(sk, hdev->id,
6582 MGMT_OP_SET_FAST_CONNECTABLE,
6583 MGMT_STATUS_NOT_SUPPORTED);
6585 if (cp->val != 0x00 && cp->val != 0x01)
6586 return mgmt_cmd_status(sk, hdev->id,
6587 MGMT_OP_SET_FAST_CONNECTABLE,
6588 MGMT_STATUS_INVALID_PARAMS);
/* No-op if the flag already matches the requested value. */
6592 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6593 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
/* Powered off: just toggle the flag; it is applied at power on. */
6597 if (!hdev_is_powered(hdev)) {
6598 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6599 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6600 new_settings(hdev, sk);
6604 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6609 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6610 fast_connectable_complete);
6613 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6614 MGMT_STATUS_FAILED);
6617 mgmt_pending_free(cmd);
6621 hci_dev_unlock(hdev);
/* cmd_sync completion for Set BR/EDR: on failure, roll back the
 * HCI_BREDR_ENABLED flag (it was set optimistically before queueing)
 * and report the error; on success, reply and emit New Settings.
 */
6626 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6628 struct mgmt_pending_cmd *cmd = data;
6630 bt_dev_dbg(hdev, "err %d", err);
6633 u8 mgmt_err = mgmt_status(err);
6635 /* We need to restore the flag if related HCI commands
6638 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6640 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6642 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6643 new_settings(hdev, cmd->sk);
6646 mgmt_pending_free(cmd);
/* cmd_sync work function for enabling BR/EDR alongside LE: turn off
 * fast connectable, update page/inquiry scan, and refresh advertising
 * data so its flags reflect BR/EDR support.
 */
6649 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6653 status = hci_write_fast_connectable_sync(hdev, false);
6656 status = hci_update_scan_sync(hdev);
6658 /* Since only the advertising data flags will change, there
6659 * is no need to update the scan response data.
6662 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_BREDR handler: toggle BR/EDR support on a dual-mode
 * controller.  Requires LE to stay enabled.  Disabling while powered
 * on is rejected, as is re-enabling when a static address or Secure
 * Connections configuration makes a dual-mode identity invalid.  The
 * HCI_BREDR_ENABLED flag is set *before* queueing set_bredr_sync() so
 * the generated advertising flags are correct; the completion handler
 * rolls it back on failure.
 */
6667 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6669 struct mgmt_mode *cp = data;
6670 struct mgmt_pending_cmd *cmd;
6673 bt_dev_dbg(hdev, "sock %p", sk);
6675 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6676 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6677 MGMT_STATUS_NOT_SUPPORTED);
6679 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6680 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6681 MGMT_STATUS_REJECTED);
6683 if (cp->val != 0x00 && cp->val != 0x01)
6684 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6685 MGMT_STATUS_INVALID_PARAMS);
/* No-op if the flag already matches. */
6689 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6690 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: toggle flags locally; BR/EDR-only settings are
 * cleared when BR/EDR is being disabled.
 */
6694 if (!hdev_is_powered(hdev)) {
6696 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6697 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6698 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6699 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6702 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6704 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6708 err = new_settings(hdev, sk);
6712 /* Reject disabling when powered on */
6714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6715 MGMT_STATUS_REJECTED);
6718 /* When configuring a dual-mode controller to operate
6719 * with LE only and using a static address, then switching
6720 * BR/EDR back on is not allowed.
6722 * Dual-mode controllers shall operate with the public
6723 * address as its identity address for BR/EDR and LE. So
6724 * reject the attempt to create an invalid configuration.
6726 * The same restrictions applies when secure connections
6727 * has been enabled. For BR/EDR this is a controller feature
6728 * while for LE it is a host stack feature. This means that
6729 * switching BR/EDR back on when secure connections has been
6730 * enabled is not a supported transaction.
6732 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6733 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6734 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6735 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6736 MGMT_STATUS_REJECTED);
6741 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6745 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6746 set_bredr_complete);
6749 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6750 MGMT_STATUS_FAILED);
6752 mgmt_pending_free(cmd);
6757 /* We need to flip the bit already here so that
6758 * hci_req_update_adv_data generates the correct flags.
6760 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6763 hci_dev_unlock(hdev);
/* cmd_sync completion for Set Secure Connections: on error, report the
 * failure; on success, set HCI_SC_ENABLED/HCI_SC_ONLY according to the
 * requested value (0x00 off, 0x01 SC enabled, 0x02 SC-only mode),
 * reply, and emit New Settings.
 */
6767 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6769 struct mgmt_pending_cmd *cmd = data;
6770 struct mgmt_mode *cp;
6772 bt_dev_dbg(hdev, "err %d", err);
6775 u8 mgmt_err = mgmt_status(err);
6777 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
/* The switch on cp->val sits on elided lines; 0x00 clears both
 * flags, 0x01 enables SC, 0x02 enables SC-only mode.
 */
6785 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6786 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6789 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6790 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6793 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6794 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6798 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6799 new_settings(hdev, cmd->sk);
6802 mgmt_pending_free(cmd);
/* cmd_sync work function for Set Secure Connections: force the write
 * by pre-setting HCI_SC_ENABLED so hci_write_sc_support_sync() does
 * not short-circuit, then write the requested support value.
 */
6805 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6807 struct mgmt_pending_cmd *cmd = data;
6808 struct mgmt_mode *cp = cmd->param;
6811 /* Force write of val */
6812 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6814 return hci_write_sc_support_sync(hdev, val);
/* MGMT_OP_SET_SECURE_CONN handler (val 0x00 off, 0x01 on, 0x02
 * SC-only).  When the controller cannot act on it right now (powered
 * off, not SC-capable, or BR/EDR disabled) the flags are toggled
 * locally; otherwise set_secure_conn_sync() is queued.
 */
6817 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6818 void *data, u16 len)
6820 struct mgmt_mode *cp = data;
6821 struct mgmt_pending_cmd *cmd;
6825 bt_dev_dbg(hdev, "sock %p", sk);
/* SC must be supported either by the controller (BR/EDR) or by the
 * host LE stack.
 */
6827 if (!lmp_sc_capable(hdev) &&
6828 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6829 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6830 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR SC requires SSP to be enabled first. */
6832 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6833 lmp_sc_capable(hdev) &&
6834 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6835 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6836 MGMT_STATUS_REJECTED);
6838 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6840 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI write possible or needed. */
6844 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6845 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6849 changed = !hci_dev_test_and_set_flag(hdev,
6851 if (cp->val == 0x02)
6852 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6854 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6856 changed = hci_dev_test_and_clear_flag(hdev,
6858 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6861 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6866 err = new_settings(hdev, sk);
/* No-op if both SC_ENABLED and SC_ONLY already match the request. */
6873 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6874 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6875 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6879 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6883 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6884 set_secure_conn_complete);
6887 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6888 MGMT_STATUS_FAILED);
6890 mgmt_pending_free(cmd);
6894 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 * val: 0x00 discard debug keys, 0x01 keep them, 0x02 keep and also use
 * SSP debug mode. Sends HCI_OP_WRITE_SSP_DEBUG_MODE when the "use" state
 * changed on a powered, SSP-enabled controller.
 * NOTE(review): else/brace lines are missing from this paste. */
6898 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6899 void *data, u16 len)
6901 struct mgmt_mode *cp = data;
6902 bool changed, use_changed;
6905 bt_dev_dbg(hdev, "sock %p", sk);
6907 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6909 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the "keep debug keys" flag actually flipped. */
6914 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6916 changed = hci_dev_test_and_clear_flag(hdev,
6917 HCI_KEEP_DEBUG_KEYS);
/* 0x02 additionally enables active use of debug keys. */
6919 if (cp->val == 0x02)
6920 use_changed = !hci_dev_test_and_set_flag(hdev,
6921 HCI_USE_DEBUG_KEYS);
6923 use_changed = hci_dev_test_and_clear_flag(hdev,
6924 HCI_USE_DEBUG_KEYS);
6926 if (hdev_is_powered(hdev) && use_changed &&
6927 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6928 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6929 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6930 sizeof(mode), &mode);
6933 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6938 err = new_settings(hdev, sk);
6941 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY.
 * privacy: 0x00 off, 0x01 on, 0x02 limited privacy. Only allowed while
 * powered off; stores/clears the local IRK, marks the RPA expired so a new
 * one is generated, and toggles HCI_PRIVACY/HCI_LIMITED_PRIVACY.
 * NOTE(review): if/else and brace lines are missing from this paste. */
6945 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6948 struct mgmt_cp_set_privacy *cp = cp_data;
6952 bt_dev_dbg(hdev, "sock %p", sk);
6954 if (!lmp_le_capable(hdev))
6955 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6956 MGMT_STATUS_NOT_SUPPORTED);
6958 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6959 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6960 MGMT_STATUS_INVALID_PARAMS);
/* Privacy mode may only change while the adapter is powered off. */
6962 if (hdev_is_powered(hdev))
6963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6964 MGMT_STATUS_REJECTED);
6968 /* If user space supports this command it is also expected to
6969 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6971 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: store the IRK and force RPA regeneration. */
6974 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6975 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6976 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6977 hci_adv_instances_set_rpa_expired(hdev, true);
6978 if (cp->privacy == 0x02)
6979 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6981 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: wipe the IRK and clear all privacy-related flags. */
6983 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6984 memset(hdev->irk, 0, sizeof(hdev->irk));
6985 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6986 hci_adv_instances_set_rpa_expired(hdev, false);
6987 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6990 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6995 err = new_settings(hdev, sk);
6998 hci_dev_unlock(hdev);
/* Validate the address type of a user-supplied IRK entry: public LE
 * addresses are accepted as-is; random LE addresses must be static
 * (top two bits of the MSB set per the Core spec). */
7002 static bool irk_is_valid(struct mgmt_irk_info *irk)
7004 switch (irk->addr.type) {
7005 case BDADDR_LE_PUBLIC:
7008 case BDADDR_LE_RANDOM:
7009 /* Two most significant bits shall be set */
7010 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS.
 * Validates count/length and every entry, then atomically replaces the
 * SMP IRK store: clear all existing IRKs and add each non-blocked one.
 * Finally flags the controller as doing RPA resolving.
 * NOTE(review): brace and continuation lines are missing from this paste. */
7018 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7021 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound that keeps struct_size() below U16_MAX. */
7022 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7023 sizeof(struct mgmt_irk_info));
7024 u16 irk_count, expected_len;
7027 bt_dev_dbg(hdev, "sock %p", sk);
7029 if (!lmp_le_capable(hdev))
7030 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7031 MGMT_STATUS_NOT_SUPPORTED);
7033 irk_count = __le16_to_cpu(cp->irk_count);
7034 if (irk_count > max_irk_count) {
7035 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7037 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7038 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared entry count exactly. */
7041 expected_len = struct_size(cp, irks, irk_count);
7042 if (expected_len != len) {
7043 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7045 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7046 MGMT_STATUS_INVALID_PARAMS);
7049 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Reject the whole load if any single entry is malformed. */
7051 for (i = 0; i < irk_count; i++) {
7052 struct mgmt_irk_info *key = &cp->irks[i];
7054 if (!irk_is_valid(key))
7055 return mgmt_cmd_status(sk, hdev->id,
7057 MGMT_STATUS_INVALID_PARAMS);
7062 hci_smp_irks_clear(hdev);
7064 for (i = 0; i < irk_count; i++) {
7065 struct mgmt_irk_info *irk = &cp->irks[i];
7066 u8 addr_type = le_addr_type(irk->addr.type);
/* Skip keys that the administrator has explicitly blocked. */
7068 if (hci_is_blocked_key(hdev,
7069 HCI_BLOCKED_KEY_TYPE_IRK,
7071 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7076 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7077 if (irk->addr.type == BDADDR_BREDR)
7078 addr_type = BDADDR_BREDR;
7080 hci_add_irk(hdev, &irk->addr.bdaddr,
7081 addr_type, irk->val,
7085 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7087 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7089 hci_dev_unlock(hdev);
/* Validate a user-supplied LTK entry: initiator must be a strict boolean
 * and the address must be LE public or LE static random (top two bits of
 * the MSB set per the Core spec). */
7094 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7096 if (key->initiator != 0x00 && key->initiator != 0x01)
7099 switch (key->addr.type) {
7100 case BDADDR_LE_PUBLIC:
7103 case BDADDR_LE_RANDOM:
7104 /* Two most significant bits shall be set */
7105 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 * Validates count/length and each entry, then replaces the SMP LTK store:
 * clears existing LTKs and adds each non-blocked key, mapping the MGMT key
 * type to SMP type + authenticated flag.
 * NOTE(review): brace/break lines are missing from this paste; the switch
 * cases presumably each end with a break -- confirm in the full file. */
7113 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7114 void *cp_data, u16 len)
7116 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound that keeps struct_size() below U16_MAX. */
7117 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7118 sizeof(struct mgmt_ltk_info));
7119 u16 key_count, expected_len;
7122 bt_dev_dbg(hdev, "sock %p", sk);
7124 if (!lmp_le_capable(hdev))
7125 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7126 MGMT_STATUS_NOT_SUPPORTED);
7128 key_count = __le16_to_cpu(cp->key_count);
7129 if (key_count > max_key_count) {
7130 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7132 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7133 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared key count exactly. */
7136 expected_len = struct_size(cp, keys, key_count);
7137 if (expected_len != len) {
7138 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7140 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7141 MGMT_STATUS_INVALID_PARAMS);
7144 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Reject the whole load if any single entry is malformed. */
7146 for (i = 0; i < key_count; i++) {
7147 struct mgmt_ltk_info *key = &cp->keys[i];
7149 if (!ltk_is_valid(key))
7150 return mgmt_cmd_status(sk, hdev->id,
7151 MGMT_OP_LOAD_LONG_TERM_KEYS,
7152 MGMT_STATUS_INVALID_PARAMS);
7157 hci_smp_ltks_clear(hdev);
7159 for (i = 0; i < key_count; i++) {
7160 struct mgmt_ltk_info *key = &cp->keys[i];
7161 u8 type, authenticated;
7162 u8 addr_type = le_addr_type(key->addr.type);
/* Skip keys the administrator has explicitly blocked. */
7164 if (hci_is_blocked_key(hdev,
7165 HCI_BLOCKED_KEY_TYPE_LTK,
7167 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map MGMT key type to SMP key type and authentication level. */
7172 switch (key->type) {
7173 case MGMT_LTK_UNAUTHENTICATED:
7174 authenticated = 0x00;
7175 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7177 case MGMT_LTK_AUTHENTICATED:
7178 authenticated = 0x01;
7179 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7181 case MGMT_LTK_P256_UNAUTH:
7182 authenticated = 0x00;
7183 type = SMP_LTK_P256;
7185 case MGMT_LTK_P256_AUTH:
7186 authenticated = 0x01;
7187 type = SMP_LTK_P256;
7189 case MGMT_LTK_P256_DEBUG:
7190 authenticated = 0x00;
7191 type = SMP_LTK_P256_DEBUG;
7197 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7198 if (key->addr.type == BDADDR_BREDR)
7199 addr_type = BDADDR_BREDR;
7201 hci_add_ltk(hdev, &key->addr.bdaddr,
7202 addr_type, type, authenticated,
7203 key->val, key->enc_size, key->ediv, key->rand);
7206 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7209 hci_dev_unlock(hdev);
/* Completion callback for get_conn_info_sync: build the Get Conn Info
 * reply from the connection's cached RSSI/TX-power values on success, or
 * the HCI "invalid" sentinels on failure, then free the pending command. */
7214 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7216 struct mgmt_pending_cmd *cmd = data;
7217 struct hci_conn *conn = cmd->user_data; /* set by get_conn_info_sync */
7218 struct mgmt_cp_get_conn_info *cp = cmd->param;
7219 struct mgmt_rp_get_conn_info rp;
7222 bt_dev_dbg(hdev, "err %d", err);
/* Echo the requested address back in the reply. */
7224 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7226 status = mgmt_status(err);
7227 if (status == MGMT_STATUS_SUCCESS) {
7228 rp.rssi = conn->rssi;
7229 rp.tx_power = conn->tx_power;
7230 rp.max_tx_power = conn->max_tx_power;
/* Failure: report sentinel values so userspace knows data is invalid. */
7232 rp.rssi = HCI_RSSI_INVALID;
7233 rp.tx_power = HCI_TX_POWER_INVALID;
7234 rp.max_tx_power = HCI_TX_POWER_INVALID;
7237 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7240 mgmt_pending_free(cmd);
/* hci_cmd_sync work item for Get Conn Info: re-validate the connection,
 * then refresh RSSI (always), current TX power (LE only until known) and
 * max TX power (once per connection) via synchronous HCI reads. */
7243 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7245 struct mgmt_pending_cmd *cmd = data;
7246 struct mgmt_cp_get_conn_info *cp = cmd->param;
7247 struct hci_conn *conn;
7251 /* Make sure we are still connected */
7252 if (cp->addr.type == BDADDR_BREDR)
7253 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7256 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7258 if (!conn || conn->state != BT_CONNECTED)
7259 return MGMT_STATUS_NOT_CONNECTED;
/* Hand the conn to the completion callback via user_data. */
7261 cmd->user_data = conn;
7262 handle = cpu_to_le16(conn->handle);
7264 /* Refresh RSSI each time */
7265 err = hci_read_rssi_sync(hdev, handle);
7267 /* For LE links TX power does not change thus we don't need to
7268 * query for it once value is known.
7270 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7271 conn->tx_power == HCI_TX_POWER_INVALID))
7272 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7274 /* Max TX power needs to be read only once per connection */
7275 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7276 err = hci_read_tx_power_sync(hdev, handle, 0x01);
/* Handler for MGMT_OP_GET_CONN_INFO.
 * If the cached RSSI/TX-power data is fresh (younger than a randomized
 * conn_info age) reply immediately from hci_conn; otherwise queue
 * get_conn_info_sync to refresh the values from the controller.
 * NOTE(review): brace/else lines are missing from this paste. */
7281 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7284 struct mgmt_cp_get_conn_info *cp = data;
7285 struct mgmt_rp_get_conn_info rp;
7286 struct hci_conn *conn;
7287 unsigned long conn_info_age;
7290 bt_dev_dbg(hdev, "sock %p", sk);
/* Pre-fill the reply with the requested address for all paths. */
7292 memset(&rp, 0, sizeof(rp));
7293 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7294 rp.addr.type = cp->addr.type;
7296 if (!bdaddr_type_is_valid(cp->addr.type))
7297 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7298 MGMT_STATUS_INVALID_PARAMS,
7303 if (!hdev_is_powered(hdev)) {
7304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7305 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the live connection on the matching transport. */
7310 if (cp->addr.type == BDADDR_BREDR)
7311 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7314 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7316 if (!conn || conn->state != BT_CONNECTED) {
7317 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7318 MGMT_STATUS_NOT_CONNECTED, &rp,
7323 /* To avoid client trying to guess when to poll again for information we
7324 * calculate conn info age as random value between min/max set in hdev.
7326 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7327 hdev->conn_info_max_age - 1);
7329 /* Query controller to refresh cached values if they are too old or were
7332 if (time_after(jiffies, conn->conn_info_timestamp +
7333 msecs_to_jiffies(conn_info_age)) ||
7334 !conn->conn_info_timestamp) {
7335 struct mgmt_pending_cmd *cmd;
7337 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7342 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7343 cmd, get_conn_info_complete);
/* Queueing failed: reply with FAILED and drop the pending command. */
7347 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7348 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7351 mgmt_pending_free(cmd);
/* Record when the refresh was kicked off for the next freshness check. */
7356 conn->conn_info_timestamp = jiffies;
7358 /* Cache is valid, just reply with values cached in hci_conn */
7359 rp.rssi = conn->rssi;
7360 rp.tx_power = conn->tx_power;
7361 rp.max_tx_power = conn->max_tx_power;
7363 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7364 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7368 hci_dev_unlock(hdev);
/* Completion callback for get_clock_info_sync: build the Get Clock Info
 * reply (local clock always; piconet clock/accuracy presumably only when
 * a connection was involved -- the guarding if lines are missing from
 * this paste), send it and free the pending command. */
7372 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7374 struct mgmt_pending_cmd *cmd = data;
7375 struct mgmt_cp_get_clock_info *cp = cmd->param;
7376 struct mgmt_rp_get_clock_info rp;
7377 struct hci_conn *conn = cmd->user_data; /* set by get_clock_info_sync */
7378 u8 status = mgmt_status(err);
7380 bt_dev_dbg(hdev, "err %d", err);
7382 memset(&rp, 0, sizeof(rp));
7383 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7384 rp.addr.type = cp->addr.type;
7389 rp.local_clock = cpu_to_le32(hdev->clock);
7392 rp.piconet_clock = cpu_to_le32(conn->clock);
7393 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7397 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7400 mgmt_pending_free(cmd);
/* hci_cmd_sync work item for Get Clock Info: first read the local clock
 * (handle 0 / which 0 from the zeroed hci_cp), then, if the requested
 * connection still exists, read its piconet clock. */
7403 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7405 struct mgmt_pending_cmd *cmd = data;
7406 struct mgmt_cp_get_clock_info *cp = cmd->param;
7407 struct hci_cp_read_clock hci_cp;
7408 struct hci_conn *conn;
/* Zeroed cp == local clock request; result ignored intentionally here. */
7410 memset(&hci_cp, 0, sizeof(hci_cp));
7411 hci_read_clock_sync(hdev, &hci_cp);
7413 /* Make sure connection still exists */
7414 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7415 if (!conn || conn->state != BT_CONNECTED)
7416 return MGMT_STATUS_NOT_CONNECTED;
/* Hand the conn to the completion callback via user_data. */
7418 cmd->user_data = conn;
7419 hci_cp.handle = cpu_to_le16(conn->handle);
7420 hci_cp.which = 0x01; /* Piconet clock */
7422 return hci_read_clock_sync(hdev, &hci_cp);
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 * BR/EDR only. Validates power state and (when a peer address is given)
 * that the ACL connection exists, then queues get_clock_info_sync.
 * NOTE(review): brace lines are missing from this paste. */
7425 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7428 struct mgmt_cp_get_clock_info *cp = data;
7429 struct mgmt_rp_get_clock_info rp;
7430 struct mgmt_pending_cmd *cmd;
7431 struct hci_conn *conn;
7434 bt_dev_dbg(hdev, "sock %p", sk);
/* Pre-fill the reply address for all error paths. */
7436 memset(&rp, 0, sizeof(rp));
7437 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7438 rp.addr.type = cp->addr.type;
7440 if (cp->addr.type != BDADDR_BREDR)
7441 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7442 MGMT_STATUS_INVALID_PARAMS,
7447 if (!hdev_is_powered(hdev)) {
7448 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7449 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-ANY address: the piconet clock needs a live ACL connection. */
7454 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7455 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7457 if (!conn || conn->state != BT_CONNECTED) {
7458 err = mgmt_cmd_complete(sk, hdev->id,
7459 MGMT_OP_GET_CLOCK_INFO,
7460 MGMT_STATUS_NOT_CONNECTED,
7468 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7472 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7473 get_clock_info_complete);
/* Queueing failed: reply with FAILED and drop the pending command. */
7476 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7477 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7480 mgmt_pending_free(cmd);
7485 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists and
 * is in BT_CONNECTED state. (Return statements are missing from this
 * paste but the checks imply false on mismatch, true otherwise.) */
7489 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7491 struct hci_conn *conn;
7493 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7497 if (conn->dst_type != type)
7500 if (conn->state != BT_CONNECTED)
7506 /* This function requires the caller holds hdev->lock */
/* Create-or-update the conn params for addr and move the entry onto the
 * pend_le_conns / pend_le_reports list matching the new auto_connect
 * policy. Returns 0 on success (error paths are missing from this paste;
 * presumably -EIO when hci_conn_params_add fails -- confirm). */
7507 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7508 u8 addr_type, u8 auto_connect)
7510 struct hci_conn_params *params;
7512 params = hci_conn_params_add(hdev, addr, addr_type);
/* No change requested: nothing to do. */
7516 if (params->auto_connect == auto_connect)
/* Detach from any pending list before re-filing under the new policy. */
7519 hci_pend_le_list_del_init(params);
7521 switch (auto_connect) {
7522 case HCI_AUTO_CONN_DISABLED:
7523 case HCI_AUTO_CONN_LINK_LOSS:
7524 /* If auto connect is being disabled when we're trying to
7525 * connect to device, keep connecting.
7527 if (params->explicit_connect)
7528 hci_pend_le_list_add(params, &hdev->pend_le_conns)
7530 case HCI_AUTO_CONN_REPORT:
7531 if (params->explicit_connect)
7532 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7534 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7536 case HCI_AUTO_CONN_DIRECT:
7537 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if not already connected. */
7538 if (!is_connected(hdev, addr, addr_type))
7539 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7543 params->auto_connect = auto_connect;
7545 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7546 addr, addr_type, auto_connect);
/* Emit the MGMT Device Added event for the given address/type/action,
 * skipping the socket that issued the Add Device command (sk). */
7551 static void device_added(struct sock *sk, struct hci_dev *hdev,
7552 bdaddr_t *bdaddr, u8 type, u8 action)
7554 struct mgmt_ev_device_added ev;
7556 bacpy(&ev.addr.bdaddr, bdaddr);
7557 ev.addr.type = type;
7560 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Sync work item after Add Device: re-evaluate LE passive scanning so the
 * new accept/report entry takes effect. */
7563 static int add_device_sync(struct hci_dev *hdev, void *data)
7565 return hci_update_passive_scan_sync(hdev);
/* Handler for MGMT_OP_ADD_DEVICE.
 * action: 0x00 background scan, 0x01 allow incoming / direct connect,
 * 0x02 auto-connect. BR/EDR entries go to the accept list (incoming only);
 * LE entries become conn_params with the matching auto_connect policy,
 * followed by a passive-scan refresh and Device Added / flags events.
 * NOTE(review): goto/brace lines are missing from this paste. */
7568 static int add_device(struct sock *sk, struct hci_dev *hdev,
7569 void *data, u16 len)
7571 struct mgmt_cp_add_device *cp = data;
7572 u8 auto_conn, addr_type;
7573 struct hci_conn_params *params;
7575 u32 current_flags = 0;
7576 u32 supported_flags;
7578 bt_dev_dbg(hdev, "sock %p", sk);
/* Address must be a valid type and not the ANY wildcard. */
7580 if (!bdaddr_type_is_valid(cp->addr.type) ||
7581 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7582 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7583 MGMT_STATUS_INVALID_PARAMS,
7584 &cp->addr, sizeof(cp->addr));
7586 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7587 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7588 MGMT_STATUS_INVALID_PARAMS,
7589 &cp->addr, sizeof(cp->addr));
7593 if (cp->addr.type == BDADDR_BREDR) {
7594 /* Only incoming connections action is supported for now */
7595 if (cp->action != 0x01) {
7596 err = mgmt_cmd_complete(sk, hdev->id,
7598 MGMT_STATUS_INVALID_PARAMS,
7599 &cp->addr, sizeof(cp->addr));
7603 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
/* Refresh page-scan settings to honor the new accept-list entry. */
7609 hci_update_scan(hdev);
7614 addr_type = le_addr_type(cp->addr.type);
/* Map the MGMT action onto an LE auto-connect policy. */
7616 if (cp->action == 0x02)
7617 auto_conn = HCI_AUTO_CONN_ALWAYS;
7618 else if (cp->action == 0x01)
7619 auto_conn = HCI_AUTO_CONN_DIRECT;
7621 auto_conn = HCI_AUTO_CONN_REPORT;
7623 /* Kernel internally uses conn_params with resolvable private
7624 * address, but Add Device allows only identity addresses.
7625 * Make sure it is enforced before calling
7626 * hci_conn_params_lookup.
7628 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7629 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7630 MGMT_STATUS_INVALID_PARAMS,
7631 &cp->addr, sizeof(cp->addr));
7635 /* If the connection parameters don't exist for this device,
7636 * they will be created and configured with defaults.
7638 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7640 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7641 MGMT_STATUS_FAILED, &cp->addr,
/* Fetch the params back to report their current flags. */
7645 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7648 current_flags = params->flags;
7651 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
/* Notify listeners: device added + its supported/current flags. */
7656 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7657 supported_flags = hdev->conn_flags;
7658 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7659 supported_flags, current_flags);
7661 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7662 MGMT_STATUS_SUCCESS, &cp->addr,
7666 hci_dev_unlock(hdev);
/* Emit the MGMT Device Removed event for the given address/type,
 * skipping the socket that issued the Remove Device command (sk). */
7670 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7671 bdaddr_t *bdaddr, u8 type)
7673 struct mgmt_ev_device_removed ev;
7675 bacpy(&ev.addr.bdaddr, bdaddr);
7676 ev.addr.type = type;
7678 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Sync work item after Remove Device: re-evaluate LE passive scanning so
 * the removed entry no longer influences scanning. */
7681 static int remove_device_sync(struct hci_dev *hdev, void *data)
7683 return hci_update_passive_scan_sync(hdev);
/* Handler for MGMT_OP_REMOVE_DEVICE.
 * Specific address: remove the BR/EDR accept-list entry or the LE
 * conn_params (only user-added policies may be removed -- DISABLED and
 * EXPLICIT entries are rejected). BDADDR_ANY: bulk-remove all accept-list
 * entries and all non-disabled LE conn_params. A passive-scan refresh is
 * queued at the end.
 * NOTE(review): goto/brace/continue lines are missing from this paste. */
7686 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7687 void *data, u16 len)
7689 struct mgmt_cp_remove_device *cp = data;
7692 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-ANY address: remove a single device entry. */
7696 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7697 struct hci_conn_params *params;
7700 if (!bdaddr_type_is_valid(cp->addr.type)) {
7701 err = mgmt_cmd_complete(sk, hdev->id,
7702 MGMT_OP_REMOVE_DEVICE,
7703 MGMT_STATUS_INVALID_PARAMS,
7704 &cp->addr, sizeof(cp->addr));
7708 if (cp->addr.type == BDADDR_BREDR) {
7709 err = hci_bdaddr_list_del(&hdev->accept_list,
7713 err = mgmt_cmd_complete(sk, hdev->id,
7714 MGMT_OP_REMOVE_DEVICE,
7715 MGMT_STATUS_INVALID_PARAMS,
/* Refresh page scanning now that the accept list shrank. */
7721 hci_update_scan(hdev);
7723 device_removed(sk, hdev, &cp->addr.bdaddr,
7728 addr_type = le_addr_type(cp->addr.type);
7730 /* Kernel internally uses conn_params with resolvable private
7731 * address, but Remove Device allows only identity addresses.
7732 * Make sure it is enforced before calling
7733 * hci_conn_params_lookup.
7735 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7736 err = mgmt_cmd_complete(sk, hdev->id,
7737 MGMT_OP_REMOVE_DEVICE,
7738 MGMT_STATUS_INVALID_PARAMS,
7739 &cp->addr, sizeof(cp->addr));
7743 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7746 err = mgmt_cmd_complete(sk, hdev->id,
7747 MGMT_OP_REMOVE_DEVICE,
7748 MGMT_STATUS_INVALID_PARAMS,
7749 &cp->addr, sizeof(cp->addr));
/* DISABLED/EXPLICIT entries were not added via Add Device; refuse. */
7753 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7754 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7755 err = mgmt_cmd_complete(sk, hdev->id,
7756 MGMT_OP_REMOVE_DEVICE,
7757 MGMT_STATUS_INVALID_PARAMS,
7758 &cp->addr, sizeof(cp->addr));
7762 hci_conn_params_free(params);
7764 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY branch: bulk removal; addr.type must be 0 (BR/EDR). */
7766 struct hci_conn_params *p, *tmp;
7767 struct bdaddr_list *b, *btmp;
7769 if (cp->addr.type) {
7770 err = mgmt_cmd_complete(sk, hdev->id,
7771 MGMT_OP_REMOVE_DEVICE,
7772 MGMT_STATUS_INVALID_PARAMS,
7773 &cp->addr, sizeof(cp->addr));
7777 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7778 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7783 hci_update_scan(hdev);
7785 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7786 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7788 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Entries with a pending explicit connect are kept but demoted. */
7789 if (p->explicit_connect) {
7790 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7793 hci_conn_params_free(p);
7796 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7799 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7802 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7803 MGMT_STATUS_SUCCESS, &cp->addr,
7806 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM.
 * Validates count/length, clears disabled conn params, then adds each
 * valid LE connection-parameter entry (invalid entries are logged and
 * skipped rather than failing the whole load).
 * NOTE(review): '¶m' on the debug/add lines below looks like a mojibake
 * of '&param' introduced by the paste -- confirm against the original
 * file; continue/brace lines are also missing. */
7810 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7813 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound that keeps struct_size() below U16_MAX. */
7814 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7815 sizeof(struct mgmt_conn_param));
7816 u16 param_count, expected_len;
7819 if (!lmp_le_capable(hdev))
7820 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7821 MGMT_STATUS_NOT_SUPPORTED);
7823 param_count = __le16_to_cpu(cp->param_count);
7824 if (param_count > max_param_count) {
7825 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7827 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7828 MGMT_STATUS_INVALID_PARAMS);
/* Payload length must match the declared parameter count exactly. */
7831 expected_len = struct_size(cp, params, param_count);
7832 if (expected_len != len) {
7833 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7835 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7836 MGMT_STATUS_INVALID_PARAMS);
7839 bt_dev_dbg(hdev, "param_count %u", param_count);
/* Drop stale disabled entries before loading the new set. */
7843 hci_conn_params_clear_disabled(hdev);
7845 for (i = 0; i < param_count; i++) {
7846 struct mgmt_conn_param *param = &cp->params[i];
7847 struct hci_conn_params *hci_param;
7848 u16 min, max, latency, timeout;
7851 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
/* Only LE public/random identity addresses are meaningful here. */
7854 if (param->addr.type == BDADDR_LE_PUBLIC) {
7855 addr_type = ADDR_LE_DEV_PUBLIC;
7856 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7857 addr_type = ADDR_LE_DEV_RANDOM;
7859 bt_dev_err(hdev, "ignoring invalid connection parameters");
7863 min = le16_to_cpu(param->min_interval);
7864 max = le16_to_cpu(param->max_interval);
7865 latency = le16_to_cpu(param->latency);
7866 timeout = le16_to_cpu(param->timeout);
7868 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7869 min, max, latency, timeout);
/* Range-check against the Core spec limits; skip bad entries. */
7871 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7872 bt_dev_err(hdev, "ignoring invalid connection parameters");
7876 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
7879 bt_dev_err(hdev, "failed to add connection parameters");
7883 hci_param->conn_min_interval = min;
7884 hci_param->conn_max_interval = max;
7885 hci_param->conn_latency = latency;
7886 hci_param->supervision_timeout = timeout;
7889 hci_dev_unlock(hdev);
7891 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG.
 * Only valid while powered off and only on controllers with the
 * EXTERNAL_CONFIG quirk. Toggles HCI_EXT_CONFIGURED and, when the
 * configured/unconfigured state flips as a result, re-registers the
 * management index and kicks off power-on or raw mode accordingly.
 * NOTE(review): else/brace lines are missing from this paste. */
7895 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7896 void *data, u16 len)
7898 struct mgmt_cp_set_external_config *cp = data;
7902 bt_dev_dbg(hdev, "sock %p", sk);
7904 if (hdev_is_powered(hdev))
7905 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7906 MGMT_STATUS_REJECTED);
7908 if (cp->config != 0x00 && cp->config != 0x01)
7909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7910 MGMT_STATUS_INVALID_PARAMS);
7912 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7913 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7914 MGMT_STATUS_NOT_SUPPORTED);
7919 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7921 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7923 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7930 err = new_options(hdev, sk);
/* If the effective configured state changed, swap the mgmt index. */
7932 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7933 mgmt_index_removed(hdev);
7935 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7936 hci_dev_set_flag(hdev, HCI_CONFIG);
7937 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7939 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Became unconfigured: fall back to raw mode and re-register. */
7941 set_bit(HCI_RAW, &hdev->flags);
7942 mgmt_index_added(hdev);
7947 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.
 * Only valid while powered off, with a non-ANY address, on controllers
 * that provide a set_bdaddr driver hook. Stores the address and, if the
 * device thereby becomes configured, transitions it out of the
 * unconfigured state and schedules power-on.
 * NOTE(review): brace lines are missing from this paste. */
7951 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7952 void *data, u16 len)
7954 struct mgmt_cp_set_public_address *cp = data;
7958 bt_dev_dbg(hdev, "sock %p", sk);
7960 if (hdev_is_powered(hdev))
7961 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7962 MGMT_STATUS_REJECTED);
7964 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7965 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7966 MGMT_STATUS_INVALID_PARAMS);
/* The driver must support programming a public address. */
7968 if (!hdev->set_bdaddr)
7969 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7970 MGMT_STATUS_NOT_SUPPORTED);
7974 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7975 bacpy(&hdev->public_addr, &cp->bdaddr);
7977 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7984 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7985 err = new_options(hdev, sk);
/* Address completed configuration: re-register and power on. */
7987 if (is_configured(hdev)) {
7988 mgmt_index_removed(hdev);
7990 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7992 hci_dev_set_flag(hdev, HCI_CONFIG);
7993 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7995 queue_work(hdev->req_workqueue, &hdev->power_on);
7999 hci_dev_unlock(hdev);
/* Completion callback for Read Local OOB Extended Data (BR/EDR path).
 * Parses the controller reply (legacy P-192-only or SC P-192+P-256 form),
 * assembles an EIR blob with class-of-device plus SSP hash/randomizer
 * values, completes the pending command and broadcasts the OOB-data-
 * updated event to subscribed sockets.
 * NOTE(review): many goto/brace/assignment lines (h192/r192/h256/r256
 * setup) are missing from this paste. */
8003 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8006 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8007 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8008 u8 *h192, *r192, *h256, *r256;
8009 struct mgmt_pending_cmd *cmd = data;
8010 struct sk_buff *skb = cmd->skb;
8011 u8 status = mgmt_status(err);
/* Bail if this command is no longer the tracked pending one. */
8014 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8019 status = MGMT_STATUS_FAILED;
8020 else if (IS_ERR(skb))
8021 status = mgmt_status(PTR_ERR(skb));
/* First byte of the HCI reply carries the controller status. */
8023 status = mgmt_status(skb->data[0]);
8026 bt_dev_dbg(hdev, "status %u", status);
8028 mgmt_cp = cmd->param;
8031 status = mgmt_status(status);
/* Legacy controller: only the P-192 hash/randomizer pair is present. */
8038 } else if (!bredr_sc_enabled(hdev)) {
8039 struct hci_rp_read_local_oob_data *rp;
8041 if (skb->len != sizeof(*rp)) {
8042 status = MGMT_STATUS_FAILED;
8045 status = MGMT_STATUS_SUCCESS;
8046 rp = (void *)skb->data;
/* 5 bytes class-of-dev EIR + two 18-byte hash/rand EIR fields. */
8048 eir_len = 5 + 18 + 18;
/* SC-enabled controller: extended reply with P-192 and P-256 pairs. */
8055 struct hci_rp_read_local_oob_ext_data *rp;
8057 if (skb->len != sizeof(*rp)) {
8058 status = MGMT_STATUS_FAILED;
8061 status = MGMT_STATUS_SUCCESS;
8062 rp = (void *)skb->data;
/* SC-only mode exposes just the P-256 pair. */
8064 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8065 eir_len = 5 + 18 + 18;
8069 eir_len = 5 + 18 + 18 + 18 + 18;
8079 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8086 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8087 hdev->dev_class, 3);
8090 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8091 EIR_SSP_HASH_C192, h192, 16);
8092 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8093 EIR_SSP_RAND_R192, r192, 16);
8097 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8098 EIR_SSP_HASH_C256, h256, 16);
8099 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8100 EIR_SSP_RAND_R256, r256, 16);
8104 mgmt_rp->type = mgmt_cp->type;
8105 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8107 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8108 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8109 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8110 if (err < 0 || status)
/* Broadcast the fresh OOB data to all sockets that opted in,
 * excluding the requester (which already got the reply). */
8113 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8115 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8116 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8117 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8119 if (skb && !IS_ERR(skb))
8123 mgmt_pending_remove(cmd);
/* Queue a synchronous Read Local OOB Data request for the BR/EDR branch
 * of Read Local OOB Extended Data; completion is handled by
 * read_local_oob_ext_data_complete. Frees the pending cmd on queue
 * failure. */
8126 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8127 struct mgmt_cp_read_local_oob_ext_data *cp)
8129 struct mgmt_pending_cmd *cmd;
8132 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8137 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8138 read_local_oob_ext_data_complete);
8141 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 * cp->type is a bitmask of address types: BR/EDR defers to the controller
 * via read_local_ssp_oob_req; the LE case builds the OOB EIR blob locally
 * (address, role, optional SC confirm/random values, flags). Privacy mode
 * is rejected because the active RPA cannot be reported. Finally replies
 * and broadcasts the OOB-data-updated event.
 * NOTE(review): switch(cp->type), goto-label and brace lines are missing
 * from this paste. */
8148 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8149 void *data, u16 data_len)
8151 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8152 struct mgmt_rp_read_local_oob_ext_data *rp;
8155 u8 status, flags, role, addr[7], hash[16], rand[16];
8158 bt_dev_dbg(hdev, "sock %p", sk);
/* Pre-validate the requested transport while powered. */
8160 if (hdev_is_powered(hdev)) {
8162 case BIT(BDADDR_BREDR):
8163 status = mgmt_bredr_support(hdev);
8169 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8170 status = mgmt_le_support(hdev);
/* LE EIR budget: bdaddr + role + confirm + random + flags fields. */
8174 eir_len = 9 + 3 + 18 + 18 + 3;
8177 status = MGMT_STATUS_INVALID_PARAMS;
8182 status = MGMT_STATUS_NOT_POWERED;
8186 rp_len = sizeof(*rp) + eir_len;
8187 rp = kmalloc(rp_len, GFP_ATOMIC);
8191 if (!status && !lmp_ssp_capable(hdev)) {
8192 status = MGMT_STATUS_NOT_SUPPORTED;
8203 case BIT(BDADDR_BREDR):
/* BR/EDR with SSP on: ask the controller asynchronously. */
8204 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8205 err = read_local_ssp_oob_req(hdev, sk, cp);
8206 hci_dev_unlock(hdev);
8210 status = MGMT_STATUS_FAILED;
8213 eir_len = eir_append_data(rp->eir, eir_len,
8215 hdev->dev_class, 3);
8218 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
/* SC enabled: generate local OOB confirm/random values via SMP. */
8219 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8220 smp_generate_oob(hdev, hash, rand) < 0) {
8221 hci_dev_unlock(hdev);
8222 status = MGMT_STATUS_FAILED;
8226 /* This should return the active RPA, but since the RPA
8227 * is only programmed on demand, it is really hard to fill
8228 * this in at the moment. For now disallow retrieving
8229 * local out-of-band data when privacy is in use.
8231 * Returning the identity address will not help here since
8232 * pairing happens before the identity resolving key is
8233 * known and thus the connection establishment happens
8234 * based on the RPA and not the identity address.
8236 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8237 hci_dev_unlock(hdev);
8238 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address the same way advertising does. */
8242 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8243 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8244 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8245 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8246 memcpy(addr, &hdev->static_addr, 6);
8249 memcpy(addr, &hdev->bdaddr, 6);
/* addr[6] presumably carries the address-type byte -- the
 * assignment lines are missing from this paste. */
8253 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8254 addr, sizeof(addr));
8256 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8261 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8262 &role, sizeof(role));
8264 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8265 eir_len = eir_append_data(rp->eir, eir_len,
8267 hash, sizeof(hash));
8269 eir_len = eir_append_data(rp->eir, eir_len,
8271 rand, sizeof(rand));
8274 flags = mgmt_get_adv_discov_flags(hdev);
8276 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8277 flags |= LE_AD_NO_BREDR;
8279 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8280 &flags, sizeof(flags));
8284 hci_dev_unlock(hdev);
/* Requester implicitly subscribes to future OOB data updates. */
8286 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8288 status = MGMT_STATUS_SUCCESS;
8291 rp->type = cp->type;
8292 rp->eir_len = cpu_to_le16(eir_len);
8294 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8295 status, rp, sizeof(*rp) + eir_len);
8296 if (err < 0 || status)
8299 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8300 rp, sizeof(*rp) + eir_len,
8301 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the Supported Flags bitmask for Read Advertising Features:
 * the always-available flags, TX power when the controller reports a
 * valid value (always valid with extended advertising), and the
 * secondary-PHY flags when extended advertising is supported. */
8309 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8313 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8314 flags |= MGMT_ADV_FLAG_DISCOV;
8315 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8316 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8317 flags |= MGMT_ADV_FLAG_APPEARANCE;
8318 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8319 flags |= MGMT_ADV_PARAM_DURATION;
8320 flags |= MGMT_ADV_PARAM_TIMEOUT;
8321 flags |= MGMT_ADV_PARAM_INTERVALS;
8322 flags |= MGMT_ADV_PARAM_TX_POWER;
8323 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8325 /* In extended adv TX_POWER returned from Set Adv Param
8326 * will be always valid.
8328 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8329 flags |= MGMT_ADV_FLAG_TX_POWER;
8331 if (ext_adv_capable(hdev)) {
8332 flags |= MGMT_ADV_FLAG_SEC_1M;
8333 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8334 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
/* Secondary PHY flags depend on controller PHY capabilities. */
8336 if (le_2m_capable(hdev))
8337 flags |= MGMT_ADV_FLAG_SEC_2M;
8339 if (le_coded_capable(hdev))
8340 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising flags,
 * maximum adv/scan-response data lengths, instance limits and the list of
 * currently registered instance numbers. Rejected if the controller is not
 * LE capable.
 * NOTE(review): elided dump — some declarations (rp_len, instance, err),
 * the kmalloc failure check and loop/brace closures are on lines not shown.
 */
8346 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8347 			     void *data, u16 data_len)
8349 	struct mgmt_rp_read_adv_features *rp;
8352 	struct adv_info *adv_instance;
8353 	u32 supported_flags;
8356 	bt_dev_dbg(hdev, "sock %p", sk);
8358 	if (!lmp_le_capable(hdev))
8359 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8360 				       MGMT_STATUS_REJECTED);
/* Response is variable-length: one trailing byte per registered instance. */
8364 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8365 	rp = kmalloc(rp_len, GFP_ATOMIC);
8367 		hci_dev_unlock(hdev);
8371 	supported_flags = get_supported_adv_flags(hdev);
8373 	rp->supported_flags = cpu_to_le32(supported_flags);
8374 	rp->max_adv_data_len = max_adv_len(hdev);
8375 	rp->max_scan_rsp_len = max_adv_len(hdev);
8376 	rp->max_instances = hdev->le_num_of_adv_sets;
8377 	rp->num_instances = hdev->adv_instance_cnt;
/* Fill the trailing instance-number array in place. */
8379 	instance = rp->instance;
8380 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8381 		/* Only instances 1-le_num_of_adv_sets are externally visible */
8382 		if (adv_instance->instance <= hdev->adv_instance_cnt) {
8383 			*instance = adv_instance->instance;
8386 			rp->num_instances--;
8391 	hci_dev_unlock(hdev);
8393 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8394 				MGMT_STATUS_SUCCESS, rp, rp_len);
/* Compute how many bytes the local name EIR field would occupy by
 * rendering it into a scratch buffer (length + type + name bytes).
 */
8401 static u8 calculate_name_len(struct hci_dev *hdev)
8403 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8405 	return eir_append_local_name(hdev, buf, 0);
/* Return the space left for user-supplied adv/scan-rsp TLV data after
 * subtracting the fields the kernel appends itself for the given flags
 * (flags field, TX power, local name, appearance).
 * NOTE(review): elided dump — the subtraction amounts for the flags/
 * tx-power/appearance branches are on source lines not shown here.
 */
8408 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8411 	u8 max_len = max_adv_len(hdev);
/* Kernel-managed flags field consumes part of the adv payload. */
8414 	if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8415 			 MGMT_ADV_FLAG_LIMITED_DISCOV |
8416 			 MGMT_ADV_FLAG_MANAGED_FLAGS))
8419 	if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8422 	if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8423 		max_len -= calculate_name_len(hdev);
8425 	if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the EIR flags field for this instance, so
 * userspace must not supply its own EIR_FLAGS element. */
8432 static bool flags_managed(u32 adv_flags)
8434 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8435 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8436 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel appends the TX power EIR field itself. */
8439 static bool tx_power_managed(u32 adv_flags)
8441 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel appends the local name EIR field itself. */
8444 static bool name_managed(u32 adv_flags)
8446 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel appends the appearance EIR field itself. */
8449 static bool appearance_managed(u32 adv_flags)
8451 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied adv or scan-response TLV data: it must fit in the
 * space left by the kernel-managed fields, be well-formed length-prefixed
 * TLVs, and not contain any EIR element (flags, TX power, name, appearance)
 * that the kernel manages for the requested flags.
 * NOTE(review): elided dump — the overall length check, cur_len assignment
 * and the return statements of the rejected branches are on lines not shown.
 */
8454 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8455 			      u8 len, bool is_adv_data)
8460 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8465 	/* Make sure that the data is correctly formatted. */
8466 	for (i = 0; i < len; i += (cur_len + 1)) {
/* Reject EIR elements that collide with kernel-managed fields. */
8472 		if (data[i + 1] == EIR_FLAGS &&
8473 		    (!is_adv_data || flags_managed(adv_flags)))
8476 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8479 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8482 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8485 		if (data[i + 1] == EIR_APPEARANCE &&
8486 		    appearance_managed(adv_flags))
8489 		/* If the current field length would exceed the total data
8490 		 * length, then it's invalid.
8492 		if (i + cur_len >= len)
/* Check that the requested advertising flags are a subset of what the
 * controller supports and that at most one secondary-PHY (SEC_*) flag is
 * set; phy_flags & -phy_flags isolates the lowest set bit, so the XOR is
 * non-zero whenever more than one PHY bit is set.
 */
8499 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8501 	u32 supported_flags, phy_flags;
8503 	/* The current implementation only supports a subset of the specified
8504 	 * flags. Also need to check mutual exclusiveness of sec flags.
8506 	supported_flags = get_supported_adv_flags(hdev);
8507 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8508 	if (adv_flags & ~supported_flags ||
8509 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* True while a SET_LE command is pending; advertising changes must wait. */
8515 static bool adv_busy(struct hci_dev *hdev)
8517 	return pending_find(MGMT_OP_SET_LE, hdev);
/* Common completion path after attempting to program an advertising
 * instance. On failure, walks the instance list, clears pending state and
 * removes the failed instance (cancelling the rotation timer if it was the
 * one currently advertised), notifying userspace of the removal.
 * NOTE(review): elided dump — the err check, loop-matching condition and
 * several braces are on source lines not shown here.
 */
8520 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8523 	struct adv_info *adv, *n;
8525 	bt_dev_dbg(hdev, "err %d", err);
8529 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8536 		adv->pending = false;
8540 		instance = adv->instance;
8542 		if (hdev->cur_adv_instance == instance)
8543 			cancel_adv_timeout(hdev);
8545 		hci_remove_adv_instance(hdev, instance);
8546 		mgmt_advertising_removed(sk, hdev, instance);
8549 	hci_dev_unlock(hdev);
/* hci_cmd_sync completion callback for MGMT_OP_ADD_ADVERTISING: report
 * status (error) or the instance number (success) back to the requesting
 * socket, run the common cleanup in add_adv_complete(), then release the
 * pending command.
 */
8552 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8554 	struct mgmt_pending_cmd *cmd = data;
8555 	struct mgmt_cp_add_advertising *cp = cmd->param;
8556 	struct mgmt_rp_add_advertising rp;
8558 	memset(&rp, 0, sizeof(rp));
8560 	rp.instance = cp->instance;
8563 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8566 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8567 				  mgmt_status(err), &rp, sizeof(rp));
8569 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8571 	mgmt_pending_free(cmd);
/* Synchronous work item: schedule the requested instance on the
 * controller (force = true). Runs on the hci_cmd_sync workqueue. */
8574 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8576 	struct mgmt_pending_cmd *cmd = data;
8577 	struct mgmt_cp_add_advertising *cp = cmd->param;
8579 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* MGMT_OP_ADD_ADVERTISING handler: validate the request (LE support,
 * instance range, payload length, flags, TLV data), register/update the
 * advertising instance, decide which instance to schedule next, and queue
 * the actual HCI programming via hci_cmd_sync. If nothing needs to reach
 * the controller (unpowered, HCI_ADVERTISING set, or no instance to
 * schedule) the command completes immediately.
 * NOTE(review): elided dump — declarations (flags, status, err), some
 * error-status arguments, goto labels/unlock pairing and closing braces
 * are on source lines not shown here.
 */
8582 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8583 			   void *data, u16 data_len)
8585 	struct mgmt_cp_add_advertising *cp = data;
8586 	struct mgmt_rp_add_advertising rp;
8589 	u16 timeout, duration;
8590 	unsigned int prev_instance_cnt;
8591 	u8 schedule_instance = 0;
8592 	struct adv_info *adv, *next_instance;
8594 	struct mgmt_pending_cmd *cmd;
8596 	bt_dev_dbg(hdev, "sock %p", sk);
8598 	status = mgmt_le_support(hdev);
8600 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Instances are numbered 1..le_num_of_adv_sets; 0 is invalid here. */
8603 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8604 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8605 				       MGMT_STATUS_INVALID_PARAMS);
/* Payload must be exactly header + adv data + scan rsp data. */
8607 	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8608 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8609 				       MGMT_STATUS_INVALID_PARAMS);
8611 	flags = __le32_to_cpu(cp->flags);
8612 	timeout = __le16_to_cpu(cp->timeout);
8613 	duration = __le16_to_cpu(cp->duration);
8615 	if (!requested_adv_flags_are_valid(hdev, flags))
8616 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8617 				       MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running timer, which needs a powered controller. */
8621 	if (timeout && !hdev_is_powered(hdev)) {
8622 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8623 				      MGMT_STATUS_REJECTED);
8627 	if (adv_busy(hdev)) {
8628 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Validate both the adv data and the scan response data TLVs. */
8633 	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8634 	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8635 			       cp->scan_rsp_len, false)) {
8636 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8637 				      MGMT_STATUS_INVALID_PARAMS);
8641 	prev_instance_cnt = hdev->adv_instance_cnt;
8643 	adv = hci_add_adv_instance(hdev, cp->instance, flags,
8644 				   cp->adv_data_len, cp->data,
8646 				   cp->data + cp->adv_data_len,
8648 				   HCI_ADV_TX_POWER_NO_PREFERENCE,
8649 				   hdev->le_adv_min_interval,
8650 				   hdev->le_adv_max_interval, 0);
8652 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8653 				      MGMT_STATUS_FAILED);
8657 	/* Only trigger an advertising added event if a new instance was
8660 	if (hdev->adv_instance_cnt > prev_instance_cnt)
8661 		mgmt_advertising_added(sk, hdev, cp->instance);
8663 	if (hdev->cur_adv_instance == cp->instance) {
8664 		/* If the currently advertised instance is being changed then
8665 		 * cancel the current advertising and schedule the next
8666 		 * instance. If there is only one instance then the overridden
8667 		 * advertising data will be visible right away.
8669 		cancel_adv_timeout(hdev);
8671 		next_instance = hci_get_next_instance(hdev, cp->instance);
8673 			schedule_instance = next_instance->instance;
8674 	} else if (!hdev->adv_instance_timeout) {
8675 		/* Immediately advertise the new instance if no other
8676 		 * instance is currently being advertised.
8678 		schedule_instance = cp->instance;
8681 	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
8682 	 * there is no instance to be advertised then we have no HCI
8683 	 * communication to make. Simply return.
8685 	if (!hdev_is_powered(hdev) ||
8686 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8687 	    !schedule_instance) {
8688 		rp.instance = cp->instance;
8689 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8690 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8694 	/* We're good to go, update advertising data, parameters, and start
8697 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
/* The pending command's copy records which instance was scheduled. */
8704 	cp->instance = schedule_instance;
8706 	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8707 				 add_advertising_complete);
8709 		mgmt_pending_free(cmd);
8712 	hci_dev_unlock(hdev);
/* hci_cmd_sync completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS: on
 * success, report the instance's TX power and remaining adv/scan-rsp data
 * space; on failure, remove the instance (signalling removal to userspace
 * if it had been advertising) and return an error status.
 * NOTE(review): elided dump — the err check, the "adv not found" branch
 * and lock/brace pairing are on source lines not shown here.
 */
8717 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8720 	struct mgmt_pending_cmd *cmd = data;
8721 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8722 	struct mgmt_rp_add_ext_adv_params rp;
8723 	struct adv_info *adv;
8726 	BT_DBG("%s", hdev->name);
8730 	adv = hci_find_adv_instance(hdev, cp->instance);
8734 	rp.instance = cp->instance;
8735 	rp.tx_power = adv->tx_power;
8737 	/* While we're at it, inform userspace of the available space for this
8738 	 * advertisement, given the flags that will be used.
8740 	flags = __le32_to_cpu(cp->flags);
8741 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8742 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8745 	/* If this advertisement was previously advertising and we
8746 	 * failed to update it, we signal that it has been removed and
8747 	 * delete its structure
8750 		mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8752 		hci_remove_adv_instance(hdev, cp->instance);
8754 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8757 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8758 				  mgmt_status(err), &rp, sizeof(rp));
8763 	mgmt_pending_free(cmd);
8765 	hci_dev_unlock(hdev);
/* Synchronous work item: program extended advertising parameters for the
 * requested instance on the controller. */
8768 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8770 	struct mgmt_pending_cmd *cmd = data;
8771 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8773 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: first half of the two-step extended
 * advertising interface. Validates the request, creates a parameter-only
 * instance (no adv/scan-rsp data yet), then either queues HCI programming
 * (extended-advertising controllers) or completes immediately with default
 * TX power and the available data space (legacy controllers). Parameters
 * not covered by the request's flags fall back to hdev defaults.
 * NOTE(review): elided dump — declarations (status, err, tx_power), status
 * checks, the MGMT_ADV_PARAM_TX_POWER source operand, goto labels and
 * closing braces are on source lines not shown here.
 */
8776 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8777 			      void *data, u16 data_len)
8779 	struct mgmt_cp_add_ext_adv_params *cp = data;
8780 	struct mgmt_rp_add_ext_adv_params rp;
8781 	struct mgmt_pending_cmd *cmd = NULL;
8782 	struct adv_info *adv;
8783 	u32 flags, min_interval, max_interval;
8784 	u16 timeout, duration;
8789 	BT_DBG("%s", hdev->name);
8791 	status = mgmt_le_support(hdev);
8793 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8796 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8797 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8798 				       MGMT_STATUS_INVALID_PARAMS);
8800 	/* The purpose of breaking add_advertising into two separate MGMT calls
8801 	 * for params and data is to allow more parameters to be added to this
8802 	 * structure in the future. For this reason, we verify that we have the
8803 	 * bare minimum structure we know of when the interface was defined. Any
8804 	 * extra parameters we don't know about will be ignored in this request.
8806 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8807 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8808 				       MGMT_STATUS_INVALID_PARAMS);
8810 	flags = __le32_to_cpu(cp->flags);
8812 	if (!requested_adv_flags_are_valid(hdev, flags))
8813 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8814 				       MGMT_STATUS_INVALID_PARAMS);
8818 	/* In new interface, we require that we are powered to register */
8819 	if (!hdev_is_powered(hdev)) {
8820 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8821 				      MGMT_STATUS_REJECTED);
8825 	if (adv_busy(hdev)) {
8826 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8831 	/* Parse defined parameters from request, use defaults otherwise */
8832 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8833 		  __le16_to_cpu(cp->timeout) : 0;
8835 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8836 		   __le16_to_cpu(cp->duration) :
8837 		   hdev->def_multi_adv_rotation_duration;
8839 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8840 		       __le32_to_cpu(cp->min_interval) :
8841 		       hdev->le_adv_min_interval;
8843 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8844 		       __le32_to_cpu(cp->max_interval) :
8845 		       hdev->le_adv_max_interval;
8847 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8849 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8851 	/* Create advertising instance with no advertising or response data */
8852 	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8853 				   timeout, duration, tx_power, min_interval,
8857 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8858 				      MGMT_STATUS_FAILED);
8862 	/* Submit request for advertising params if ext adv available */
8863 	if (ext_adv_capable(hdev)) {
8864 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
/* Allocation failure: roll back the instance just created. */
8868 			hci_remove_adv_instance(hdev, cp->instance);
8872 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8873 					 add_ext_adv_params_complete);
8875 			mgmt_pending_free(cmd);
/* Legacy controller: nothing to program yet, answer immediately. */
8877 		rp.instance = cp->instance;
8878 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8879 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8880 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8881 		err = mgmt_cmd_complete(sk, hdev->id,
8882 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8883 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8887 	hci_dev_unlock(hdev);
/* hci_cmd_sync completion callback for MGMT_OP_ADD_EXT_ADV_DATA: run the
 * common cleanup (add_adv_complete), then report status or the instance
 * number back to the requester and release the pending command.
 */
8892 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8894 	struct mgmt_pending_cmd *cmd = data;
8895 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8896 	struct mgmt_rp_add_advertising rp;
8898 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8900 	memset(&rp, 0, sizeof(rp));
8902 	rp.instance = cp->instance;
8905 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8908 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8909 				  mgmt_status(err), &rp, sizeof(rp));
8911 	mgmt_pending_free(cmd);
/* Synchronous work item for ADD_EXT_ADV_DATA: on extended-advertising
 * controllers push adv data, scan-rsp data, then enable the set; on legacy
 * controllers fall back to scheduling the instance via software rotation.
 * NOTE(review): elided dump — the error checks between the update calls
 * are on source lines not shown here.
 */
8914 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8916 	struct mgmt_pending_cmd *cmd = data;
8917 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8920 	if (ext_adv_capable(hdev)) {
8921 		err = hci_update_adv_data_sync(hdev, cp->instance);
8925 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8929 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8932 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the two-step extended
 * advertising interface. Requires a prior ADD_EXT_ADV_PARAMS for the same
 * instance; validates and installs the adv/scan-rsp data, decides which
 * instance to (re)schedule, and queues the HCI update. A newly created
 * (pending) instance is announced to userspace once the data is accepted,
 * and removed again on the error paths (clear_new_instance).
 * NOTE(review): elided dump — declarations (err), lock acquisition, some
 * status codes, goto labels and closing braces are on lines not shown.
 */
8935 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8938 	struct mgmt_cp_add_ext_adv_data *cp = data;
8939 	struct mgmt_rp_add_ext_adv_data rp;
8940 	u8 schedule_instance = 0;
8941 	struct adv_info *next_instance;
8942 	struct adv_info *adv_instance;
8944 	struct mgmt_pending_cmd *cmd;
8946 	BT_DBG("%s", hdev->name);
/* The instance must already exist (created by ADD_EXT_ADV_PARAMS). */
8950 	adv_instance = hci_find_adv_instance(hdev, cp->instance);
8952 	if (!adv_instance) {
8953 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8954 				      MGMT_STATUS_INVALID_PARAMS);
8958 	/* In new interface, we require that we are powered to register */
8959 	if (!hdev_is_powered(hdev)) {
8960 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8961 				      MGMT_STATUS_REJECTED);
8962 		goto clear_new_instance;
8965 	if (adv_busy(hdev)) {
8966 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8968 		goto clear_new_instance;
8971 	/* Validate new data */
8972 	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8973 			       cp->adv_data_len, true) ||
8974 	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8975 			       cp->adv_data_len, cp->scan_rsp_len, false)) {
8976 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8977 				      MGMT_STATUS_INVALID_PARAMS);
8978 		goto clear_new_instance;
8981 	/* Set the data in the advertising instance */
8982 	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8983 				  cp->data, cp->scan_rsp_len,
8984 				  cp->data + cp->adv_data_len);
8986 	/* If using software rotation, determine next instance to use */
8987 	if (hdev->cur_adv_instance == cp->instance) {
8988 		/* If the currently advertised instance is being changed
8989 		 * then cancel the current advertising and schedule the
8990 		 * next instance. If there is only one instance then the
8991 		 * overridden advertising data will be visible right
8994 		cancel_adv_timeout(hdev);
8996 		next_instance = hci_get_next_instance(hdev, cp->instance);
8998 			schedule_instance = next_instance->instance;
8999 	} else if (!hdev->adv_instance_timeout) {
9000 		/* Immediately advertise the new instance if no other
9001 		 * instance is currently being advertised.
9003 		schedule_instance = cp->instance;
9006 	/* If the HCI_ADVERTISING flag is set or there is no instance to
9007 	 * be advertised then we have no HCI communication to make.
9010 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9011 		if (adv_instance->pending) {
9012 			mgmt_advertising_added(sk, hdev, cp->instance);
9013 			adv_instance->pending = false;
9015 		rp.instance = cp->instance;
9016 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9017 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9021 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9025 		goto clear_new_instance;
9028 	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9029 				 add_ext_adv_data_complete);
9031 		mgmt_pending_free(cmd);
9032 		goto clear_new_instance;
9035 	/* We were successful in updating data, so trigger advertising_added
9036 	 * event if this is an instance that wasn't previously advertising. If
9037 	 * a failure occurs in the requests we initiated, we will remove the
9038 	 * instance again in add_advertising_complete
9040 	if (adv_instance->pending)
9041 		mgmt_advertising_added(sk, hdev, cp->instance);
/* Error path: drop the half-configured instance again. */
9046 	hci_remove_adv_instance(hdev, cp->instance);
9049 	hci_dev_unlock(hdev);
/* hci_cmd_sync completion callback for MGMT_OP_REMOVE_ADVERTISING: report
 * status or the removed instance number back to the requester, then
 * release the pending command.
 */
9054 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9057 	struct mgmt_pending_cmd *cmd = data;
9058 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9059 	struct mgmt_rp_remove_advertising rp;
9061 	bt_dev_dbg(hdev, "err %d", err);
9063 	memset(&rp, 0, sizeof(rp));
9064 	rp.instance = cp->instance;
9067 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9070 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9071 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9073 	mgmt_pending_free(cmd);
/* Synchronous work item: remove the requested instance (0 removes all,
 * per the MGMT API) and, if no instances remain, turn advertising off.
 * NOTE(review): elided dump — the err check between the two calls is on a
 * source line not shown here.
 */
9076 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9078 	struct mgmt_pending_cmd *cmd = data;
9079 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9082 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9086 	if (list_empty(&hdev->adv_instances))
9087 		err = hci_disable_advertising_sync(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler: validate that the named instance
 * exists (or instance 0 for "all"), that no SET_LE command is pending and
 * that there is anything to remove, then queue the removal work.
 * NOTE(review): elided dump — declarations (err), lock acquisition, the
 * busy status code, goto labels and closing braces are on lines not shown.
 */
9092 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9093 			      void *data, u16 data_len)
9095 	struct mgmt_cp_remove_advertising *cp = data;
9096 	struct mgmt_pending_cmd *cmd;
9099 	bt_dev_dbg(hdev, "sock %p", sk);
/* Non-zero instance must name an existing instance. */
9103 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9104 		err = mgmt_cmd_status(sk, hdev->id,
9105 				      MGMT_OP_REMOVE_ADVERTISING,
9106 				      MGMT_STATUS_INVALID_PARAMS);
9110 	if (pending_find(MGMT_OP_SET_LE, hdev)) {
9111 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9116 	if (list_empty(&hdev->adv_instances)) {
9117 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9118 				      MGMT_STATUS_INVALID_PARAMS);
9122 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9129 	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9130 				 remove_advertising_complete);
9132 		mgmt_pending_free(cmd);
9135 	hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_SIZE_INFO handler: for a given instance and flag set,
 * report how many bytes of adv data and scan-response data userspace may
 * supply once the kernel-managed fields are accounted for. Purely
 * informational — no controller state is changed.
 */
9140 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9141 			     void *data, u16 data_len)
9143 	struct mgmt_cp_get_adv_size_info *cp = data;
9144 	struct mgmt_rp_get_adv_size_info rp;
9145 	u32 flags, supported_flags;
9147 	bt_dev_dbg(hdev, "sock %p", sk);
9149 	if (!lmp_le_capable(hdev))
9150 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9151 				       MGMT_STATUS_REJECTED);
9153 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9154 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9155 				       MGMT_STATUS_INVALID_PARAMS);
9157 	flags = __le32_to_cpu(cp->flags);
9159 	/* The current implementation only supports a subset of the specified
9162 	supported_flags = get_supported_adv_flags(hdev);
9163 	if (flags & ~supported_flags)
9164 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9165 				       MGMT_STATUS_INVALID_PARAMS);
9167 	rp.instance = cp->instance;
9168 	rp.flags = cp->flags;
9169 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9170 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9172 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9173 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for MGMT commands: entry index == MGMT opcode. Each entry
 * gives the handler, the expected (minimum) parameter size, and optional
 * HCI_MGMT_* flags (UNTRUSTED = allowed for untrusted sockets,
 * UNCONFIGURED = allowed on unconfigured controllers, HDEV_OPTIONAL = may
 * be sent without a controller index).
 * NOTE(review): elided dump — flag continuation lines for several entries
 * (e.g. VAR_LEN markers) fall on source lines not shown here.
 */
9176 static const struct hci_mgmt_handler mgmt_handlers[] = {
9177 	{ NULL }, /* 0x0000 (no command) */
9178 	{ read_version,            MGMT_READ_VERSION_SIZE,
9180 						HCI_MGMT_UNTRUSTED },
9181 	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
9183 						HCI_MGMT_UNTRUSTED },
9184 	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
9186 						HCI_MGMT_UNTRUSTED },
9187 	{ read_controller_info,    MGMT_READ_INFO_SIZE,
9188 						HCI_MGMT_UNTRUSTED },
9189 	{ set_powered,             MGMT_SETTING_SIZE },
9190 	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
9191 	{ set_connectable,         MGMT_SETTING_SIZE },
9192 	{ set_fast_connectable,    MGMT_SETTING_SIZE },
9193 	{ set_bondable,            MGMT_SETTING_SIZE },
9194 	{ set_link_security,       MGMT_SETTING_SIZE },
9195 	{ set_ssp,                 MGMT_SETTING_SIZE },
9196 	{ set_hs,                  MGMT_SETTING_SIZE },
9197 	{ set_le,                  MGMT_SETTING_SIZE },
9198 	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
9199 	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
9200 	{ add_uuid,                MGMT_ADD_UUID_SIZE },
9201 	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
9202 	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
9204 	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9206 	{ disconnect,              MGMT_DISCONNECT_SIZE },
9207 	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
9208 	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
9209 	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
9210 	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
9211 	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
9212 	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
9213 	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
9214 	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
9215 	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9216 	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
9217 	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9218 	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
9219 	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9221 	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9222 	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
9223 	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
9224 	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
9225 	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
9226 	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
9227 	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
9228 	{ set_advertising,         MGMT_SETTING_SIZE },
9229 	{ set_bredr,               MGMT_SETTING_SIZE },
9230 	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
9231 	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
9232 	{ set_secure_conn,         MGMT_SETTING_SIZE },
9233 	{ set_debug_keys,          MGMT_SETTING_SIZE },
9234 	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
9235 	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
9237 	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
9238 	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
9239 	{ add_device,              MGMT_ADD_DEVICE_SIZE },
9240 	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
9241 	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
9243 	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9245 						HCI_MGMT_UNTRUSTED },
9246 	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
9247 						HCI_MGMT_UNCONFIGURED |
9248 						HCI_MGMT_UNTRUSTED },
9249 	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
9250 						HCI_MGMT_UNCONFIGURED },
9251 	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
9252 						HCI_MGMT_UNCONFIGURED },
9253 	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9255 	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9256 	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
9258 						HCI_MGMT_UNTRUSTED },
9259 	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
9260 	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
9262 	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
9263 	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
9264 	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9265 	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9266 						HCI_MGMT_UNTRUSTED },
9267 	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
9268 	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
9269 	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
9270 	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9272 	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
9273 	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
9274 						HCI_MGMT_UNTRUSTED },
9275 	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
9276 						HCI_MGMT_UNTRUSTED |
9277 						HCI_MGMT_HDEV_OPTIONAL },
9278 	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
9280 						HCI_MGMT_HDEV_OPTIONAL },
9281 	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9282 						HCI_MGMT_UNTRUSTED },
9283 	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9285 	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9286 						HCI_MGMT_UNTRUSTED },
9287 	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9289 	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
9290 	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
9291 	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9292 	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9294 	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
9295 	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9297 	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
9299 	{ add_adv_patterns_monitor_rssi,
9300 				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9302 	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
9304 	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
9305 	{ mesh_send,               MGMT_MESH_SEND_SIZE,
9307 	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
/* Announce a newly registered controller to MGMT listeners: raw devices
 * are skipped; unconfigured controllers raise UNCONF_INDEX_ADDED,
 * configured ones INDEX_ADDED, and an EXT_INDEX_ADDED event is always
 * emitted for extended-index subscribers.
 * NOTE(review): elided dump — the switch's case labels, the ev field
 * assignments and the early return are on source lines not shown here.
 */
9310 void mgmt_index_added(struct hci_dev *hdev)
9312 	struct mgmt_ev_ext_index ev;
9314 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9317 	switch (hdev->dev_type) {
9319 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9320 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9321 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9324 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9325 					 HCI_MGMT_INDEX_EVENTS);
9338 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9339 			 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal: fail all pending MGMT commands with
 * INVALID_INDEX, emit the matching (UNCONF_)INDEX_REMOVED and
 * EXT_INDEX_REMOVED events, and cancel remaining delayed work if the
 * controller had MGMT enabled.
 * NOTE(review): elided dump — switch case labels, ev field assignments
 * and the early return are on source lines not shown here.
 */
9342 void mgmt_index_removed(struct hci_dev *hdev)
9344 	struct mgmt_ev_ext_index ev;
9345 	u8 status = MGMT_STATUS_INVALID_INDEX;
9347 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9350 	switch (hdev->dev_type) {
/* Complete every outstanding command with INVALID_INDEX. */
9352 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9354 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9355 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9356 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9359 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9360 					 HCI_MGMT_INDEX_EVENTS);
9373 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9374 			 HCI_MGMT_EXT_INDEX_EVENTS);
9376 	/* Cancel any remaining timed work */
9377 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9379 	cancel_delayed_work_sync(&hdev->discov_off);
9380 	cancel_delayed_work_sync(&hdev->service_cache);
9381 	cancel_delayed_work_sync(&hdev->rpa_expired);
/* Power-on completion: on success, restart stored LE actions and passive
 * scanning; in all cases complete any pending SET_POWERED commands and
 * broadcast the new settings.
 * NOTE(review): elided dump — the err check, lock acquisition and the
 * sock_put on match.sk are on source lines not shown here.
 */
9384 void mgmt_power_on(struct hci_dev *hdev, int err)
9386 	struct cmd_lookup match = { NULL, hdev };
9388 	bt_dev_dbg(hdev, "err %d", err);
9393 		restart_le_actions(hdev);
9394 		hci_update_passive_scan(hdev);
9397 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9399 	new_settings(hdev, match.sk);
9404 	hci_dev_unlock(hdev);
/* Power-off teardown: complete pending SET_POWERED commands, fail every
 * other outstanding command with INVALID_INDEX (unregistering) or
 * NOT_POWERED, broadcast a zero class-of-device if one was set, and send
 * the updated settings.
 */
9407 void __mgmt_power_off(struct hci_dev *hdev)
9409 	struct cmd_lookup match = { NULL, hdev };
9410 	u8 status, zero_cod[] = { 0, 0, 0 };
9412 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9414 	/* If the power off is because of hdev unregistration let
9415 	 * use the appropriate INVALID_INDEX status. Otherwise use
9416 	 * NOT_POWERED. We cover both scenarios here since later in
9417 	 * mgmt_index_removed() any hci_conn callbacks will have already
9418 	 * been triggered, potentially causing misleading DISCONNECTED
9421 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9422 		status = MGMT_STATUS_INVALID_INDEX;
9424 		status = MGMT_STATUS_NOT_POWERED;
9426 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9428 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9429 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9430 				   zero_cod, sizeof(zero_cod),
9431 				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9432 		ext_info_changed(hdev, NULL);
9435 	new_settings(hdev, match.sk);
/* Report a failed power-on attempt to the pending SET_POWERED command:
 * RFKILLED if rfkill blocked it, FAILED otherwise.
 * NOTE(review): elided dump — the NULL check on cmd is on a source line
 * not shown here.
 */
9441 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9443 	struct mgmt_pending_cmd *cmd;
9446 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9450 	if (err == -ERFKILL)
9451 		status = MGMT_STATUS_RFKILLED;
9453 		status = MGMT_STATUS_FAILED;
9455 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9457 	mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key,
 * including the store hint so userspace knows whether to persist it.
 */
9460 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9463 	struct mgmt_ev_new_link_key ev;
9465 	memset(&ev, 0, sizeof(ev));
9467 	ev.store_hint = persistent;
9468 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9469 	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9470 	ev.key.type = key->type;
9471 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9472 	ev.key.pin_len = key->pin_len;
9474 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK type plus its authenticated bit onto the MGMT LTK type
 * constants reported to userspace; unknown types default to
 * MGMT_LTK_UNAUTHENTICATED.
 * NOTE(review): elided dump — the SMP_LTK and SMP_LTK_P256 case labels
 * preceding the visible bodies are on source lines not shown here.
 */
9477 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9479 	switch (ltk->type) {
9481 	case SMP_LTK_RESPONDER:
9482 		if (ltk->authenticated)
9483 			return MGMT_LTK_AUTHENTICATED;
9484 		return MGMT_LTK_UNAUTHENTICATED;
9486 		if (ltk->authenticated)
9487 			return MGMT_LTK_P256_AUTH;
9488 		return MGMT_LTK_P256_UNAUTH;
9489 	case SMP_LTK_P256_DEBUG:
9490 		return MGMT_LTK_P256_DEBUG;
9493 	return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long term key. The store
 * hint is forced to 0 for non-identity random addresses (top two address
 * bits != 0b11), since keys for changing RPAs are not worth persisting.
 * Only key->enc_size bytes of the key value are significant; the rest is
 * zeroed before the event is sent.
 */
9496 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9498 	struct mgmt_ev_new_long_term_key ev;
9500 	memset(&ev, 0, sizeof(ev));
9502 	/* Devices using resolvable or non-resolvable random addresses
9503 	 * without providing an identity resolving key don't require
9504 	 * to store long term keys. Their addresses will change the
9507 	 * Only when a remote device provides an identity address
9508 	 * make sure the long term key is stored. If the remote
9509 	 * identity is known, the long term keys are internally
9510 	 * mapped to the identity address. So allow static random
9511 	 * and public addresses here.
9513 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9514 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
9515 		ev.store_hint = 0x00;
9517 		ev.store_hint = persistent;
9519 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9520 	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9521 	ev.key.type = mgmt_ltk_type(key);
9522 	ev.key.enc_size = key->enc_size;
9523 	ev.key.ediv = key->ediv;
9524 	ev.key.rand = key->rand;
9526 	if (key->type == SMP_LTK)
9527 		ev.key.initiator = 1;
9529 	/* Make sure we copy only the significant bytes based on the
9530 	 * encryption key size, and set the rest of the value to zeroes.
9532 	memcpy(ev.key.val, key->val, key->enc_size);
9533 	memset(ev.key.val + key->enc_size, 0,
9534 	       sizeof(ev.key.val) - key->enc_size);
9536 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a MGMT_EV_NEW_IRK event carrying the remote Identity Resolving
 * Key plus the RPA it was received under; persistent becomes the
 * store hint for userspace.
 */
9539 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9541 struct mgmt_ev_new_irk ev;
9543 memset(&ev, 0, sizeof(ev));
9545 ev.store_hint = persistent;
9547 bacpy(&ev.rpa, &irk->rpa);
9548 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9549 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
9550 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9552 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a MGMT_EV_NEW_CSRK event for a Connection Signature Resolving Key.
 * Mirrors mgmt_new_ltk(): keys for non-identity random addresses get
 * store_hint 0 since the peer address will change.
 *
 * NOTE(review): the 'persistent' parameter line of the signature is
 * elided in this capture (fused numbering jumps 9555 -> 9558).
 */
9555 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9558 struct mgmt_ev_new_csrk ev;
9560 memset(&ev, 0, sizeof(ev));
9562 /* Devices using resolvable or non-resolvable random addresses
9563 * without providing an identity resolving key don't require
9564 * to store signature resolving keys. Their addresses will change
9565 * the next time around.
9567 * Only when a remote device provides an identity address
9568 * make sure the signature resolving key is stored. So allow
9569 * static random and public addresses here.
9571 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9572 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9573 ev.store_hint = 0x00;
/* NOTE(review): an 'else' line is elided here in this capture */
9575 ev.store_hint = persistent;
9577 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9578 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9579 ev.key.type = csrk->type;
9580 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9582 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a MGMT_EV_NEW_CONN_PARAM event with updated LE connection
 * parameters. Only identity addresses are reported; all multi-byte
 * fields are converted to little-endian wire format.
 */
9585 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9586 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9587 u16 max_interval, u16 latency, u16 timeout)
9589 struct mgmt_ev_new_conn_param ev;
/* Non-identity (resolvable/non-resolvable random) addresses are skipped */
9591 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9594 memset(&ev, 0, sizeof(ev));
9595 bacpy(&ev.addr.bdaddr, bdaddr);
9596 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9597 ev.store_hint = store_hint;
9598 ev.min_interval = cpu_to_le16(min_interval);
9599 ev.max_interval = cpu_to_le16(max_interval);
9600 ev.latency = cpu_to_le16(latency);
9601 ev.timeout = cpu_to_le16(timeout);
9603 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit a MGMT_EV_DEVICE_CONNECTED event once per connection (guarded by
 * the HCI_CONN_MGMT_CONNECTED flag). The variable-length EIR portion
 * carries either the LE advertising data or, for BR/EDR, the remote
 * name and class of device.
 */
9606 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9607 u8 *name, u8 name_len)
9609 struct sk_buff *skb;
9610 struct mgmt_ev_device_connected *ev;
/* Only send the event once per connection */
9614 if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9617 /* allocate buff for LE or BR/EDR adv */
9618 if (conn->le_adv_data_len > 0)
9619 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9620 sizeof(*ev) + conn->le_adv_data_len)
9622 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9623 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9624 eir_precalc_len(sizeof(conn->dev_class)));
9626 ev = skb_put(skb, sizeof(*ev));
9627 bacpy(&ev->addr.bdaddr, &conn->dst);
9628 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
/* NOTE(review): the 'flags' initialization and the condition guarding
 * this line are elided in this capture — confirm against upstream.
 */
9631 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9633 ev->flags = __cpu_to_le32(flags);
9635 /* We must ensure that the EIR Data fields are ordered and
9636 * unique. Keep it simple for now and avoid the problem by not
9637 * adding any BR/EDR data to the LE adv.
9639 if (conn->le_adv_data_len > 0) {
9640 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9641 eir_len = conn->le_adv_data_len;
/* BR/EDR path: append remote name and (if set) class of device */
9644 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9646 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9647 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9648 conn->dev_class, sizeof(conn->dev_class));
9651 ev->eir_len = cpu_to_le16(eir_len);
9653 mgmt_event_skb(skb, NULL);
/* mgmt_pending_foreach() callback: complete a pending Disconnect command
 * with success and capture its socket via the 'data' out-parameter
 * (so the caller can skip that socket when broadcasting the event).
 */
9656 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9658 struct sock **sk = data;
9660 cmd->cmd_complete(cmd, 0);
/* NOTE(review): the lines saving/holding cmd->sk into *sk are elided
 * in this capture.
 */
9665 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: emit Device Unpaired for the address
 * in the pending Unpair Device command, then complete and remove it.
 */
9668 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9670 struct hci_dev *hdev = data;
9671 struct mgmt_cp_unpair_device *cp = cmd->param;
9673 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9675 cmd->cmd_complete(cmd, 0);
9676 mgmt_pending_remove(cmd);
/* Return true if a pending Set Powered command is switching the
 * controller off.
 *
 * NOTE(review): the lines inspecting cmd/cp and the return statements
 * are elided in this capture (numbering jumps 9684 -> 9695) — the
 * visible code only looks up the pending command.
 */
9679 bool mgmt_powering_down(struct hci_dev *hdev)
9681 struct mgmt_pending_cmd *cmd;
9682 struct mgmt_mode *cp;
9684 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a MGMT_EV_DEVICE_DISCONNECTED event. Pending Disconnect commands
 * are completed first (their socket is skipped for the broadcast), and
 * pending Unpair Device commands are flushed afterwards.
 */
9695 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9696 u8 link_type, u8 addr_type, u8 reason,
9697 bool mgmt_connected)
9699 struct mgmt_ev_device_disconnected ev;
9700 struct sock *sk = NULL;
/* Nothing to report if mgmt never saw this connection */
9702 if (!mgmt_connected)
9705 if (link_type != ACL_LINK && link_type != LE_LINK)
9708 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9710 bacpy(&ev.addr.bdaddr, bdaddr);
9711 ev.addr.type = link_to_bdaddr(link_type, addr_type);
/* NOTE(review): the default 'ev.reason = reason' assignment appears to
 * be elided here in this capture.
 */
9714 /* Report disconnects due to suspend */
9715 if (hdev->suspended)
9716 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9718 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9723 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending Disconnect command with the (mapped) HCI status
 * when the disconnect failed; pending Unpair Device commands are
 * flushed first.
 */
9727 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9728 u8 link_type, u8 addr_type, u8 status)
9730 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9731 struct mgmt_cp_disconnect *cp;
9732 struct mgmt_pending_cmd *cmd;
9734 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9737 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only complete the command if it targets this exact address/type */
9743 if (bacmp(bdaddr, &cp->addr.bdaddr))
9746 if (cp->addr.type != bdaddr_type)
9749 cmd->cmd_complete(cmd, mgmt_status(status));
9750 mgmt_pending_remove(cmd);
/* Broadcast a MGMT_EV_CONNECT_FAILED event with the HCI status mapped
 * to a mgmt status code.
 */
9753 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9754 u8 addr_type, u8 status)
9756 struct mgmt_ev_connect_failed ev;
9758 bacpy(&ev.addr.bdaddr, bdaddr);
9759 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9760 ev.status = mgmt_status(status);
9762 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a MGMT_EV_PIN_CODE_REQUEST event (BR/EDR only).
 * NOTE(review): the 'ev.secure = secure' assignment is elided in this
 * capture (numbering jumps 9770 -> 9773).
 */
9765 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9767 struct mgmt_ev_pin_code_request ev;
9769 bacpy(&ev.addr.bdaddr, bdaddr);
9770 ev.addr.type = BDADDR_BREDR;
9773 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the mapped HCI status. */
9776 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9779 struct mgmt_pending_cmd *cmd;
9781 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9785 cmd->cmd_complete(cmd, mgmt_status(status));
9786 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command with the mapped
 * HCI status.
 */
9789 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9792 struct mgmt_pending_cmd *cmd;
9794 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9798 cmd->cmd_complete(cmd, mgmt_status(status));
9799 mgmt_pending_remove(cmd);
/* Broadcast a MGMT_EV_USER_CONFIRM_REQUEST event asking userspace to
 * confirm the 6-digit numeric comparison value.
 */
9802 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9803 u8 link_type, u8 addr_type, u32 value,
9806 struct mgmt_ev_user_confirm_request ev;
9808 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9810 bacpy(&ev.addr.bdaddr, bdaddr);
9811 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9812 ev.confirm_hint = confirm_hint;
9813 ev.value = cpu_to_le32(value);
9815 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Broadcast a MGMT_EV_USER_PASSKEY_REQUEST event asking userspace to
 * supply a passkey for pairing.
 */
9819 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9820 u8 link_type, u8 addr_type)
9822 struct mgmt_ev_user_passkey_request ev;
9824 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9826 bacpy(&ev.addr.bdaddr, bdaddr);
9827 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9829 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared helper for the four user confirm/passkey reply-complete
 * functions below: complete the pending command matching 'opcode'
 * with the mapped HCI status.
 */
9833 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9834 u8 link_type, u8 addr_type, u8 status,
9837 struct mgmt_pending_cmd *cmd;
9839 cmd = pending_find(opcode, hdev);
9843 cmd->cmd_complete(cmd, mgmt_status(status));
9844 mgmt_pending_remove(cmd);
/* Complete a pending User Confirm Reply command. */
9849 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9850 u8 link_type, u8 addr_type, u8 status)
9852 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9853 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Complete a pending User Confirm Negative Reply command. */
9856 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9857 u8 link_type, u8 addr_type, u8 status)
9859 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9861 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Complete a pending User Passkey Reply command. */
9864 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9865 u8 link_type, u8 addr_type, u8 status)
9867 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9868 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Complete a pending User Passkey Negative Reply command. */
9871 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9872 u8 link_type, u8 addr_type, u8 status)
9874 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9876 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Broadcast a MGMT_EV_PASSKEY_NOTIFY event showing the passkey the
 * remote side must enter; 'entered' counts digits typed so far.
 */
9879 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9880 u8 link_type, u8 addr_type, u32 passkey,
9883 struct mgmt_ev_passkey_notify ev;
9885 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9887 bacpy(&ev.addr.bdaddr, bdaddr);
9888 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9889 ev.passkey = __cpu_to_le32(passkey);
9890 ev.entered = entered;
9892 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure: broadcast MGMT_EV_AUTH_FAILED
 * (skipping the socket of any in-progress Pair Device command) and,
 * if such a command exists, complete it with the failure status.
 */
9895 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9897 struct mgmt_ev_auth_failed ev;
9898 struct mgmt_pending_cmd *cmd;
9899 u8 status = mgmt_status(hci_status);
9901 bacpy(&ev.addr.bdaddr, &conn->dst);
9902 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
/* NOTE(review): the 'ev.status = status' assignment is elided here */
9905 cmd = find_pairing(conn);
9907 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9908 cmd ? cmd->sk : NULL);
/* Guarded by 'if (cmd)' upstream; guard line elided in this capture */
9911 cmd->cmd_complete(cmd, status);
9912 mgmt_pending_remove(cmd);
/* Handle completion of enabling/disabling link-level authentication:
 * on failure, fail all pending Set Link Security commands; on success,
 * sync the HCI_LINK_SECURITY flag with the HCI_AUTH hardware flag and
 * send New Settings if it changed.
 */
9916 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9918 struct cmd_lookup match = { NULL, hdev };
/* Failure path: report the error to all pending commands */
9922 u8 mgmt_err = mgmt_status(status);
9923 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9924 cmd_status_rsp, &mgmt_err);
/* Success path: mirror the controller's auth state into the dev flag */
9928 if (test_bit(HCI_AUTH, &hdev->flags))
9929 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9931 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9933 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9937 new_settings(hdev, match.sk);
/* mgmt_pending_foreach() callback: remember (and hold a reference to)
 * the first pending command's socket in the cmd_lookup match struct.
 */
9943 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9945 struct cmd_lookup *match = data;
9947 if (match->sk == NULL) {
9948 match->sk = cmd->sk;
9949 sock_hold(match->sk);
/* Handle completion of a class-of-device update: find the initiating
 * socket among pending Set Dev Class / Add UUID / Remove UUID commands,
 * then broadcast Class Of Dev Changed (3-byte CoD) and Ext Info Changed.
 */
9953 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9956 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9958 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9959 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9960 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
/* Only broadcast on success; the guard line is elided in this capture */
9963 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9964 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9965 ext_info_changed(hdev, NULL);
/* Handle completion of a local name update: store the new name, and —
 * unless this happened as part of powering on — broadcast Local Name
 * Changed and Ext Info Changed to interested sockets.
 */
9972 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9974 struct mgmt_cp_set_local_name ev;
9975 struct mgmt_pending_cmd *cmd;
9980 memset(&ev, 0, sizeof(ev));
9981 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9982 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9984 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* Guarded by '!cmd' upstream: only persist when the change did not
 * originate from a mgmt command (guard line elided in this capture).
 */
9986 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9988 /* If this is a HCI command related to powering on the
9989 * HCI dev don't send any mgmt signals.
9991 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9995 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9996 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9997 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Return true if the 128-bit 'uuid' appears in the 'uuids' array of
 * 'uuid_count' entries (byte-wise comparison).
 */
10000 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10004 for (i = 0; i < uuid_count; i++) {
10005 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data TLV structure and return true if any
 * 16-, 32- or 128-bit service UUID it advertises matches one of the
 * filter UUIDs. 16/32-bit UUIDs are expanded to 128 bits using the
 * Bluetooth base UUID before comparison.
 */
10012 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10016 while (parsed < eir_len) {
10017 u8 field_len = eir[0];
/* Zero-length field terminates the EIR data */
10021 if (field_len == 0)
/* Field must not run past the end of the buffer */
10024 if (eir_len - parsed < field_len + 1)
10028 case EIR_UUID16_ALL:
10029 case EIR_UUID16_SOME:
/* Expand each 16-bit UUID (little-endian on the wire) into the
 * base UUID at octets 12-13.
 */
10030 for (i = 0; i + 3 <= field_len; i += 2) {
10031 memcpy(uuid, bluetooth_base_uuid, 16);
10032 uuid[13] = eir[i + 3];
10033 uuid[12] = eir[i + 2];
10034 if (has_uuid(uuid, uuid_count, uuids))
10038 case EIR_UUID32_ALL:
10039 case EIR_UUID32_SOME:
/* Expand each 32-bit UUID into the base UUID at octets 12-15 */
10040 for (i = 0; i + 5 <= field_len; i += 4) {
10041 memcpy(uuid, bluetooth_base_uuid, 16);
10042 uuid[15] = eir[i + 5];
10043 uuid[14] = eir[i + 4];
10044 uuid[13] = eir[i + 3];
10045 uuid[12] = eir[i + 2];
10046 if (has_uuid(uuid, uuid_count, uuids))
10050 case EIR_UUID128_ALL:
10051 case EIR_UUID128_SOME:
/* 128-bit UUIDs are compared verbatim */
10052 for (i = 0; i + 17 <= field_len; i += 16) {
10053 memcpy(uuid, eir + i + 2, 16);
10054 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length octet + field_len payload) */
10060 parsed += field_len + 1;
10061 eir += field_len + 1;
/* Apply the Start Service Discovery filter (RSSI threshold + UUID list)
 * to a found device. Returns whether the result should be reported.
 * NOTE(review): the 'return false'/'return true' lines are elided in
 * this capture; only the conditions are visible.
 */
10067 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10068 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10070 /* If a RSSI threshold has been specified, and
10071 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10072 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10073 * is set, let it through for further processing, as we might need to
10074 * restart the scan.
10076 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10077 * the results are also dropped.
10079 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10080 (rssi == HCI_RSSI_INVALID ||
10081 (rssi < hdev->discovery.rssi &&
10082 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10085 if (hdev->discovery.uuid_count != 0) {
10086 /* If a list of UUIDs is provided in filter, results with no
10087 * matching UUID should be dropped.
10089 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10090 hdev->discovery.uuids) &&
10091 !eir_has_uuids(scan_rsp, scan_rsp_len,
10092 hdev->discovery.uuid_count,
10093 hdev->discovery.uuids))
10097 /* If duplicate filtering does not report RSSI changes, then restart
10098 * scanning to ensure updated result with updated RSSI values.
10100 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10101 /* Validate RSSI value against the RSSI threshold once more. */
10102 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10103 rssi < hdev->discovery.rssi)
/* Broadcast a MGMT_EV_ADV_MONITOR_DEVICE_LOST event when a device
 * previously matched by an Advertisement Monitor is no longer seen.
 */
10110 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10111 bdaddr_t *bdaddr, u8 addr_type)
10113 struct mgmt_ev_adv_monitor_device_lost ev;
10115 ev.monitor_handle = cpu_to_le16(handle);
10116 bacpy(&ev.addr.bdaddr, bdaddr);
10117 ev.addr.type = addr_type;
10119 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
/* Build and send a MGMT_EV_ADV_MONITOR_DEVICE_FOUND event by prefixing
 * an already-built DEVICE_FOUND skb payload with the matched monitor's
 * handle.
 */
10123 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10124 struct sk_buff *skb,
10125 struct sock *skip_sk,
10128 struct sk_buff *advmon_skb;
10129 size_t advmon_skb_len;
10130 __le16 *monitor_handle;
/* Size = DEVICE_FOUND payload + the extra monitor_handle field */
10135 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10136 sizeof(struct mgmt_ev_device_found)) + skb->len;
10137 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10142 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10143 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10144 * store monitor_handle of the matched monitor.
10146 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10147 *monitor_handle = cpu_to_le16(handle);
10148 skb_put_data(advmon_skb, skb->data, skb->len);
10150 mgmt_event_skb(advmon_skb, skip_sk);
/* Route a found-device report to DEVICE_FOUND and/or
 * ADV_MONITOR_DEVICE_FOUND events depending on whether discovery is
 * active, passive reports are pending, and which monitors matched.
 */
10153 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10154 bdaddr_t *bdaddr, bool report_device,
10155 struct sk_buff *skb,
10156 struct sock *skip_sk)
10158 struct monitored_device *dev, *tmp;
10159 bool matched = false;
10160 bool notified = false;
10162 /* We have received the Advertisement Report because:
10163 * 1. the kernel has initiated active discovery
10164 * 2. if not, we have pend_le_reports > 0 in which case we are doing
10166 * 3. if none of the above is true, we have one or more active
10167 * Advertisement Monitor
10169 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10170 * and report ONLY one advertisement per device for the matched Monitor
10171 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10173 * For case 3, since we are not active scanning and all advertisements
10174 * received are due to a matched Advertisement Monitor, report all
10175 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
/* Fast path: no monitor notifications pending, just broadcast */
10177 if (report_device && !hdev->advmon_pend_notify) {
10178 mgmt_event_skb(skb, skip_sk);
10182 hdev->advmon_pend_notify = false;
/* Notify once per monitored device; re-arm pend_notify for any
 * device (other than this one) still awaiting its first report.
 */
10184 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10185 if (!bacmp(&dev->bdaddr, bdaddr)) {
10188 if (!dev->notified) {
10189 mgmt_send_adv_monitor_device_found(hdev, skb,
10193 dev->notified = true;
10197 if (!dev->notified)
10198 hdev->advmon_pend_notify = true;
10201 if (!report_device &&
10202 ((matched && !notified) || !msft_monitor_supported(hdev))) {
10203 /* Handle 0 indicates that we are not active scanning and this
10204 * is a subsequent advertisement report for an already matched
10205 * Advertisement Monitor or the controller offloading support
10206 * is not available.
10208 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
/* Guarded by 'if (report_device)' upstream; guard elided here */
10212 mgmt_event_skb(skb, skip_sk);
/* Emit a MGMT_EV_MESH_DEVICE_FOUND event, but only if the advertising
 * or scan response data contains at least one AD type from the
 * configured mesh_ad_types filter list.
 */
10217 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10218 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10219 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10222 struct sk_buff *skb;
10223 struct mgmt_ev_mesh_device_found *ev;
/* No filter configured: nothing to report */
10226 if (!hdev->mesh_ad_types[0])
10229 /* Scan for requested AD types */
10231 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10232 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10233 if (!hdev->mesh_ad_types[j])
10236 if (hdev->mesh_ad_types[j] == eir[i + 1])
/* Repeat the AD-type scan over the scan response data */
10242 if (scan_rsp_len > 0) {
10243 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10244 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10245 if (!hdev->mesh_ad_types[j])
10248 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10257 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10258 sizeof(*ev) + eir_len + scan_rsp_len);
10262 ev = skb_put(skb, sizeof(*ev));
10264 bacpy(&ev->addr.bdaddr, bdaddr);
10265 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10267 ev->flags = cpu_to_le32(flags);
10268 ev->instant = cpu_to_le64(instant);
/* NOTE(review): the 'ev->rssi = rssi' assignment is elided here */
10271 /* Copy EIR or advertising data into event */
10272 skb_put_data(skb, eir, eir_len);
10274 if (scan_rsp_len > 0)
10275 /* Append scan response data to event */
10276 skb_put_data(skb, scan_rsp, scan_rsp_len);
10278 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10280 mgmt_event_skb(skb, NULL);
/* Main entry point for found-device reports: applies discovery/service
 * filters, hands LE reports to mesh handling when HCI_MESH is set,
 * builds the DEVICE_FOUND payload (EIR + optional CoD + scan response),
 * and dispatches through the Advertisement Monitor logic.
 */
10283 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10284 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10285 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10288 struct sk_buff *skb;
10289 struct mgmt_ev_device_found *ev;
10290 bool report_device = hci_discovery_active(hdev);
/* Mesh mode consumes LE reports through its own event */
10292 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10293 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10294 eir, eir_len, scan_rsp, scan_rsp_len,
10297 /* Don't send events for a non-kernel initiated discovery. With
10298 * LE one exception is if we have pend_le_reports > 0 in which
10299 * case we're doing passive scanning and want these events.
10301 if (!hci_discovery_active(hdev)) {
10302 if (link_type == ACL_LINK)
10304 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10305 report_device = true;
10306 else if (!hci_is_adv_monitoring(hdev))
10310 if (hdev->discovery.result_filtering) {
10311 /* We are using service discovery */
10312 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10317 if (hdev->discovery.limited) {
10318 /* Check for limited discoverable bit */
/* BR/EDR: bit 5 of CoD byte 1 is "limited discoverable mode" */
10320 if (!(dev_class[1] & 0x20))
/* LE: check the LE_AD_LIMITED bit in the Flags AD field */
10323 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10324 if (!flags || !(flags[0] & LE_AD_LIMITED))
10329 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10330 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10331 sizeof(*ev) + eir_len + scan_rsp_len + 5);
10335 ev = skb_put(skb, sizeof(*ev));
10337 /* In case of device discovery with BR/EDR devices (pre 1.2), the
10338 * RSSI value was reported as 0 when not available. This behavior
10339 * is kept when using device discovery. This is required for full
10340 * backwards compatibility with the API.
10342 * However when using service discovery, the value 127 will be
10343 * returned when the RSSI is not available.
10345 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10346 link_type == ACL_LINK)
10349 bacpy(&ev->addr.bdaddr, bdaddr);
10350 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* NOTE(review): 'ev->rssi = rssi' appears elided here in this capture */
10352 ev->flags = cpu_to_le32(flags);
10355 /* Copy EIR or advertising data into event */
10356 skb_put_data(skb, eir, eir_len);
/* Append class of device as a synthesized EIR field when the source
 * data did not already contain one.
 */
10358 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10361 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10363 skb_put_data(skb, eir_cod, sizeof(eir_cod));
10366 if (scan_rsp_len > 0)
10367 /* Append scan response data to event */
10368 skb_put_data(skb, scan_rsp, scan_rsp_len);
10370 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10372 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
/* Report a resolved remote name as a DEVICE_FOUND event carrying an
 * EIR_NAME_COMPLETE field; a NULL name sets the
 * MGMT_DEV_FOUND_NAME_REQUEST_FAILED flag instead.
 */
10375 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10376 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10378 struct sk_buff *skb;
10379 struct mgmt_ev_device_found *ev;
10383 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10384 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10386 ev = skb_put(skb, sizeof(*ev));
10387 bacpy(&ev->addr.bdaddr, bdaddr);
10388 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* NOTE(review): 'ev->rssi = rssi' and the 'if (name)' guard around the
 * next two branches are elided in this capture.
 */
10392 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10394 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10396 ev->eir_len = cpu_to_le16(eir_len);
10397 ev->flags = cpu_to_le32(flags);
10399 mgmt_event_skb(skb, NULL);
/* Broadcast a MGMT_EV_DISCOVERING event announcing the start (1) or
 * stop (0) of device discovery, tagged with the discovery type.
 */
10402 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10404 struct mgmt_ev_discovering ev;
10406 bt_dev_dbg(hdev, "discovering %u", discovering);
10408 memset(&ev, 0, sizeof(ev));
10409 ev.type = hdev->discovery.type;
10410 ev.discovering = discovering;
10412 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a MGMT_EV_CONTROLLER_SUSPEND event with the suspend state. */
10415 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10417 struct mgmt_ev_controller_suspend ev;
10419 ev.suspend_state = state;
10420 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a MGMT_EV_CONTROLLER_RESUME event with the wake reason and,
 * when available, the address of the device that woke the controller.
 * NOTE(review): the guard choosing between the bacpy branch and the
 * memset branch is elided in this capture.
 */
10423 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10426 struct mgmt_ev_controller_resume ev;
10428 ev.wake_reason = reason;
10430 bacpy(&ev.addr.bdaddr, bdaddr);
10431 ev.addr.type = addr_type;
10433 memset(&ev.addr, 0, sizeof(ev.addr));
10436 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Registration descriptor for the HCI control channel: command handler
 * table plus per-hdev initialization hook.
 */
10439 static struct hci_mgmt_chan chan = {
10440 .channel = HCI_CHANNEL_CONTROL,
10441 .handler_count = ARRAY_SIZE(mgmt_handlers),
10442 .handlers = mgmt_handlers,
10443 .hdev_init = mgmt_init_hdev,
/* Register the management control channel with the HCI core. */
10446 int mgmt_init(void)
10448 return hci_mgmt_chan_register(&chan);
/* Unregister the management control channel on module exit. */
10451 void mgmt_exit(void)
10453 hci_mgmt_chan_unregister(&chan);
/* Clean up per-socket state when a mgmt socket closes: complete (as
 * failed) every outstanding mesh transmission owned by that socket on
 * every registered controller.
 */
10456 void mgmt_cleanup(struct sock *sk)
10458 struct mgmt_mesh_tx *mesh_tx;
10459 struct hci_dev *hdev;
10461 read_lock(&hci_dev_list_lock);
10463 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Loop construct around this lookup is elided in this capture;
 * upstream drains all pending mesh_tx entries for this socket.
 */
10465 mesh_tx = mgmt_mesh_next(hdev, sk);
10468 mesh_send_complete(hdev, mesh_tx, true);
10472 read_unlock(&hci_dev_list_lock);