2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 22
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
55 MGMT_OP_SET_LINK_SECURITY,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
113 MGMT_OP_GET_PHY_CONFIGURATION,
114 MGMT_OP_SET_PHY_CONFIGURATION,
115 MGMT_OP_SET_BLOCKED_KEYS,
116 MGMT_OP_SET_WIDEBAND_SPEECH,
117 MGMT_OP_READ_CONTROLLER_CAP,
118 MGMT_OP_READ_EXP_FEATURES_INFO,
119 MGMT_OP_SET_EXP_FEATURE,
120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 MGMT_OP_GET_DEVICE_FLAGS,
125 MGMT_OP_SET_DEVICE_FLAGS,
126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 MGMT_OP_REMOVE_ADV_MONITOR,
129 MGMT_OP_ADD_EXT_ADV_PARAMS,
130 MGMT_OP_ADD_EXT_ADV_DATA,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 MGMT_OP_SET_MESH_RECEIVER,
133 MGMT_OP_MESH_READ_FEATURES,
135 MGMT_OP_MESH_SEND_CANCEL,
138 static const u16 mgmt_events[] = {
139 MGMT_EV_CONTROLLER_ERROR,
141 MGMT_EV_INDEX_REMOVED,
142 MGMT_EV_NEW_SETTINGS,
143 MGMT_EV_CLASS_OF_DEV_CHANGED,
144 MGMT_EV_LOCAL_NAME_CHANGED,
145 MGMT_EV_NEW_LINK_KEY,
146 MGMT_EV_NEW_LONG_TERM_KEY,
147 MGMT_EV_DEVICE_CONNECTED,
148 MGMT_EV_DEVICE_DISCONNECTED,
149 MGMT_EV_CONNECT_FAILED,
150 MGMT_EV_PIN_CODE_REQUEST,
151 MGMT_EV_USER_CONFIRM_REQUEST,
152 MGMT_EV_USER_PASSKEY_REQUEST,
154 MGMT_EV_DEVICE_FOUND,
156 MGMT_EV_DEVICE_BLOCKED,
157 MGMT_EV_DEVICE_UNBLOCKED,
158 MGMT_EV_DEVICE_UNPAIRED,
159 MGMT_EV_PASSKEY_NOTIFY,
162 MGMT_EV_DEVICE_ADDED,
163 MGMT_EV_DEVICE_REMOVED,
164 MGMT_EV_NEW_CONN_PARAM,
165 MGMT_EV_UNCONF_INDEX_ADDED,
166 MGMT_EV_UNCONF_INDEX_REMOVED,
167 MGMT_EV_NEW_CONFIG_OPTIONS,
168 MGMT_EV_EXT_INDEX_ADDED,
169 MGMT_EV_EXT_INDEX_REMOVED,
170 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 MGMT_EV_ADVERTISING_ADDED,
172 MGMT_EV_ADVERTISING_REMOVED,
173 MGMT_EV_EXT_INFO_CHANGED,
174 MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 MGMT_EV_EXP_FEATURE_CHANGED,
176 MGMT_EV_DEVICE_FLAGS_CHANGED,
177 MGMT_EV_ADV_MONITOR_ADDED,
178 MGMT_EV_ADV_MONITOR_REMOVED,
179 MGMT_EV_CONTROLLER_SUSPEND,
180 MGMT_EV_CONTROLLER_RESUME,
181 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
185 static const u16 mgmt_untrusted_commands[] = {
186 MGMT_OP_READ_INDEX_LIST,
188 MGMT_OP_READ_UNCONF_INDEX_LIST,
189 MGMT_OP_READ_CONFIG_INFO,
190 MGMT_OP_READ_EXT_INDEX_LIST,
191 MGMT_OP_READ_EXT_INFO,
192 MGMT_OP_READ_CONTROLLER_CAP,
193 MGMT_OP_READ_EXP_FEATURES_INFO,
194 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
198 static const u16 mgmt_untrusted_events[] = {
200 MGMT_EV_INDEX_REMOVED,
201 MGMT_EV_NEW_SETTINGS,
202 MGMT_EV_CLASS_OF_DEV_CHANGED,
203 MGMT_EV_LOCAL_NAME_CHANGED,
204 MGMT_EV_UNCONF_INDEX_ADDED,
205 MGMT_EV_UNCONF_INDEX_REMOVED,
206 MGMT_EV_NEW_CONFIG_OPTIONS,
207 MGMT_EV_EXT_INDEX_ADDED,
208 MGMT_EV_EXT_INDEX_REMOVED,
209 MGMT_EV_EXT_INFO_CHANGED,
210 MGMT_EV_EXP_FEATURE_CHANGED,
213 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
218 /* HCI to MGMT error code conversion table */
219 static const u8 mgmt_status_table[] = {
221 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
222 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
223 MGMT_STATUS_FAILED, /* Hardware Failure */
224 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
225 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
226 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
227 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
228 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
229 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
230 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
231 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
232 MGMT_STATUS_BUSY, /* Command Disallowed */
233 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
234 MGMT_STATUS_REJECTED, /* Rejected Security */
235 MGMT_STATUS_REJECTED, /* Rejected Personal */
236 MGMT_STATUS_TIMEOUT, /* Host Timeout */
237 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
238 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
239 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
240 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
241 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
242 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
243 MGMT_STATUS_BUSY, /* Repeated Attempts */
244 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
245 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
246 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
247 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
248 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
249 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
250 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
251 MGMT_STATUS_FAILED, /* Unspecified Error */
252 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
253 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
254 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
255 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
256 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
257 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
258 MGMT_STATUS_FAILED, /* Unit Link Key Used */
259 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
260 MGMT_STATUS_TIMEOUT, /* Instant Passed */
261 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
262 MGMT_STATUS_FAILED, /* Transaction Collision */
263 MGMT_STATUS_FAILED, /* Reserved for future use */
264 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
265 MGMT_STATUS_REJECTED, /* QoS Rejected */
266 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
267 MGMT_STATUS_REJECTED, /* Insufficient Security */
268 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
269 MGMT_STATUS_FAILED, /* Reserved for future use */
270 MGMT_STATUS_BUSY, /* Role Switch Pending */
271 MGMT_STATUS_FAILED, /* Reserved for future use */
272 MGMT_STATUS_FAILED, /* Slot Violation */
273 MGMT_STATUS_FAILED, /* Role Switch Failed */
274 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
275 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
276 MGMT_STATUS_BUSY, /* Host Busy Pairing */
277 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
278 MGMT_STATUS_BUSY, /* Controller Busy */
279 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
280 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
281 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
282 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
283 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
286 static u8 mgmt_errno_status(int err)
290 return MGMT_STATUS_SUCCESS;
292 return MGMT_STATUS_REJECTED;
294 return MGMT_STATUS_INVALID_PARAMS;
296 return MGMT_STATUS_NOT_SUPPORTED;
298 return MGMT_STATUS_BUSY;
300 return MGMT_STATUS_AUTH_FAILED;
302 return MGMT_STATUS_NO_RESOURCES;
304 return MGMT_STATUS_ALREADY_CONNECTED;
306 return MGMT_STATUS_DISCONNECTED;
309 return MGMT_STATUS_FAILED;
312 static u8 mgmt_status(int err)
315 return mgmt_errno_status(err);
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
320 return MGMT_STATUS_FAILED;
323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
326 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331 u16 len, int flag, struct sock *skip_sk)
333 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338 struct sock *skip_sk)
340 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 HCI_SOCK_TRUSTED, skip_sk);
344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
346 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
350 static u8 le_addr_type(u8 mgmt_addr_type)
352 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 return ADDR_LE_DEV_PUBLIC;
355 return ADDR_LE_DEV_RANDOM;
358 void mgmt_fill_version_info(void *ver)
360 struct mgmt_rp_read_version *rp = ver;
362 rp->version = MGMT_VERSION;
363 rp->revision = cpu_to_le16(MGMT_REVISION);
366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
369 struct mgmt_rp_read_version rp;
371 bt_dev_dbg(hdev, "sock %p", sk);
373 mgmt_fill_version_info(&rp);
375 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
382 struct mgmt_rp_read_commands *rp;
383 u16 num_commands, num_events;
387 bt_dev_dbg(hdev, "sock %p", sk);
389 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 num_commands = ARRAY_SIZE(mgmt_commands);
391 num_events = ARRAY_SIZE(mgmt_events);
393 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 num_events = ARRAY_SIZE(mgmt_untrusted_events);
397 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
399 rp = kmalloc(rp_size, GFP_KERNEL);
403 rp->num_commands = cpu_to_le16(num_commands);
404 rp->num_events = cpu_to_le16(num_events);
406 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 __le16 *opcode = rp->opcodes;
409 for (i = 0; i < num_commands; i++, opcode++)
410 put_unaligned_le16(mgmt_commands[i], opcode);
412 for (i = 0; i < num_events; i++, opcode++)
413 put_unaligned_le16(mgmt_events[i], opcode);
415 __le16 *opcode = rp->opcodes;
417 for (i = 0; i < num_commands; i++, opcode++)
418 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
420 for (i = 0; i < num_events; i++, opcode++)
421 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
434 struct mgmt_rp_read_index_list *rp;
440 bt_dev_dbg(hdev, "sock %p", sk);
442 read_lock(&hci_dev_list_lock);
445 list_for_each_entry(d, &hci_dev_list, list) {
446 if (d->dev_type == HCI_PRIMARY &&
447 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
451 rp_len = sizeof(*rp) + (2 * count);
452 rp = kmalloc(rp_len, GFP_ATOMIC);
454 read_unlock(&hci_dev_list_lock);
459 list_for_each_entry(d, &hci_dev_list, list) {
460 if (hci_dev_test_flag(d, HCI_SETUP) ||
461 hci_dev_test_flag(d, HCI_CONFIG) ||
462 hci_dev_test_flag(d, HCI_USER_CHANNEL))
465 /* Devices marked as raw-only are neither configured
466 * nor unconfigured controllers.
468 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
471 if (d->dev_type == HCI_PRIMARY &&
472 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
473 rp->index[count++] = cpu_to_le16(d->id);
474 bt_dev_dbg(hdev, "Added hci%u", d->id);
478 rp->num_controllers = cpu_to_le16(count);
479 rp_len = sizeof(*rp) + (2 * count);
481 read_unlock(&hci_dev_list_lock);
483 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
491 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
492 void *data, u16 data_len)
494 struct mgmt_rp_read_unconf_index_list *rp;
500 bt_dev_dbg(hdev, "sock %p", sk);
502 read_lock(&hci_dev_list_lock);
505 list_for_each_entry(d, &hci_dev_list, list) {
506 if (d->dev_type == HCI_PRIMARY &&
507 hci_dev_test_flag(d, HCI_UNCONFIGURED))
511 rp_len = sizeof(*rp) + (2 * count);
512 rp = kmalloc(rp_len, GFP_ATOMIC);
514 read_unlock(&hci_dev_list_lock);
519 list_for_each_entry(d, &hci_dev_list, list) {
520 if (hci_dev_test_flag(d, HCI_SETUP) ||
521 hci_dev_test_flag(d, HCI_CONFIG) ||
522 hci_dev_test_flag(d, HCI_USER_CHANNEL))
525 /* Devices marked as raw-only are neither configured
526 * nor unconfigured controllers.
528 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
531 if (d->dev_type == HCI_PRIMARY &&
532 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
533 rp->index[count++] = cpu_to_le16(d->id);
534 bt_dev_dbg(hdev, "Added hci%u", d->id);
538 rp->num_controllers = cpu_to_le16(count);
539 rp_len = sizeof(*rp) + (2 * count);
541 read_unlock(&hci_dev_list_lock);
543 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
544 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
551 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
552 void *data, u16 data_len)
554 struct mgmt_rp_read_ext_index_list *rp;
559 bt_dev_dbg(hdev, "sock %p", sk);
561 read_lock(&hci_dev_list_lock);
564 list_for_each_entry(d, &hci_dev_list, list) {
565 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
569 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
571 read_unlock(&hci_dev_list_lock);
576 list_for_each_entry(d, &hci_dev_list, list) {
577 if (hci_dev_test_flag(d, HCI_SETUP) ||
578 hci_dev_test_flag(d, HCI_CONFIG) ||
579 hci_dev_test_flag(d, HCI_USER_CHANNEL))
582 /* Devices marked as raw-only are neither configured
583 * nor unconfigured controllers.
585 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
588 if (d->dev_type == HCI_PRIMARY) {
589 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
590 rp->entry[count].type = 0x01;
592 rp->entry[count].type = 0x00;
593 } else if (d->dev_type == HCI_AMP) {
594 rp->entry[count].type = 0x02;
599 rp->entry[count].bus = d->bus;
600 rp->entry[count++].index = cpu_to_le16(d->id);
601 bt_dev_dbg(hdev, "Added hci%u", d->id);
604 rp->num_controllers = cpu_to_le16(count);
606 read_unlock(&hci_dev_list_lock);
608 /* If this command is called at least once, then all the
609 * default index and unconfigured index events are disabled
610 * and from now on only extended index events are used.
612 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
613 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
614 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
616 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
617 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
618 struct_size(rp, entry, count));
625 static bool is_configured(struct hci_dev *hdev)
627 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
631 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 !bacmp(&hdev->public_addr, BDADDR_ANY))
639 static __le32 get_missing_options(struct hci_dev *hdev)
643 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645 options |= MGMT_OPTION_EXTERNAL_CONFIG;
647 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649 !bacmp(&hdev->public_addr, BDADDR_ANY))
650 options |= MGMT_OPTION_PUBLIC_ADDRESS;
652 return cpu_to_le32(options);
655 static int new_options(struct hci_dev *hdev, struct sock *skip)
657 __le32 options = get_missing_options(hdev);
659 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
660 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
663 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
665 __le32 options = get_missing_options(hdev);
667 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
671 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
672 void *data, u16 data_len)
674 struct mgmt_rp_read_config_info rp;
677 bt_dev_dbg(hdev, "sock %p", sk);
681 memset(&rp, 0, sizeof(rp));
682 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
684 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
685 options |= MGMT_OPTION_EXTERNAL_CONFIG;
687 if (hdev->set_bdaddr)
688 options |= MGMT_OPTION_PUBLIC_ADDRESS;
690 rp.supported_options = cpu_to_le32(options);
691 rp.missing_options = get_missing_options(hdev);
693 hci_dev_unlock(hdev);
695 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
699 static u32 get_supported_phys(struct hci_dev *hdev)
701 u32 supported_phys = 0;
703 if (lmp_bredr_capable(hdev)) {
704 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
706 if (hdev->features[0][0] & LMP_3SLOT)
707 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
709 if (hdev->features[0][0] & LMP_5SLOT)
710 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
712 if (lmp_edr_2m_capable(hdev)) {
713 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
715 if (lmp_edr_3slot_capable(hdev))
716 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
718 if (lmp_edr_5slot_capable(hdev))
719 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
721 if (lmp_edr_3m_capable(hdev)) {
722 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
724 if (lmp_edr_3slot_capable(hdev))
725 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
727 if (lmp_edr_5slot_capable(hdev))
728 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
733 if (lmp_le_capable(hdev)) {
734 supported_phys |= MGMT_PHY_LE_1M_TX;
735 supported_phys |= MGMT_PHY_LE_1M_RX;
737 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
738 supported_phys |= MGMT_PHY_LE_2M_TX;
739 supported_phys |= MGMT_PHY_LE_2M_RX;
742 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
743 supported_phys |= MGMT_PHY_LE_CODED_TX;
744 supported_phys |= MGMT_PHY_LE_CODED_RX;
748 return supported_phys;
751 static u32 get_selected_phys(struct hci_dev *hdev)
753 u32 selected_phys = 0;
755 if (lmp_bredr_capable(hdev)) {
756 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
758 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
759 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
761 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
762 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
764 if (lmp_edr_2m_capable(hdev)) {
765 if (!(hdev->pkt_type & HCI_2DH1))
766 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
768 if (lmp_edr_3slot_capable(hdev) &&
769 !(hdev->pkt_type & HCI_2DH3))
770 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
772 if (lmp_edr_5slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH5))
774 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
776 if (lmp_edr_3m_capable(hdev)) {
777 if (!(hdev->pkt_type & HCI_3DH1))
778 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
780 if (lmp_edr_3slot_capable(hdev) &&
781 !(hdev->pkt_type & HCI_3DH3))
782 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
784 if (lmp_edr_5slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH5))
786 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
791 if (lmp_le_capable(hdev)) {
792 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
793 selected_phys |= MGMT_PHY_LE_1M_TX;
795 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
796 selected_phys |= MGMT_PHY_LE_1M_RX;
798 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
799 selected_phys |= MGMT_PHY_LE_2M_TX;
801 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
802 selected_phys |= MGMT_PHY_LE_2M_RX;
804 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
805 selected_phys |= MGMT_PHY_LE_CODED_TX;
807 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
808 selected_phys |= MGMT_PHY_LE_CODED_RX;
811 return selected_phys;
814 static u32 get_configurable_phys(struct hci_dev *hdev)
816 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
820 static u32 get_supported_settings(struct hci_dev *hdev)
824 settings |= MGMT_SETTING_POWERED;
825 settings |= MGMT_SETTING_BONDABLE;
826 settings |= MGMT_SETTING_DEBUG_KEYS;
827 settings |= MGMT_SETTING_CONNECTABLE;
828 settings |= MGMT_SETTING_DISCOVERABLE;
830 if (lmp_bredr_capable(hdev)) {
831 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832 settings |= MGMT_SETTING_FAST_CONNECTABLE;
833 settings |= MGMT_SETTING_BREDR;
834 settings |= MGMT_SETTING_LINK_SECURITY;
836 if (lmp_ssp_capable(hdev)) {
837 settings |= MGMT_SETTING_SSP;
838 if (IS_ENABLED(CONFIG_BT_HS))
839 settings |= MGMT_SETTING_HS;
842 if (lmp_sc_capable(hdev))
843 settings |= MGMT_SETTING_SECURE_CONN;
845 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
847 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
850 if (lmp_le_capable(hdev)) {
851 settings |= MGMT_SETTING_LE;
852 settings |= MGMT_SETTING_SECURE_CONN;
853 settings |= MGMT_SETTING_PRIVACY;
854 settings |= MGMT_SETTING_STATIC_ADDRESS;
855 settings |= MGMT_SETTING_ADVERTISING;
858 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
860 settings |= MGMT_SETTING_CONFIGURATION;
862 if (cis_central_capable(hdev))
863 settings |= MGMT_SETTING_CIS_CENTRAL;
865 if (cis_peripheral_capable(hdev))
866 settings |= MGMT_SETTING_CIS_PERIPHERAL;
868 settings |= MGMT_SETTING_PHY_CONFIGURATION;
873 static u32 get_current_settings(struct hci_dev *hdev)
877 if (hdev_is_powered(hdev))
878 settings |= MGMT_SETTING_POWERED;
880 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
881 settings |= MGMT_SETTING_CONNECTABLE;
883 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
884 settings |= MGMT_SETTING_FAST_CONNECTABLE;
886 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
887 settings |= MGMT_SETTING_DISCOVERABLE;
889 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
890 settings |= MGMT_SETTING_BONDABLE;
892 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
893 settings |= MGMT_SETTING_BREDR;
895 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
896 settings |= MGMT_SETTING_LE;
898 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
899 settings |= MGMT_SETTING_LINK_SECURITY;
901 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
902 settings |= MGMT_SETTING_SSP;
904 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
905 settings |= MGMT_SETTING_HS;
907 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
908 settings |= MGMT_SETTING_ADVERTISING;
910 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
911 settings |= MGMT_SETTING_SECURE_CONN;
913 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
914 settings |= MGMT_SETTING_DEBUG_KEYS;
916 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
917 settings |= MGMT_SETTING_PRIVACY;
919 /* The current setting for static address has two purposes. The
920 * first is to indicate if the static address will be used and
921 * the second is to indicate if it is actually set.
923 * This means if the static address is not configured, this flag
924 * will never be set. If the address is configured, then if the
925 * address is actually used decides if the flag is set or not.
927 * For single mode LE only controllers and dual-mode controllers
928 * with BR/EDR disabled, the existence of the static address will
931 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
932 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
933 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
934 if (bacmp(&hdev->static_addr, BDADDR_ANY))
935 settings |= MGMT_SETTING_STATIC_ADDRESS;
938 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
939 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
941 if (cis_central_capable(hdev))
942 settings |= MGMT_SETTING_CIS_CENTRAL;
944 if (cis_peripheral_capable(hdev))
945 settings |= MGMT_SETTING_CIS_PERIPHERAL;
950 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
952 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
955 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
957 struct mgmt_pending_cmd *cmd;
959 /* If there's a pending mgmt command the flags will not yet have
960 * their final values, so check for this first.
962 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
964 struct mgmt_mode *cp = cmd->param;
966 return LE_AD_GENERAL;
967 else if (cp->val == 0x02)
968 return LE_AD_LIMITED;
970 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
971 return LE_AD_LIMITED;
972 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
973 return LE_AD_GENERAL;
979 bool mgmt_get_connectable(struct hci_dev *hdev)
981 struct mgmt_pending_cmd *cmd;
983 /* If there's a pending mgmt command the flag will not yet have
984 * it's final value, so check for this first.
986 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
988 struct mgmt_mode *cp = cmd->param;
993 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
996 static int service_cache_sync(struct hci_dev *hdev, void *data)
998 hci_update_eir_sync(hdev);
999 hci_update_class_sync(hdev);
1004 static void service_cache_off(struct work_struct *work)
1006 struct hci_dev *hdev = container_of(work, struct hci_dev,
1007 service_cache.work);
1009 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1012 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1015 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1017 /* The generation of a new RPA and programming it into the
1018 * controller happens in the hci_req_enable_advertising()
1021 if (ext_adv_capable(hdev))
1022 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1024 return hci_enable_advertising_sync(hdev);
1027 static void rpa_expired(struct work_struct *work)
1029 struct hci_dev *hdev = container_of(work, struct hci_dev,
1032 bt_dev_dbg(hdev, "");
1034 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1036 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1039 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1042 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1044 static void discov_off(struct work_struct *work)
1046 struct hci_dev *hdev = container_of(work, struct hci_dev,
1049 bt_dev_dbg(hdev, "");
1053 /* When discoverable timeout triggers, then just make sure
1054 * the limited discoverable flag is cleared. Even in the case
1055 * of a timeout triggered from general discoverable, it is
1056 * safe to unconditionally clear the flag.
1058 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1059 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1060 hdev->discov_timeout = 0;
1062 hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
1064 mgmt_new_settings(hdev);
1066 hci_dev_unlock(hdev);
1069 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1071 static void mesh_send_complete(struct hci_dev *hdev,
1072 struct mgmt_mesh_tx *mesh_tx, bool silent)
1074 u8 handle = mesh_tx->handle;
1077 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1078 sizeof(handle), NULL);
1080 mgmt_mesh_remove(mesh_tx);
1083 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1085 struct mgmt_mesh_tx *mesh_tx;
1087 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1088 hci_disable_advertising_sync(hdev);
1089 mesh_tx = mgmt_mesh_next(hdev, NULL);
1092 mesh_send_complete(hdev, mesh_tx, false);
1097 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1098 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1099 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1101 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1106 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1107 mesh_send_start_complete);
1110 mesh_send_complete(hdev, mesh_tx, false);
1112 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1115 static void mesh_send_done(struct work_struct *work)
1117 struct hci_dev *hdev = container_of(work, struct hci_dev,
1118 mesh_send_done.work);
1120 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1123 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1126 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1128 if (hci_dev_test_flag(hdev, HCI_MGMT))
1131 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1133 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1134 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1135 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1136 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1138 /* Non-mgmt controlled devices get this bit set
1139 * implicitly so that pairing works for them, however
1140 * for mgmt we require user-space to explicitly enable
1143 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1145 hci_dev_set_flag(hdev, HCI_MGMT);
1148 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1149 void *data, u16 data_len)
1151 struct mgmt_rp_read_info rp;
1153 bt_dev_dbg(hdev, "sock %p", sk);
1157 memset(&rp, 0, sizeof(rp));
1159 bacpy(&rp.bdaddr, &hdev->bdaddr);
1161 rp.version = hdev->hci_ver;
1162 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1164 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1165 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1167 memcpy(rp.dev_class, hdev->dev_class, 3);
1169 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1170 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1172 hci_dev_unlock(hdev);
1174 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1178 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1183 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1184 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1185 hdev->dev_class, 3);
1187 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1188 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1191 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1192 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1193 hdev->dev_name, name_len);
1195 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1196 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1197 hdev->short_name, name_len);
1202 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1203 void *data, u16 data_len)
1206 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1209 bt_dev_dbg(hdev, "sock %p", sk);
1211 memset(&buf, 0, sizeof(buf));
1215 bacpy(&rp->bdaddr, &hdev->bdaddr);
1217 rp->version = hdev->hci_ver;
1218 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1220 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1221 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1224 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1225 rp->eir_len = cpu_to_le16(eir_len);
1227 hci_dev_unlock(hdev);
1229 /* If this command is called at least once, then the events
1230 * for class of device and local name changes are disabled
1231 * and only the new extended controller information event
1234 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1235 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1236 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1238 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1239 sizeof(*rp) + eir_len);
1242 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1245 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1248 memset(buf, 0, sizeof(buf));
1250 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1251 ev->eir_len = cpu_to_le16(eir_len);
1253 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1254 sizeof(*ev) + eir_len,
1255 HCI_MGMT_EXT_INFO_EVENTS, skip);
1258 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1260 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1262 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1266 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1268 struct mgmt_ev_advertising_added ev;
1270 ev.instance = instance;
1272 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1275 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1278 struct mgmt_ev_advertising_removed ev;
1280 ev.instance = instance;
1282 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1285 static void cancel_adv_timeout(struct hci_dev *hdev)
1287 if (hdev->adv_instance_timeout) {
1288 hdev->adv_instance_timeout = 0;
1289 cancel_delayed_work(&hdev->adv_instance_expire);
1293 /* This function requires the caller holds hdev->lock */
1294 static void restart_le_actions(struct hci_dev *hdev)
1296 struct hci_conn_params *p;
1298 list_for_each_entry(p, &hdev->le_conn_params, list) {
1299 /* Needed for AUTO_OFF case where might not "really"
1300 * have been powered off.
1302 hci_pend_le_list_del_init(p);
1304 switch (p->auto_connect) {
1305 case HCI_AUTO_CONN_DIRECT:
1306 case HCI_AUTO_CONN_ALWAYS:
1307 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1309 case HCI_AUTO_CONN_REPORT:
1310 hci_pend_le_list_add(p, &hdev->pend_le_reports);
1318 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1320 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1322 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1323 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1326 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1328 struct mgmt_pending_cmd *cmd = data;
1329 struct mgmt_mode *cp;
1331 /* Make sure cmd still outstanding. */
1332 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1337 bt_dev_dbg(hdev, "err %d", err);
1342 restart_le_actions(hdev);
1343 hci_update_passive_scan(hdev);
1344 hci_dev_unlock(hdev);
1347 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1349 /* Only call new_setting for power on as power off is deferred
1350 * to hdev->power_off work which does call hci_dev_do_close.
1353 new_settings(hdev, cmd->sk);
1355 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1359 mgmt_pending_remove(cmd);
1362 static int set_powered_sync(struct hci_dev *hdev, void *data)
1364 struct mgmt_pending_cmd *cmd = data;
1365 struct mgmt_mode *cp = cmd->param;
1367 BT_DBG("%s", hdev->name);
1369 return hci_set_powered_sync(hdev, cp->val);
1372 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1375 struct mgmt_mode *cp = data;
1376 struct mgmt_pending_cmd *cmd;
1379 bt_dev_dbg(hdev, "sock %p", sk);
1381 if (cp->val != 0x00 && cp->val != 0x01)
1382 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1383 MGMT_STATUS_INVALID_PARAMS);
1387 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1388 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1393 if (!!cp->val == hdev_is_powered(hdev)) {
1394 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1398 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1404 /* Cancel potentially blocking sync operation before power off */
1405 if (cp->val == 0x00) {
1406 hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
1407 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1408 mgmt_set_powered_complete);
1410 /* Use hci_cmd_sync_submit since hdev might not be running */
1411 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1412 mgmt_set_powered_complete);
1416 mgmt_pending_remove(cmd);
1419 hci_dev_unlock(hdev);
1423 int mgmt_new_settings(struct hci_dev *hdev)
1425 return new_settings(hdev, NULL);
1430 struct hci_dev *hdev;
1434 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1436 struct cmd_lookup *match = data;
1438 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1440 list_del(&cmd->list);
1442 if (match->sk == NULL) {
1443 match->sk = cmd->sk;
1444 sock_hold(match->sk);
1447 mgmt_pending_free(cmd);
1450 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1454 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1455 mgmt_pending_remove(cmd);
1458 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1460 if (cmd->cmd_complete) {
1463 cmd->cmd_complete(cmd, *status);
1464 mgmt_pending_remove(cmd);
1469 cmd_status_rsp(cmd, data);
1472 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1474 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1475 cmd->param, cmd->param_len);
1478 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1480 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1481 cmd->param, sizeof(struct mgmt_addr_info));
1484 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1486 if (!lmp_bredr_capable(hdev))
1487 return MGMT_STATUS_NOT_SUPPORTED;
1488 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1489 return MGMT_STATUS_REJECTED;
1491 return MGMT_STATUS_SUCCESS;
1494 static u8 mgmt_le_support(struct hci_dev *hdev)
1496 if (!lmp_le_capable(hdev))
1497 return MGMT_STATUS_NOT_SUPPORTED;
1498 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1499 return MGMT_STATUS_REJECTED;
1501 return MGMT_STATUS_SUCCESS;
1504 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1507 struct mgmt_pending_cmd *cmd = data;
1509 bt_dev_dbg(hdev, "err %d", err);
1511 /* Make sure cmd still outstanding. */
1512 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1518 u8 mgmt_err = mgmt_status(err);
1519 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1520 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1524 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1525 hdev->discov_timeout > 0) {
1526 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1527 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1530 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1531 new_settings(hdev, cmd->sk);
1534 mgmt_pending_remove(cmd);
1535 hci_dev_unlock(hdev);
1538 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1540 BT_DBG("%s", hdev->name);
1542 return hci_update_discoverable_sync(hdev);
1545 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1548 struct mgmt_cp_set_discoverable *cp = data;
1549 struct mgmt_pending_cmd *cmd;
1553 bt_dev_dbg(hdev, "sock %p", sk);
1555 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1556 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1557 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1558 MGMT_STATUS_REJECTED);
1560 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1561 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1562 MGMT_STATUS_INVALID_PARAMS);
1564 timeout = __le16_to_cpu(cp->timeout);
1566 /* Disabling discoverable requires that no timeout is set,
1567 * and enabling limited discoverable requires a timeout.
1569 if ((cp->val == 0x00 && timeout > 0) ||
1570 (cp->val == 0x02 && timeout == 0))
1571 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572 MGMT_STATUS_INVALID_PARAMS);
1576 if (!hdev_is_powered(hdev) && timeout > 0) {
1577 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1578 MGMT_STATUS_NOT_POWERED);
1582 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1583 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1584 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1589 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1590 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1591 MGMT_STATUS_REJECTED);
1595 if (hdev->advertising_paused) {
1596 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1601 if (!hdev_is_powered(hdev)) {
1602 bool changed = false;
1604 /* Setting limited discoverable when powered off is
1605 * not a valid operation since it requires a timeout
1606 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1608 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1609 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1613 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1618 err = new_settings(hdev, sk);
1623 /* If the current mode is the same, then just update the timeout
1624 * value with the new value. And if only the timeout gets updated,
1625 * then no need for any HCI transactions.
1627 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1628 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1629 HCI_LIMITED_DISCOVERABLE)) {
1630 cancel_delayed_work(&hdev->discov_off);
1631 hdev->discov_timeout = timeout;
1633 if (cp->val && hdev->discov_timeout > 0) {
1634 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1635 queue_delayed_work(hdev->req_workqueue,
1636 &hdev->discov_off, to);
1639 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1643 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1649 /* Cancel any potential discoverable timeout that might be
1650 * still active and store new timeout value. The arming of
1651 * the timeout happens in the complete handler.
1653 cancel_delayed_work(&hdev->discov_off);
1654 hdev->discov_timeout = timeout;
1657 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1659 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1661 /* Limited discoverable mode */
1662 if (cp->val == 0x02)
1663 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1665 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1667 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1668 mgmt_set_discoverable_complete);
1671 mgmt_pending_remove(cmd);
1674 hci_dev_unlock(hdev);
1678 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1681 struct mgmt_pending_cmd *cmd = data;
1683 bt_dev_dbg(hdev, "err %d", err);
1685 /* Make sure cmd still outstanding. */
1686 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1692 u8 mgmt_err = mgmt_status(err);
1693 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1697 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1698 new_settings(hdev, cmd->sk);
1702 mgmt_pending_remove(cmd);
1704 hci_dev_unlock(hdev);
1707 static int set_connectable_update_settings(struct hci_dev *hdev,
1708 struct sock *sk, u8 val)
1710 bool changed = false;
1713 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1717 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1719 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1720 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1723 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1728 hci_update_scan(hdev);
1729 hci_update_passive_scan(hdev);
1730 return new_settings(hdev, sk);
1736 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1738 BT_DBG("%s", hdev->name);
1740 return hci_update_connectable_sync(hdev);
1743 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1746 struct mgmt_mode *cp = data;
1747 struct mgmt_pending_cmd *cmd;
1750 bt_dev_dbg(hdev, "sock %p", sk);
1752 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1753 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1755 MGMT_STATUS_REJECTED);
1757 if (cp->val != 0x00 && cp->val != 0x01)
1758 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1759 MGMT_STATUS_INVALID_PARAMS);
1763 if (!hdev_is_powered(hdev)) {
1764 err = set_connectable_update_settings(hdev, sk, cp->val);
1768 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1769 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1770 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1775 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1782 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1784 if (hdev->discov_timeout > 0)
1785 cancel_delayed_work(&hdev->discov_off);
1787 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1788 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1789 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1792 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1793 mgmt_set_connectable_complete);
1796 mgmt_pending_remove(cmd);
1799 hci_dev_unlock(hdev);
1803 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1806 struct mgmt_mode *cp = data;
1810 bt_dev_dbg(hdev, "sock %p", sk);
1812 if (cp->val != 0x00 && cp->val != 0x01)
1813 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1814 MGMT_STATUS_INVALID_PARAMS);
1819 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1821 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1823 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1828 /* In limited privacy mode the change of bondable mode
1829 * may affect the local advertising address.
1831 hci_update_discoverable(hdev);
1833 err = new_settings(hdev, sk);
1837 hci_dev_unlock(hdev);
1841 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1844 struct mgmt_mode *cp = data;
1845 struct mgmt_pending_cmd *cmd;
1849 bt_dev_dbg(hdev, "sock %p", sk);
1851 status = mgmt_bredr_support(hdev);
1853 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1856 if (cp->val != 0x00 && cp->val != 0x01)
1857 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1858 MGMT_STATUS_INVALID_PARAMS);
1862 if (!hdev_is_powered(hdev)) {
1863 bool changed = false;
1865 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1866 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1870 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1875 err = new_settings(hdev, sk);
1880 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1881 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1888 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1889 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1893 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1899 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1901 mgmt_pending_remove(cmd);
1906 hci_dev_unlock(hdev);
1910 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1912 struct cmd_lookup match = { NULL, hdev };
1913 struct mgmt_pending_cmd *cmd = data;
1914 struct mgmt_mode *cp = cmd->param;
1915 u8 enable = cp->val;
1918 /* Make sure cmd still outstanding. */
1919 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1923 u8 mgmt_err = mgmt_status(err);
1925 if (enable && hci_dev_test_and_clear_flag(hdev,
1927 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1928 new_settings(hdev, NULL);
1931 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1937 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1939 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1942 changed = hci_dev_test_and_clear_flag(hdev,
1945 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1948 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1951 new_settings(hdev, match.sk);
1956 hci_update_eir_sync(hdev);
1959 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1961 struct mgmt_pending_cmd *cmd = data;
1962 struct mgmt_mode *cp = cmd->param;
1963 bool changed = false;
1967 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1969 err = hci_write_ssp_mode_sync(hdev, cp->val);
1971 if (!err && changed)
1972 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1977 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1979 struct mgmt_mode *cp = data;
1980 struct mgmt_pending_cmd *cmd;
1984 bt_dev_dbg(hdev, "sock %p", sk);
1986 status = mgmt_bredr_support(hdev);
1988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1990 if (!lmp_ssp_capable(hdev))
1991 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1992 MGMT_STATUS_NOT_SUPPORTED);
1994 if (cp->val != 0x00 && cp->val != 0x01)
1995 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1996 MGMT_STATUS_INVALID_PARAMS);
2000 if (!hdev_is_powered(hdev)) {
2004 changed = !hci_dev_test_and_set_flag(hdev,
2007 changed = hci_dev_test_and_clear_flag(hdev,
2010 changed = hci_dev_test_and_clear_flag(hdev,
2013 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2016 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2021 err = new_settings(hdev, sk);
2026 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2027 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2032 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2033 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2037 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2041 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2045 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2046 MGMT_STATUS_FAILED);
2049 mgmt_pending_remove(cmd);
2053 hci_dev_unlock(hdev);
2057 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2059 struct mgmt_mode *cp = data;
2064 bt_dev_dbg(hdev, "sock %p", sk);
2066 if (!IS_ENABLED(CONFIG_BT_HS))
2067 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2068 MGMT_STATUS_NOT_SUPPORTED);
2070 status = mgmt_bredr_support(hdev);
2072 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2074 if (!lmp_ssp_capable(hdev))
2075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2076 MGMT_STATUS_NOT_SUPPORTED);
2078 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2079 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2080 MGMT_STATUS_REJECTED);
2082 if (cp->val != 0x00 && cp->val != 0x01)
2083 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2084 MGMT_STATUS_INVALID_PARAMS);
2088 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2089 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2095 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2097 if (hdev_is_powered(hdev)) {
2098 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2099 MGMT_STATUS_REJECTED);
2103 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2106 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2111 err = new_settings(hdev, sk);
2114 hci_dev_unlock(hdev);
2118 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2120 struct cmd_lookup match = { NULL, hdev };
2121 u8 status = mgmt_status(err);
2123 bt_dev_dbg(hdev, "err %d", err);
2126 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2131 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2133 new_settings(hdev, match.sk);
2139 static int set_le_sync(struct hci_dev *hdev, void *data)
2141 struct mgmt_pending_cmd *cmd = data;
2142 struct mgmt_mode *cp = cmd->param;
2147 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2149 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2150 hci_disable_advertising_sync(hdev);
2152 if (ext_adv_capable(hdev))
2153 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2155 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2158 err = hci_write_le_host_supported_sync(hdev, val, 0);
2160 /* Make sure the controller has a good default for
2161 * advertising data. Restrict the update to when LE
2162 * has actually been enabled. During power on, the
2163 * update in powered_update_hci will take care of it.
2165 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2166 if (ext_adv_capable(hdev)) {
2169 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2171 hci_update_scan_rsp_data_sync(hdev, 0x00);
2173 hci_update_adv_data_sync(hdev, 0x00);
2174 hci_update_scan_rsp_data_sync(hdev, 0x00);
2177 hci_update_passive_scan(hdev);
2183 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2185 struct mgmt_pending_cmd *cmd = data;
2186 u8 status = mgmt_status(err);
2187 struct sock *sk = cmd->sk;
2190 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2191 cmd_status_rsp, &status);
2195 mgmt_pending_remove(cmd);
2196 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2199 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2201 struct mgmt_pending_cmd *cmd = data;
2202 struct mgmt_cp_set_mesh *cp = cmd->param;
2203 size_t len = cmd->param_len;
2205 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2208 hci_dev_set_flag(hdev, HCI_MESH);
2210 hci_dev_clear_flag(hdev, HCI_MESH);
2214 /* If filters don't fit, forward all adv pkts */
2215 if (len <= sizeof(hdev->mesh_ad_types))
2216 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2218 hci_update_passive_scan_sync(hdev);
2222 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2224 struct mgmt_cp_set_mesh *cp = data;
2225 struct mgmt_pending_cmd *cmd;
2228 bt_dev_dbg(hdev, "sock %p", sk);
2230 if (!lmp_le_capable(hdev) ||
2231 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2232 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2233 MGMT_STATUS_NOT_SUPPORTED);
2235 if (cp->enable != 0x00 && cp->enable != 0x01)
2236 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2237 MGMT_STATUS_INVALID_PARAMS);
2241 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2245 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2249 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2250 MGMT_STATUS_FAILED);
2253 mgmt_pending_remove(cmd);
2256 hci_dev_unlock(hdev);
2260 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2262 struct mgmt_mesh_tx *mesh_tx = data;
2263 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2264 unsigned long mesh_send_interval;
2265 u8 mgmt_err = mgmt_status(err);
2267 /* Report any errors here, but don't report completion */
2270 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2271 /* Send Complete Error Code for handle */
2272 mesh_send_complete(hdev, mesh_tx, false);
2276 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2277 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2278 mesh_send_interval);
2281 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2283 struct mgmt_mesh_tx *mesh_tx = data;
2284 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2285 struct adv_info *adv, *next_instance;
2286 u8 instance = hdev->le_num_of_adv_sets + 1;
2287 u16 timeout, duration;
2290 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2291 return MGMT_STATUS_BUSY;
2294 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2295 adv = hci_add_adv_instance(hdev, instance, 0,
2296 send->adv_data_len, send->adv_data,
2299 HCI_ADV_TX_POWER_NO_PREFERENCE,
2300 hdev->le_adv_min_interval,
2301 hdev->le_adv_max_interval,
2305 mesh_tx->instance = instance;
2309 if (hdev->cur_adv_instance == instance) {
2310 /* If the currently advertised instance is being changed then
2311 * cancel the current advertising and schedule the next
2312 * instance. If there is only one instance then the overridden
2313 * advertising data will be visible right away.
2315 cancel_adv_timeout(hdev);
2317 next_instance = hci_get_next_instance(hdev, instance);
2319 instance = next_instance->instance;
2322 } else if (hdev->adv_instance_timeout) {
2323 /* Immediately advertise the new instance if no other, or
2324 * let it go naturally from queue if ADV is already happening
2330 return hci_schedule_adv_instance_sync(hdev, instance, true);
2335 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2337 struct mgmt_rp_mesh_read_features *rp = data;
2339 if (rp->used_handles >= rp->max_handles)
2342 rp->handles[rp->used_handles++] = mesh_tx->handle;
2345 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2346 void *data, u16 len)
2348 struct mgmt_rp_mesh_read_features rp;
2350 if (!lmp_le_capable(hdev) ||
2351 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2352 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2353 MGMT_STATUS_NOT_SUPPORTED);
2355 memset(&rp, 0, sizeof(rp));
2356 rp.index = cpu_to_le16(hdev->id);
2357 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2358 rp.max_handles = MESH_HANDLES_MAX;
2363 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2365 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2366 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2368 hci_dev_unlock(hdev);
2372 static int send_cancel(struct hci_dev *hdev, void *data)
2374 struct mgmt_pending_cmd *cmd = data;
2375 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2376 struct mgmt_mesh_tx *mesh_tx;
2378 if (!cancel->handle) {
2380 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2383 mesh_send_complete(hdev, mesh_tx, false);
2386 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2388 if (mesh_tx && mesh_tx->sk == cmd->sk)
2389 mesh_send_complete(hdev, mesh_tx, false);
2392 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2394 mgmt_pending_free(cmd);
2399 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2400 void *data, u16 len)
2402 struct mgmt_pending_cmd *cmd;
2405 if (!lmp_le_capable(hdev) ||
2406 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2408 MGMT_STATUS_NOT_SUPPORTED);
2410 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2412 MGMT_STATUS_REJECTED);
2415 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2419 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2422 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2423 MGMT_STATUS_FAILED);
2426 mgmt_pending_free(cmd);
2429 hci_dev_unlock(hdev);
2433 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2435 struct mgmt_mesh_tx *mesh_tx;
2436 struct mgmt_cp_mesh_send *send = data;
2437 struct mgmt_rp_mesh_read_features rp;
2441 if (!lmp_le_capable(hdev) ||
2442 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2444 MGMT_STATUS_NOT_SUPPORTED);
2445 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2446 len <= MGMT_MESH_SEND_SIZE ||
2447 len > (MGMT_MESH_SEND_SIZE + 31))
2448 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2449 MGMT_STATUS_REJECTED);
2453 memset(&rp, 0, sizeof(rp));
2454 rp.max_handles = MESH_HANDLES_MAX;
2456 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2458 if (rp.max_handles <= rp.used_handles) {
2459 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2464 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2465 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2470 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2471 mesh_send_start_complete);
2474 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2475 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2476 MGMT_STATUS_FAILED);
2480 mgmt_mesh_remove(mesh_tx);
2483 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2485 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2486 &mesh_tx->handle, 1);
2490 hci_dev_unlock(hdev);
2494 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2496 struct mgmt_mode *cp = data;
2497 struct mgmt_pending_cmd *cmd;
2501 bt_dev_dbg(hdev, "sock %p", sk);
2503 if (!lmp_le_capable(hdev))
2504 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2505 MGMT_STATUS_NOT_SUPPORTED);
2507 if (cp->val != 0x00 && cp->val != 0x01)
2508 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2509 MGMT_STATUS_INVALID_PARAMS);
2511 /* Bluetooth single mode LE only controllers or dual-mode
2512 * controllers configured as LE only devices, do not allow
2513 * switching LE off. These have either LE enabled explicitly
2514 * or BR/EDR has been previously switched off.
2516 * When trying to enable an already enabled LE, then gracefully
2517 * send a positive response. Trying to disable it however will
2518 * result into rejection.
2520 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2521 if (cp->val == 0x01)
2522 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2524 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2525 MGMT_STATUS_REJECTED);
2531 enabled = lmp_host_le_capable(hdev);
2533 if (!hdev_is_powered(hdev) || val == enabled) {
2534 bool changed = false;
2536 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2537 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2541 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2542 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2546 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2551 err = new_settings(hdev, sk);
2556 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2557 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2558 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2563 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2567 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2571 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2572 MGMT_STATUS_FAILED);
2575 mgmt_pending_remove(cmd);
2579 hci_dev_unlock(hdev);
2583 /* This is a helper function to test for pending mgmt commands that can
2584 * cause CoD or EIR HCI commands. We can only allow one such pending
2585 * mgmt command at a time since otherwise we cannot easily track what
2586 * the current values are, will be, and based on that calculate if a new
2587 * HCI command needs to be sent and if yes with what value.
2589 static bool pending_eir_or_class(struct hci_dev *hdev)
2591 struct mgmt_pending_cmd *cmd;
2593 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2594 switch (cmd->opcode) {
2595 case MGMT_OP_ADD_UUID:
2596 case MGMT_OP_REMOVE_UUID:
2597 case MGMT_OP_SET_DEV_CLASS:
2598 case MGMT_OP_SET_POWERED:
2606 static const u8 bluetooth_base_uuid[] = {
2607 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2608 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2611 static u8 get_uuid_size(const u8 *uuid)
2615 if (memcmp(uuid, bluetooth_base_uuid, 12))
2618 val = get_unaligned_le32(&uuid[12]);
2625 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2627 struct mgmt_pending_cmd *cmd = data;
2629 bt_dev_dbg(hdev, "err %d", err);
2631 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2632 mgmt_status(err), hdev->dev_class, 3);
2634 mgmt_pending_free(cmd);
2637 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2641 err = hci_update_class_sync(hdev);
2645 return hci_update_eir_sync(hdev);
2648 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2650 struct mgmt_cp_add_uuid *cp = data;
2651 struct mgmt_pending_cmd *cmd;
2652 struct bt_uuid *uuid;
2655 bt_dev_dbg(hdev, "sock %p", sk);
2659 if (pending_eir_or_class(hdev)) {
2660 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2665 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2671 memcpy(uuid->uuid, cp->uuid, 16);
2672 uuid->svc_hint = cp->svc_hint;
2673 uuid->size = get_uuid_size(cp->uuid);
2675 list_add_tail(&uuid->list, &hdev->uuids);
2677 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2683 /* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
2684 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2686 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2687 mgmt_class_complete);
2689 mgmt_pending_free(cmd);
2694 hci_dev_unlock(hdev);
2698 static bool enable_service_cache(struct hci_dev *hdev)
2700 if (!hdev_is_powered(hdev))
2703 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2704 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2712 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2716 err = hci_update_class_sync(hdev);
2720 return hci_update_eir_sync(hdev);
2723 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2726 struct mgmt_cp_remove_uuid *cp = data;
2727 struct mgmt_pending_cmd *cmd;
2728 struct bt_uuid *match, *tmp;
2729 static const u8 bt_uuid_any[] = {
2730 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2734 bt_dev_dbg(hdev, "sock %p", sk);
2738 if (pending_eir_or_class(hdev)) {
2739 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2744 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2745 hci_uuids_clear(hdev);
2747 if (enable_service_cache(hdev)) {
2748 err = mgmt_cmd_complete(sk, hdev->id,
2749 MGMT_OP_REMOVE_UUID,
2750 0, hdev->dev_class, 3);
2759 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2760 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2763 list_del(&match->list);
2769 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2770 MGMT_STATUS_INVALID_PARAMS);
2775 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2781 /* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
2782 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2784 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2785 mgmt_class_complete);
2787 mgmt_pending_free(cmd);
2790 hci_dev_unlock(hdev);
2794 static int set_class_sync(struct hci_dev *hdev, void *data)
2798 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2799 cancel_delayed_work_sync(&hdev->service_cache);
2800 err = hci_update_eir_sync(hdev);
2806 return hci_update_class_sync(hdev);
2809 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2812 struct mgmt_cp_set_dev_class *cp = data;
2813 struct mgmt_pending_cmd *cmd;
2816 bt_dev_dbg(hdev, "sock %p", sk);
2818 if (!lmp_bredr_capable(hdev))
2819 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2820 MGMT_STATUS_NOT_SUPPORTED);
2824 if (pending_eir_or_class(hdev)) {
2825 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2830 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2831 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2832 MGMT_STATUS_INVALID_PARAMS);
2836 hdev->major_class = cp->major;
2837 hdev->minor_class = cp->minor;
2839 if (!hdev_is_powered(hdev)) {
2840 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2841 hdev->dev_class, 3);
2845 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2851 /* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
2852 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2854 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2855 mgmt_class_complete);
2857 mgmt_pending_free(cmd);
2860 hci_dev_unlock(hdev);
2864 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2867 struct mgmt_cp_load_link_keys *cp = data;
2868 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2869 sizeof(struct mgmt_link_key_info));
2870 u16 key_count, expected_len;
2874 bt_dev_dbg(hdev, "sock %p", sk);
2876 if (!lmp_bredr_capable(hdev))
2877 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2878 MGMT_STATUS_NOT_SUPPORTED);
2880 key_count = __le16_to_cpu(cp->key_count);
2881 if (key_count > max_key_count) {
2882 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2884 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2885 MGMT_STATUS_INVALID_PARAMS);
2888 expected_len = struct_size(cp, keys, key_count);
2889 if (expected_len != len) {
2890 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2892 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2893 MGMT_STATUS_INVALID_PARAMS);
2896 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2897 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2898 MGMT_STATUS_INVALID_PARAMS);
2900 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2903 for (i = 0; i < key_count; i++) {
2904 struct mgmt_link_key_info *key = &cp->keys[i];
2906 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2907 if (key->type > 0x08)
2908 return mgmt_cmd_status(sk, hdev->id,
2909 MGMT_OP_LOAD_LINK_KEYS,
2910 MGMT_STATUS_INVALID_PARAMS);
2915 hci_link_keys_clear(hdev);
2918 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2920 changed = hci_dev_test_and_clear_flag(hdev,
2921 HCI_KEEP_DEBUG_KEYS);
2924 new_settings(hdev, NULL);
2926 for (i = 0; i < key_count; i++) {
2927 struct mgmt_link_key_info *key = &cp->keys[i];
2929 if (hci_is_blocked_key(hdev,
2930 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2932 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2937 /* Always ignore debug keys and require a new pairing if
2938 * the user wants to use them.
2940 if (key->type == HCI_LK_DEBUG_COMBINATION)
2943 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2944 key->type, key->pin_len, NULL);
2947 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2949 hci_dev_unlock(hdev);
2954 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2955 u8 addr_type, struct sock *skip_sk)
2957 struct mgmt_ev_device_unpaired ev;
2959 bacpy(&ev.addr.bdaddr, bdaddr);
2960 ev.addr.type = addr_type;
2962 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2966 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2968 struct mgmt_pending_cmd *cmd = data;
2969 struct mgmt_cp_unpair_device *cp = cmd->param;
2972 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2974 cmd->cmd_complete(cmd, err);
2975 mgmt_pending_free(cmd);
2978 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2980 struct mgmt_pending_cmd *cmd = data;
2981 struct mgmt_cp_unpair_device *cp = cmd->param;
2982 struct hci_conn *conn;
2984 if (cp->addr.type == BDADDR_BREDR)
2985 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2988 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2989 le_addr_type(cp->addr.type));
2994 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2997 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3000 struct mgmt_cp_unpair_device *cp = data;
3001 struct mgmt_rp_unpair_device rp;
3002 struct hci_conn_params *params;
3003 struct mgmt_pending_cmd *cmd;
3004 struct hci_conn *conn;
3008 memset(&rp, 0, sizeof(rp));
3009 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3010 rp.addr.type = cp->addr.type;
3012 if (!bdaddr_type_is_valid(cp->addr.type))
3013 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3014 MGMT_STATUS_INVALID_PARAMS,
3017 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3018 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3019 MGMT_STATUS_INVALID_PARAMS,
3024 if (!hdev_is_powered(hdev)) {
3025 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3026 MGMT_STATUS_NOT_POWERED, &rp,
3031 if (cp->addr.type == BDADDR_BREDR) {
3032 /* If disconnection is requested, then look up the
3033 * connection. If the remote device is connected, it
3034 * will be later used to terminate the link.
3036 * Setting it to NULL explicitly will cause no
3037 * termination of the link.
3040 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3045 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3047 err = mgmt_cmd_complete(sk, hdev->id,
3048 MGMT_OP_UNPAIR_DEVICE,
3049 MGMT_STATUS_NOT_PAIRED, &rp,
3057 /* LE address type */
3058 addr_type = le_addr_type(cp->addr.type);
3060 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3061 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3063 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3064 MGMT_STATUS_NOT_PAIRED, &rp,
3069 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3071 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3076 /* Defer clearing up the connection parameters until closing to
3077 * give a chance of keeping them if a repairing happens.
3079 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3081 /* Disable auto-connection parameters if present */
3082 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3084 if (params->explicit_connect)
3085 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3087 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3090 /* If disconnection is not requested, then clear the connection
3091 * variable so that the link is not terminated.
3093 if (!cp->disconnect)
3097 /* If the connection variable is set, then termination of the
3098 * link is requested.
3101 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3103 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3107 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3114 cmd->cmd_complete = addr_cmd_complete;
3116 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3117 unpair_device_complete);
3119 mgmt_pending_free(cmd);
3122 hci_dev_unlock(hdev);
3126 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3129 struct mgmt_cp_disconnect *cp = data;
3130 struct mgmt_rp_disconnect rp;
3131 struct mgmt_pending_cmd *cmd;
3132 struct hci_conn *conn;
3135 bt_dev_dbg(hdev, "sock %p", sk);
3137 memset(&rp, 0, sizeof(rp));
3138 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3139 rp.addr.type = cp->addr.type;
3141 if (!bdaddr_type_is_valid(cp->addr.type))
3142 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3143 MGMT_STATUS_INVALID_PARAMS,
3148 if (!test_bit(HCI_UP, &hdev->flags)) {
3149 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3150 MGMT_STATUS_NOT_POWERED, &rp,
3155 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3156 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3157 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3161 if (cp->addr.type == BDADDR_BREDR)
3162 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3165 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3166 le_addr_type(cp->addr.type));
3168 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3169 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3170 MGMT_STATUS_NOT_CONNECTED, &rp,
3175 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3181 cmd->cmd_complete = generic_cmd_complete;
3183 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3185 mgmt_pending_remove(cmd);
3188 hci_dev_unlock(hdev);
3192 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3194 switch (link_type) {
3196 switch (addr_type) {
3197 case ADDR_LE_DEV_PUBLIC:
3198 return BDADDR_LE_PUBLIC;
3201 /* Fallback to LE Random address type */
3202 return BDADDR_LE_RANDOM;
3206 /* Fallback to BR/EDR type */
3207 return BDADDR_BREDR;
3211 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3214 struct mgmt_rp_get_connections *rp;
3219 bt_dev_dbg(hdev, "sock %p", sk);
3223 if (!hdev_is_powered(hdev)) {
3224 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3225 MGMT_STATUS_NOT_POWERED);
3230 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3231 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3235 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3242 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3243 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3245 bacpy(&rp->addr[i].bdaddr, &c->dst);
3246 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3247 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3252 rp->conn_count = cpu_to_le16(i);
3254 /* Recalculate length in case of filtered SCO connections, etc */
3255 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3256 struct_size(rp, addr, i));
3261 hci_dev_unlock(hdev);
3265 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3266 struct mgmt_cp_pin_code_neg_reply *cp)
3268 struct mgmt_pending_cmd *cmd;
3271 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3276 cmd->cmd_complete = addr_cmd_complete;
3278 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3279 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3281 mgmt_pending_remove(cmd);
3286 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3289 struct hci_conn *conn;
3290 struct mgmt_cp_pin_code_reply *cp = data;
3291 struct hci_cp_pin_code_reply reply;
3292 struct mgmt_pending_cmd *cmd;
3295 bt_dev_dbg(hdev, "sock %p", sk);
3299 if (!hdev_is_powered(hdev)) {
3300 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3301 MGMT_STATUS_NOT_POWERED);
3305 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3307 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3308 MGMT_STATUS_NOT_CONNECTED);
3312 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3313 struct mgmt_cp_pin_code_neg_reply ncp;
3315 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3317 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3319 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3321 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3322 MGMT_STATUS_INVALID_PARAMS);
3327 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3333 cmd->cmd_complete = addr_cmd_complete;
3335 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3336 reply.pin_len = cp->pin_len;
3337 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3339 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3341 mgmt_pending_remove(cmd);
3344 hci_dev_unlock(hdev);
3348 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3351 struct mgmt_cp_set_io_capability *cp = data;
3353 bt_dev_dbg(hdev, "sock %p", sk);
3355 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3356 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3357 MGMT_STATUS_INVALID_PARAMS);
3361 hdev->io_capability = cp->io_capability;
3363 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3365 hci_dev_unlock(hdev);
3367 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3371 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3373 struct hci_dev *hdev = conn->hdev;
3374 struct mgmt_pending_cmd *cmd;
3376 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3377 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3380 if (cmd->user_data != conn)
3389 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3391 struct mgmt_rp_pair_device rp;
3392 struct hci_conn *conn = cmd->user_data;
3395 bacpy(&rp.addr.bdaddr, &conn->dst);
3396 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3398 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3399 status, &rp, sizeof(rp));
3401 /* So we don't get further callbacks for this connection */
3402 conn->connect_cfm_cb = NULL;
3403 conn->security_cfm_cb = NULL;
3404 conn->disconn_cfm_cb = NULL;
3406 hci_conn_drop(conn);
3408 /* The device is paired so there is no need to remove
3409 * its connection parameters anymore.
3411 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3418 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3420 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3421 struct mgmt_pending_cmd *cmd;
3423 cmd = find_pairing(conn);
3425 cmd->cmd_complete(cmd, status);
3426 mgmt_pending_remove(cmd);
3430 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3432 struct mgmt_pending_cmd *cmd;
3434 BT_DBG("status %u", status);
3436 cmd = find_pairing(conn);
3438 BT_DBG("Unable to find a pending command");
3442 cmd->cmd_complete(cmd, mgmt_status(status));
3443 mgmt_pending_remove(cmd);
3446 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3448 struct mgmt_pending_cmd *cmd;
3450 BT_DBG("status %u", status);
3455 cmd = find_pairing(conn);
3457 BT_DBG("Unable to find a pending command");
3461 cmd->cmd_complete(cmd, mgmt_status(status));
3462 mgmt_pending_remove(cmd);
3465 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3468 struct mgmt_cp_pair_device *cp = data;
3469 struct mgmt_rp_pair_device rp;
3470 struct mgmt_pending_cmd *cmd;
3471 u8 sec_level, auth_type;
3472 struct hci_conn *conn;
3475 bt_dev_dbg(hdev, "sock %p", sk);
3477 memset(&rp, 0, sizeof(rp));
3478 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3479 rp.addr.type = cp->addr.type;
3481 if (!bdaddr_type_is_valid(cp->addr.type))
3482 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3483 MGMT_STATUS_INVALID_PARAMS,
3486 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3487 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3488 MGMT_STATUS_INVALID_PARAMS,
3493 if (!hdev_is_powered(hdev)) {
3494 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3495 MGMT_STATUS_NOT_POWERED, &rp,
3500 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3501 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3502 MGMT_STATUS_ALREADY_PAIRED, &rp,
3507 sec_level = BT_SECURITY_MEDIUM;
3508 auth_type = HCI_AT_DEDICATED_BONDING;
3510 if (cp->addr.type == BDADDR_BREDR) {
3511 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3512 auth_type, CONN_REASON_PAIR_DEVICE);
3514 u8 addr_type = le_addr_type(cp->addr.type);
3515 struct hci_conn_params *p;
3517 /* When pairing a new device, it is expected to remember
3518 * this device for future connections. Adding the connection
3519 * parameter information ahead of time allows tracking
3520 * of the peripheral preferred values and will speed up any
3521 * further connection establishment.
3523 * If connection parameters already exist, then they
3524 * will be kept and this function does nothing.
3526 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3528 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3529 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3531 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3532 sec_level, HCI_LE_CONN_TIMEOUT,
3533 CONN_REASON_PAIR_DEVICE);
3539 if (PTR_ERR(conn) == -EBUSY)
3540 status = MGMT_STATUS_BUSY;
3541 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3542 status = MGMT_STATUS_NOT_SUPPORTED;
3543 else if (PTR_ERR(conn) == -ECONNREFUSED)
3544 status = MGMT_STATUS_REJECTED;
3546 status = MGMT_STATUS_CONNECT_FAILED;
3548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3549 status, &rp, sizeof(rp));
3553 if (conn->connect_cfm_cb) {
3554 hci_conn_drop(conn);
3555 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3556 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3560 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3563 hci_conn_drop(conn);
3567 cmd->cmd_complete = pairing_complete;
3569 /* For LE, just connecting isn't a proof that the pairing finished */
3570 if (cp->addr.type == BDADDR_BREDR) {
3571 conn->connect_cfm_cb = pairing_complete_cb;
3572 conn->security_cfm_cb = pairing_complete_cb;
3573 conn->disconn_cfm_cb = pairing_complete_cb;
3575 conn->connect_cfm_cb = le_pairing_complete_cb;
3576 conn->security_cfm_cb = le_pairing_complete_cb;
3577 conn->disconn_cfm_cb = le_pairing_complete_cb;
3580 conn->io_capability = cp->io_cap;
3581 cmd->user_data = hci_conn_get(conn);
3583 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3584 hci_conn_security(conn, sec_level, auth_type, true)) {
3585 cmd->cmd_complete(cmd, 0);
3586 mgmt_pending_remove(cmd);
3592 hci_dev_unlock(hdev);
3596 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3599 struct mgmt_addr_info *addr = data;
3600 struct mgmt_pending_cmd *cmd;
3601 struct hci_conn *conn;
3604 bt_dev_dbg(hdev, "sock %p", sk);
3608 if (!hdev_is_powered(hdev)) {
3609 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3610 MGMT_STATUS_NOT_POWERED);
3614 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3616 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3617 MGMT_STATUS_INVALID_PARAMS);
3621 conn = cmd->user_data;
3623 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3624 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3625 MGMT_STATUS_INVALID_PARAMS);
3629 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3630 mgmt_pending_remove(cmd);
3632 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3633 addr, sizeof(*addr));
3635 /* Since user doesn't want to proceed with the connection, abort any
3636 * ongoing pairing and then terminate the link if it was created
3637 * because of the pair device action.
3639 if (addr->type == BDADDR_BREDR)
3640 hci_remove_link_key(hdev, &addr->bdaddr);
3642 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3643 le_addr_type(addr->type));
3645 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3646 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3649 hci_dev_unlock(hdev);
3653 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3654 struct mgmt_addr_info *addr, u16 mgmt_op,
3655 u16 hci_op, __le32 passkey)
3657 struct mgmt_pending_cmd *cmd;
3658 struct hci_conn *conn;
3663 if (!hdev_is_powered(hdev)) {
3664 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3665 MGMT_STATUS_NOT_POWERED, addr,
3670 if (addr->type == BDADDR_BREDR)
3671 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3673 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3674 le_addr_type(addr->type));
3677 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3678 MGMT_STATUS_NOT_CONNECTED, addr,
3683 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3684 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3686 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3687 MGMT_STATUS_SUCCESS, addr,
3690 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3691 MGMT_STATUS_FAILED, addr,
3697 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3703 cmd->cmd_complete = addr_cmd_complete;
3705 /* Continue with pairing via HCI */
3706 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3707 struct hci_cp_user_passkey_reply cp;
3709 bacpy(&cp.bdaddr, &addr->bdaddr);
3710 cp.passkey = passkey;
3711 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3713 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3717 mgmt_pending_remove(cmd);
3720 hci_dev_unlock(hdev);
3724 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3725 void *data, u16 len)
3727 struct mgmt_cp_pin_code_neg_reply *cp = data;
3729 bt_dev_dbg(hdev, "sock %p", sk);
3731 return user_pairing_resp(sk, hdev, &cp->addr,
3732 MGMT_OP_PIN_CODE_NEG_REPLY,
3733 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3736 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3739 struct mgmt_cp_user_confirm_reply *cp = data;
3741 bt_dev_dbg(hdev, "sock %p", sk);
3743 if (len != sizeof(*cp))
3744 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3745 MGMT_STATUS_INVALID_PARAMS);
3747 return user_pairing_resp(sk, hdev, &cp->addr,
3748 MGMT_OP_USER_CONFIRM_REPLY,
3749 HCI_OP_USER_CONFIRM_REPLY, 0);
3752 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3753 void *data, u16 len)
3755 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3757 bt_dev_dbg(hdev, "sock %p", sk);
3759 return user_pairing_resp(sk, hdev, &cp->addr,
3760 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3761 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3764 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3767 struct mgmt_cp_user_passkey_reply *cp = data;
3769 bt_dev_dbg(hdev, "sock %p", sk);
3771 return user_pairing_resp(sk, hdev, &cp->addr,
3772 MGMT_OP_USER_PASSKEY_REPLY,
3773 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3776 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3777 void *data, u16 len)
3779 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3781 bt_dev_dbg(hdev, "sock %p", sk);
3783 return user_pairing_resp(sk, hdev, &cp->addr,
3784 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3785 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3788 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3790 struct adv_info *adv_instance;
3792 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3796 /* stop if current instance doesn't need to be changed */
3797 if (!(adv_instance->flags & flags))
3800 cancel_adv_timeout(hdev);
3802 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3806 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3811 static int name_changed_sync(struct hci_dev *hdev, void *data)
3813 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3816 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3818 struct mgmt_pending_cmd *cmd = data;
3819 struct mgmt_cp_set_local_name *cp = cmd->param;
3820 u8 status = mgmt_status(err);
3822 bt_dev_dbg(hdev, "err %d", err);
3824 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3828 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3831 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3834 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3835 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3838 mgmt_pending_remove(cmd);
3841 static int set_name_sync(struct hci_dev *hdev, void *data)
3843 if (lmp_bredr_capable(hdev)) {
3844 hci_update_name_sync(hdev);
3845 hci_update_eir_sync(hdev);
3848 /* The name is stored in the scan response data and so
3849 * no need to update the advertising data here.
3851 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3852 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3857 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3860 struct mgmt_cp_set_local_name *cp = data;
3861 struct mgmt_pending_cmd *cmd;
3864 bt_dev_dbg(hdev, "sock %p", sk);
3868 /* If the old values are the same as the new ones just return a
3869 * direct command complete event.
3871 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3872 !memcmp(hdev->short_name, cp->short_name,
3873 sizeof(hdev->short_name))) {
3874 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3879 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3881 if (!hdev_is_powered(hdev)) {
3882 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3884 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3889 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3890 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3891 ext_info_changed(hdev, sk);
3896 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3900 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3904 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3905 MGMT_STATUS_FAILED);
3908 mgmt_pending_remove(cmd);
3913 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3916 hci_dev_unlock(hdev);
3920 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3922 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3925 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3928 struct mgmt_cp_set_appearance *cp = data;
3932 bt_dev_dbg(hdev, "sock %p", sk);
3934 if (!lmp_le_capable(hdev))
3935 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3936 MGMT_STATUS_NOT_SUPPORTED);
3938 appearance = le16_to_cpu(cp->appearance);
3942 if (hdev->appearance != appearance) {
3943 hdev->appearance = appearance;
3945 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3946 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3949 ext_info_changed(hdev, sk);
3952 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3955 hci_dev_unlock(hdev);
3960 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3961 void *data, u16 len)
3963 struct mgmt_rp_get_phy_configuration rp;
3965 bt_dev_dbg(hdev, "sock %p", sk);
3969 memset(&rp, 0, sizeof(rp));
3971 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3972 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3973 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3975 hci_dev_unlock(hdev);
3977 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3981 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3983 struct mgmt_ev_phy_configuration_changed ev;
3985 memset(&ev, 0, sizeof(ev));
3987 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3989 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3993 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3995 struct mgmt_pending_cmd *cmd = data;
3996 struct sk_buff *skb = cmd->skb;
3997 u8 status = mgmt_status(err);
3999 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4004 status = MGMT_STATUS_FAILED;
4005 else if (IS_ERR(skb))
4006 status = mgmt_status(PTR_ERR(skb));
4008 status = mgmt_status(skb->data[0]);
4011 bt_dev_dbg(hdev, "status %d", status);
4014 mgmt_cmd_status(cmd->sk, hdev->id,
4015 MGMT_OP_SET_PHY_CONFIGURATION, status);
4017 mgmt_cmd_complete(cmd->sk, hdev->id,
4018 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4021 mgmt_phy_configuration_changed(hdev, cmd->sk);
4024 if (skb && !IS_ERR(skb))
4027 mgmt_pending_remove(cmd);
4030 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4032 struct mgmt_pending_cmd *cmd = data;
4033 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4034 struct hci_cp_le_set_default_phy cp_phy;
4035 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4037 memset(&cp_phy, 0, sizeof(cp_phy));
4039 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4040 cp_phy.all_phys |= 0x01;
4042 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4043 cp_phy.all_phys |= 0x02;
4045 if (selected_phys & MGMT_PHY_LE_1M_TX)
4046 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4048 if (selected_phys & MGMT_PHY_LE_2M_TX)
4049 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4051 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4052 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4054 if (selected_phys & MGMT_PHY_LE_1M_RX)
4055 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4057 if (selected_phys & MGMT_PHY_LE_2M_RX)
4058 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4060 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4061 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4063 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4064 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4069 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4070 void *data, u16 len)
4072 struct mgmt_cp_set_phy_configuration *cp = data;
4073 struct mgmt_pending_cmd *cmd;
4074 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4075 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4076 bool changed = false;
4079 bt_dev_dbg(hdev, "sock %p", sk);
4081 configurable_phys = get_configurable_phys(hdev);
4082 supported_phys = get_supported_phys(hdev);
4083 selected_phys = __le32_to_cpu(cp->selected_phys);
4085 if (selected_phys & ~supported_phys)
4086 return mgmt_cmd_status(sk, hdev->id,
4087 MGMT_OP_SET_PHY_CONFIGURATION,
4088 MGMT_STATUS_INVALID_PARAMS);
4090 unconfigure_phys = supported_phys & ~configurable_phys;
4092 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4093 return mgmt_cmd_status(sk, hdev->id,
4094 MGMT_OP_SET_PHY_CONFIGURATION,
4095 MGMT_STATUS_INVALID_PARAMS);
4097 if (selected_phys == get_selected_phys(hdev))
4098 return mgmt_cmd_complete(sk, hdev->id,
4099 MGMT_OP_SET_PHY_CONFIGURATION,
4104 if (!hdev_is_powered(hdev)) {
4105 err = mgmt_cmd_status(sk, hdev->id,
4106 MGMT_OP_SET_PHY_CONFIGURATION,
4107 MGMT_STATUS_REJECTED);
4111 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4112 err = mgmt_cmd_status(sk, hdev->id,
4113 MGMT_OP_SET_PHY_CONFIGURATION,
4118 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4119 pkt_type |= (HCI_DH3 | HCI_DM3);
4121 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4123 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4124 pkt_type |= (HCI_DH5 | HCI_DM5);
4126 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4128 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4129 pkt_type &= ~HCI_2DH1;
4131 pkt_type |= HCI_2DH1;
4133 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4134 pkt_type &= ~HCI_2DH3;
4136 pkt_type |= HCI_2DH3;
4138 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4139 pkt_type &= ~HCI_2DH5;
4141 pkt_type |= HCI_2DH5;
4143 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4144 pkt_type &= ~HCI_3DH1;
4146 pkt_type |= HCI_3DH1;
4148 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4149 pkt_type &= ~HCI_3DH3;
4151 pkt_type |= HCI_3DH3;
4153 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4154 pkt_type &= ~HCI_3DH5;
4156 pkt_type |= HCI_3DH5;
4158 if (pkt_type != hdev->pkt_type) {
4159 hdev->pkt_type = pkt_type;
4163 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4164 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4166 mgmt_phy_configuration_changed(hdev, sk);
4168 err = mgmt_cmd_complete(sk, hdev->id,
4169 MGMT_OP_SET_PHY_CONFIGURATION,
4175 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4180 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4181 set_default_phy_complete);
4184 err = mgmt_cmd_status(sk, hdev->id,
4185 MGMT_OP_SET_PHY_CONFIGURATION,
4186 MGMT_STATUS_FAILED);
4189 mgmt_pending_remove(cmd);
4193 hci_dev_unlock(hdev);
4198 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4201 int err = MGMT_STATUS_SUCCESS;
4202 struct mgmt_cp_set_blocked_keys *keys = data;
4203 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4204 sizeof(struct mgmt_blocked_key_info));
4205 u16 key_count, expected_len;
4208 bt_dev_dbg(hdev, "sock %p", sk);
4210 key_count = __le16_to_cpu(keys->key_count);
4211 if (key_count > max_key_count) {
4212 bt_dev_err(hdev, "too big key_count value %u", key_count);
4213 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4214 MGMT_STATUS_INVALID_PARAMS);
4217 expected_len = struct_size(keys, keys, key_count);
4218 if (expected_len != len) {
4219 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4221 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4222 MGMT_STATUS_INVALID_PARAMS);
4227 hci_blocked_keys_clear(hdev);
4229 for (i = 0; i < key_count; ++i) {
4230 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4233 err = MGMT_STATUS_NO_RESOURCES;
4237 b->type = keys->keys[i].type;
4238 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4239 list_add_rcu(&b->list, &hdev->blocked_keys);
4241 hci_dev_unlock(hdev);
4243 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4247 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4248 void *data, u16 len)
4250 struct mgmt_mode *cp = data;
4252 bool changed = false;
4254 bt_dev_dbg(hdev, "sock %p", sk);
4256 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4257 return mgmt_cmd_status(sk, hdev->id,
4258 MGMT_OP_SET_WIDEBAND_SPEECH,
4259 MGMT_STATUS_NOT_SUPPORTED);
4261 if (cp->val != 0x00 && cp->val != 0x01)
4262 return mgmt_cmd_status(sk, hdev->id,
4263 MGMT_OP_SET_WIDEBAND_SPEECH,
4264 MGMT_STATUS_INVALID_PARAMS);
4268 if (hdev_is_powered(hdev) &&
4269 !!cp->val != hci_dev_test_flag(hdev,
4270 HCI_WIDEBAND_SPEECH_ENABLED)) {
4271 err = mgmt_cmd_status(sk, hdev->id,
4272 MGMT_OP_SET_WIDEBAND_SPEECH,
4273 MGMT_STATUS_REJECTED);
4278 changed = !hci_dev_test_and_set_flag(hdev,
4279 HCI_WIDEBAND_SPEECH_ENABLED);
4281 changed = hci_dev_test_and_clear_flag(hdev,
4282 HCI_WIDEBAND_SPEECH_ENABLED);
4284 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4289 err = new_settings(hdev, sk);
4292 hci_dev_unlock(hdev);
4296 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4297 void *data, u16 data_len)
4300 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4303 u8 tx_power_range[2];
4305 bt_dev_dbg(hdev, "sock %p", sk);
4307 memset(&buf, 0, sizeof(buf));
4311 /* When the Read Simple Pairing Options command is supported, then
4312 * the remote public key validation is supported.
4314 * Alternatively, when Microsoft extensions are available, they can
4315 * indicate support for public key validation as well.
4317 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4318 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4320 flags |= 0x02; /* Remote public key validation (LE) */
4322 /* When the Read Encryption Key Size command is supported, then the
4323 * encryption key size is enforced.
4325 if (hdev->commands[20] & 0x10)
4326 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4328 flags |= 0x08; /* Encryption key size enforcement (LE) */
4330 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4333 /* When the Read Simple Pairing Options command is supported, then
4334 * also max encryption key size information is provided.
4336 if (hdev->commands[41] & 0x08)
4337 cap_len = eir_append_le16(rp->cap, cap_len,
4338 MGMT_CAP_MAX_ENC_KEY_SIZE,
4339 hdev->max_enc_key_size);
4341 cap_len = eir_append_le16(rp->cap, cap_len,
4342 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4343 SMP_MAX_ENC_KEY_SIZE);
4345 /* Append the min/max LE tx power parameters if we were able to fetch
4346 * it from the controller
4348 if (hdev->commands[38] & 0x80) {
4349 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4350 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4351 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4355 rp->cap_len = cpu_to_le16(cap_len);
4357 hci_dev_unlock(hdev);
4359 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4360 rp, sizeof(*rp) + cap_len);
4363 #ifdef CONFIG_BT_FEATURE_DEBUG
4364 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4365 static const u8 debug_uuid[16] = {
4366 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4367 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4371 /* 330859bc-7506-492d-9370-9a6f0614037f */
4372 static const u8 quality_report_uuid[16] = {
4373 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4374 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4377 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4378 static const u8 offload_codecs_uuid[16] = {
4379 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4380 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4383 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4384 static const u8 le_simultaneous_roles_uuid[16] = {
4385 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4386 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4389 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4390 static const u8 rpa_resolution_uuid[16] = {
4391 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4392 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4395 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4396 static const u8 iso_socket_uuid[16] = {
4397 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4398 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4401 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4402 static const u8 mgmt_mesh_uuid[16] = {
4403 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4404 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4407 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4408 void *data, u16 data_len)
4410 struct mgmt_rp_read_exp_features_info *rp;
4416 bt_dev_dbg(hdev, "sock %p", sk);
4418 /* Enough space for 7 features */
4419 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4420 rp = kzalloc(len, GFP_KERNEL);
4424 #ifdef CONFIG_BT_FEATURE_DEBUG
4426 flags = bt_dbg_get() ? BIT(0) : 0;
4428 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4429 rp->features[idx].flags = cpu_to_le32(flags);
4434 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4435 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4440 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4441 rp->features[idx].flags = cpu_to_le32(flags);
4445 if (hdev && ll_privacy_capable(hdev)) {
4446 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4447 flags = BIT(0) | BIT(1);
4451 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4452 rp->features[idx].flags = cpu_to_le32(flags);
4456 if (hdev && (aosp_has_quality_report(hdev) ||
4457 hdev->set_quality_report)) {
4458 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4463 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4464 rp->features[idx].flags = cpu_to_le32(flags);
4468 if (hdev && hdev->get_data_path_id) {
4469 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4474 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4475 rp->features[idx].flags = cpu_to_le32(flags);
4479 if (IS_ENABLED(CONFIG_BT_LE)) {
4480 flags = iso_enabled() ? BIT(0) : 0;
4481 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4482 rp->features[idx].flags = cpu_to_le32(flags);
4486 if (hdev && lmp_le_capable(hdev)) {
4487 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4492 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4493 rp->features[idx].flags = cpu_to_le32(flags);
4497 rp->feature_count = cpu_to_le16(idx);
4499 /* After reading the experimental features information, enable
4500 * the events to update client on any future change.
4502 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4504 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4505 MGMT_OP_READ_EXP_FEATURES_INFO,
4506 0, rp, sizeof(*rp) + (20 * idx));
4512 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4515 struct mgmt_ev_exp_feature_changed ev;
4517 memset(&ev, 0, sizeof(ev));
4518 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4519 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4521 // Do we need to be atomic with the conn_flags?
4522 if (enabled && privacy_mode_capable(hdev))
4523 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4525 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4527 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4529 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4533 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4534 bool enabled, struct sock *skip)
4536 struct mgmt_ev_exp_feature_changed ev;
4538 memset(&ev, 0, sizeof(ev));
4539 memcpy(ev.uuid, uuid, 16);
4540 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4542 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4544 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4547 #define EXP_FEAT(_uuid, _set_func) \
4550 .set_func = _set_func, \
4553 /* The zero key uuid is special. Multiple exp features are set through it. */
4554 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4555 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4557 struct mgmt_rp_set_exp_feature rp;
4559 memset(rp.uuid, 0, 16);
4560 rp.flags = cpu_to_le32(0);
4562 #ifdef CONFIG_BT_FEATURE_DEBUG
4564 bool changed = bt_dbg_get();
4569 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4573 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4576 changed = hci_dev_test_and_clear_flag(hdev,
4577 HCI_ENABLE_LL_PRIVACY);
4579 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4583 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4585 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4586 MGMT_OP_SET_EXP_FEATURE, 0,
4590 #ifdef CONFIG_BT_FEATURE_DEBUG
4591 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4592 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4594 struct mgmt_rp_set_exp_feature rp;
4599 /* Command requires to use the non-controller index */
4601 return mgmt_cmd_status(sk, hdev->id,
4602 MGMT_OP_SET_EXP_FEATURE,
4603 MGMT_STATUS_INVALID_INDEX);
4605 /* Parameters are limited to a single octet */
4606 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4607 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4608 MGMT_OP_SET_EXP_FEATURE,
4609 MGMT_STATUS_INVALID_PARAMS);
4611 /* Only boolean on/off is supported */
4612 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4613 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4614 MGMT_OP_SET_EXP_FEATURE,
4615 MGMT_STATUS_INVALID_PARAMS);
4617 val = !!cp->param[0];
4618 changed = val ? !bt_dbg_get() : bt_dbg_get();
4621 memcpy(rp.uuid, debug_uuid, 16);
4622 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4624 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4626 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4627 MGMT_OP_SET_EXP_FEATURE, 0,
4631 exp_feature_changed(hdev, debug_uuid, val, sk);
4637 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4638 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4640 struct mgmt_rp_set_exp_feature rp;
4644 /* Command requires to use the controller index */
4646 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4647 MGMT_OP_SET_EXP_FEATURE,
4648 MGMT_STATUS_INVALID_INDEX);
4650 /* Parameters are limited to a single octet */
4651 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4652 return mgmt_cmd_status(sk, hdev->id,
4653 MGMT_OP_SET_EXP_FEATURE,
4654 MGMT_STATUS_INVALID_PARAMS);
4656 /* Only boolean on/off is supported */
4657 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4658 return mgmt_cmd_status(sk, hdev->id,
4659 MGMT_OP_SET_EXP_FEATURE,
4660 MGMT_STATUS_INVALID_PARAMS);
4662 val = !!cp->param[0];
4665 changed = !hci_dev_test_and_set_flag(hdev,
4666 HCI_MESH_EXPERIMENTAL);
4668 hci_dev_clear_flag(hdev, HCI_MESH);
4669 changed = hci_dev_test_and_clear_flag(hdev,
4670 HCI_MESH_EXPERIMENTAL);
4673 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4674 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4676 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4678 err = mgmt_cmd_complete(sk, hdev->id,
4679 MGMT_OP_SET_EXP_FEATURE, 0,
4683 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4688 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4689 struct mgmt_cp_set_exp_feature *cp,
4692 struct mgmt_rp_set_exp_feature rp;
4697 /* Command requires to use the controller index */
4699 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4700 MGMT_OP_SET_EXP_FEATURE,
4701 MGMT_STATUS_INVALID_INDEX);
4703 /* Changes can only be made when controller is powered down */
4704 if (hdev_is_powered(hdev))
4705 return mgmt_cmd_status(sk, hdev->id,
4706 MGMT_OP_SET_EXP_FEATURE,
4707 MGMT_STATUS_REJECTED);
4709 /* Parameters are limited to a single octet */
4710 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4711 return mgmt_cmd_status(sk, hdev->id,
4712 MGMT_OP_SET_EXP_FEATURE,
4713 MGMT_STATUS_INVALID_PARAMS);
4715 /* Only boolean on/off is supported */
4716 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4717 return mgmt_cmd_status(sk, hdev->id,
4718 MGMT_OP_SET_EXP_FEATURE,
4719 MGMT_STATUS_INVALID_PARAMS);
4721 val = !!cp->param[0];
4724 changed = !hci_dev_test_and_set_flag(hdev,
4725 HCI_ENABLE_LL_PRIVACY);
4726 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4728 /* Enable LL privacy + supported settings changed */
4729 flags = BIT(0) | BIT(1);
4731 changed = hci_dev_test_and_clear_flag(hdev,
4732 HCI_ENABLE_LL_PRIVACY);
4734 /* Disable LL privacy + supported settings changed */
4738 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4739 rp.flags = cpu_to_le32(flags);
4741 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4743 err = mgmt_cmd_complete(sk, hdev->id,
4744 MGMT_OP_SET_EXP_FEATURE, 0,
4748 exp_ll_privacy_feature_changed(val, hdev, sk);
4753 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4754 struct mgmt_cp_set_exp_feature *cp,
4757 struct mgmt_rp_set_exp_feature rp;
4761 /* Command requires to use a valid controller index */
4763 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4764 MGMT_OP_SET_EXP_FEATURE,
4765 MGMT_STATUS_INVALID_INDEX);
4767 /* Parameters are limited to a single octet */
4768 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4769 return mgmt_cmd_status(sk, hdev->id,
4770 MGMT_OP_SET_EXP_FEATURE,
4771 MGMT_STATUS_INVALID_PARAMS);
4773 /* Only boolean on/off is supported */
4774 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4775 return mgmt_cmd_status(sk, hdev->id,
4776 MGMT_OP_SET_EXP_FEATURE,
4777 MGMT_STATUS_INVALID_PARAMS);
4779 hci_req_sync_lock(hdev);
4781 val = !!cp->param[0];
4782 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4784 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4785 err = mgmt_cmd_status(sk, hdev->id,
4786 MGMT_OP_SET_EXP_FEATURE,
4787 MGMT_STATUS_NOT_SUPPORTED);
4788 goto unlock_quality_report;
4792 if (hdev->set_quality_report)
4793 err = hdev->set_quality_report(hdev, val);
4795 err = aosp_set_quality_report(hdev, val);
4798 err = mgmt_cmd_status(sk, hdev->id,
4799 MGMT_OP_SET_EXP_FEATURE,
4800 MGMT_STATUS_FAILED);
4801 goto unlock_quality_report;
4805 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4807 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4810 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4812 memcpy(rp.uuid, quality_report_uuid, 16);
4813 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4814 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4816 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4820 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4822 unlock_quality_report:
4823 hci_req_sync_unlock(hdev);
4827 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4828 struct mgmt_cp_set_exp_feature *cp,
4833 struct mgmt_rp_set_exp_feature rp;
4835 /* Command requires to use a valid controller index */
4837 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4838 MGMT_OP_SET_EXP_FEATURE,
4839 MGMT_STATUS_INVALID_INDEX);
4841 /* Parameters are limited to a single octet */
4842 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4843 return mgmt_cmd_status(sk, hdev->id,
4844 MGMT_OP_SET_EXP_FEATURE,
4845 MGMT_STATUS_INVALID_PARAMS);
4847 /* Only boolean on/off is supported */
4848 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4849 return mgmt_cmd_status(sk, hdev->id,
4850 MGMT_OP_SET_EXP_FEATURE,
4851 MGMT_STATUS_INVALID_PARAMS);
4853 val = !!cp->param[0];
4854 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4856 if (!hdev->get_data_path_id) {
4857 return mgmt_cmd_status(sk, hdev->id,
4858 MGMT_OP_SET_EXP_FEATURE,
4859 MGMT_STATUS_NOT_SUPPORTED);
4864 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4866 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4869 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4872 memcpy(rp.uuid, offload_codecs_uuid, 16);
4873 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4874 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4875 err = mgmt_cmd_complete(sk, hdev->id,
4876 MGMT_OP_SET_EXP_FEATURE, 0,
4880 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4885 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4886 struct mgmt_cp_set_exp_feature *cp,
4891 struct mgmt_rp_set_exp_feature rp;
4893 /* Command requires to use a valid controller index */
4895 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4896 MGMT_OP_SET_EXP_FEATURE,
4897 MGMT_STATUS_INVALID_INDEX);
4899 /* Parameters are limited to a single octet */
4900 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4901 return mgmt_cmd_status(sk, hdev->id,
4902 MGMT_OP_SET_EXP_FEATURE,
4903 MGMT_STATUS_INVALID_PARAMS);
4905 /* Only boolean on/off is supported */
4906 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4907 return mgmt_cmd_status(sk, hdev->id,
4908 MGMT_OP_SET_EXP_FEATURE,
4909 MGMT_STATUS_INVALID_PARAMS);
4911 val = !!cp->param[0];
4912 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4914 if (!hci_dev_le_state_simultaneous(hdev)) {
4915 return mgmt_cmd_status(sk, hdev->id,
4916 MGMT_OP_SET_EXP_FEATURE,
4917 MGMT_STATUS_NOT_SUPPORTED);
4922 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4924 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4927 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4930 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4931 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4932 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4933 err = mgmt_cmd_complete(sk, hdev->id,
4934 MGMT_OP_SET_EXP_FEATURE, 0,
4938 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4944 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4945 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4947 struct mgmt_rp_set_exp_feature rp;
4948 bool val, changed = false;
4951 /* Command requires to use the non-controller index */
4953 return mgmt_cmd_status(sk, hdev->id,
4954 MGMT_OP_SET_EXP_FEATURE,
4955 MGMT_STATUS_INVALID_INDEX);
4957 /* Parameters are limited to a single octet */
4958 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4959 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4960 MGMT_OP_SET_EXP_FEATURE,
4961 MGMT_STATUS_INVALID_PARAMS);
4963 /* Only boolean on/off is supported */
4964 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4965 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4966 MGMT_OP_SET_EXP_FEATURE,
4967 MGMT_STATUS_INVALID_PARAMS);
4969 val = cp->param[0] ? true : false;
4978 memcpy(rp.uuid, iso_socket_uuid, 16);
4979 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4981 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4983 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4984 MGMT_OP_SET_EXP_FEATURE, 0,
4988 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4994 static const struct mgmt_exp_feature {
4996 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4997 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4998 } exp_features[] = {
4999 EXP_FEAT(ZERO_KEY, set_zero_key_func),
5000 #ifdef CONFIG_BT_FEATURE_DEBUG
5001 EXP_FEAT(debug_uuid, set_debug_func),
5003 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5004 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5005 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5006 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5007 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5009 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5012 /* end with a null feature */
5013 EXP_FEAT(NULL, NULL)
5016 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5017 void *data, u16 data_len)
5019 struct mgmt_cp_set_exp_feature *cp = data;
5022 bt_dev_dbg(hdev, "sock %p", sk);
5024 for (i = 0; exp_features[i].uuid; i++) {
5025 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5026 return exp_features[i].set_func(sk, hdev, cp, data_len);
5029 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5030 MGMT_OP_SET_EXP_FEATURE,
5031 MGMT_STATUS_NOT_SUPPORTED);
5034 static u32 get_params_flags(struct hci_dev *hdev,
5035 struct hci_conn_params *params)
5037 u32 flags = hdev->conn_flags;
5039 /* Devices using RPAs can only be programmed in the acceptlist if
5040 * LL Privacy has been enable otherwise they cannot mark
5041 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5043 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5044 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type))
5045 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5050 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5053 struct mgmt_cp_get_device_flags *cp = data;
5054 struct mgmt_rp_get_device_flags rp;
5055 struct bdaddr_list_with_flags *br_params;
5056 struct hci_conn_params *params;
5057 u32 supported_flags;
5058 u32 current_flags = 0;
5059 u8 status = MGMT_STATUS_INVALID_PARAMS;
5061 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5062 &cp->addr.bdaddr, cp->addr.type);
5066 supported_flags = hdev->conn_flags;
5068 memset(&rp, 0, sizeof(rp));
5070 if (cp->addr.type == BDADDR_BREDR) {
5071 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5077 current_flags = br_params->flags;
5079 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5080 le_addr_type(cp->addr.type));
5084 supported_flags = get_params_flags(hdev, params);
5085 current_flags = params->flags;
5088 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5089 rp.addr.type = cp->addr.type;
5090 rp.supported_flags = cpu_to_le32(supported_flags);
5091 rp.current_flags = cpu_to_le32(current_flags);
5093 status = MGMT_STATUS_SUCCESS;
5096 hci_dev_unlock(hdev);
5098 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5102 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5103 bdaddr_t *bdaddr, u8 bdaddr_type,
5104 u32 supported_flags, u32 current_flags)
5106 struct mgmt_ev_device_flags_changed ev;
5108 bacpy(&ev.addr.bdaddr, bdaddr);
5109 ev.addr.type = bdaddr_type;
5110 ev.supported_flags = cpu_to_le32(supported_flags);
5111 ev.current_flags = cpu_to_le32(current_flags);
5113 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5116 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5119 struct mgmt_cp_set_device_flags *cp = data;
5120 struct bdaddr_list_with_flags *br_params;
5121 struct hci_conn_params *params;
5122 u8 status = MGMT_STATUS_INVALID_PARAMS;
5123 u32 supported_flags;
5124 u32 current_flags = __le32_to_cpu(cp->current_flags);
5126 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5127 &cp->addr.bdaddr, cp->addr.type, current_flags);
5129 // We should take hci_dev_lock() early, I think.. conn_flags can change
5130 supported_flags = hdev->conn_flags;
5132 if ((supported_flags | current_flags) != supported_flags) {
5133 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5134 current_flags, supported_flags);
5140 if (cp->addr.type == BDADDR_BREDR) {
5141 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5146 br_params->flags = current_flags;
5147 status = MGMT_STATUS_SUCCESS;
5149 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5150 &cp->addr.bdaddr, cp->addr.type);
5156 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5157 le_addr_type(cp->addr.type));
5159 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5160 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5164 supported_flags = get_params_flags(hdev, params);
5166 if ((supported_flags | current_flags) != supported_flags) {
5167 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5168 current_flags, supported_flags);
5172 WRITE_ONCE(params->flags, current_flags);
5173 status = MGMT_STATUS_SUCCESS;
5175 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5178 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5179 hci_update_passive_scan(hdev);
5182 hci_dev_unlock(hdev);
5185 if (status == MGMT_STATUS_SUCCESS)
5186 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5187 supported_flags, current_flags);
5189 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5190 &cp->addr, sizeof(cp->addr));
5193 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5196 struct mgmt_ev_adv_monitor_added ev;
5198 ev.monitor_handle = cpu_to_le16(handle);
5200 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5203 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5205 struct mgmt_ev_adv_monitor_removed ev;
5206 struct mgmt_pending_cmd *cmd;
5207 struct sock *sk_skip = NULL;
5208 struct mgmt_cp_remove_adv_monitor *cp;
5210 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5214 if (cp->monitor_handle)
5218 ev.monitor_handle = cpu_to_le16(handle);
5220 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5223 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5224 void *data, u16 len)
5226 struct adv_monitor *monitor = NULL;
5227 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5230 __u32 supported = 0;
5232 __u16 num_handles = 0;
5233 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5235 BT_DBG("request for %s", hdev->name);
5239 if (msft_monitor_supported(hdev))
5240 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5242 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5243 handles[num_handles++] = monitor->handle;
5245 hci_dev_unlock(hdev);
5247 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5248 rp = kmalloc(rp_size, GFP_KERNEL);
5252 /* All supported features are currently enabled */
5253 enabled = supported;
5255 rp->supported_features = cpu_to_le32(supported);
5256 rp->enabled_features = cpu_to_le32(enabled);
5257 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5258 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5259 rp->num_handles = cpu_to_le16(num_handles);
5261 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5263 err = mgmt_cmd_complete(sk, hdev->id,
5264 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5265 MGMT_STATUS_SUCCESS, rp, rp_size);
5272 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5273 void *data, int status)
5275 struct mgmt_rp_add_adv_patterns_monitor rp;
5276 struct mgmt_pending_cmd *cmd = data;
5277 struct adv_monitor *monitor = cmd->user_data;
5281 rp.monitor_handle = cpu_to_le16(monitor->handle);
5284 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5285 hdev->adv_monitors_cnt++;
5286 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5287 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5288 hci_update_passive_scan(hdev);
5291 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5292 mgmt_status(status), &rp, sizeof(rp));
5293 mgmt_pending_remove(cmd);
5295 hci_dev_unlock(hdev);
5296 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5297 rp.monitor_handle, status);
5300 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5302 struct mgmt_pending_cmd *cmd = data;
5303 struct adv_monitor *monitor = cmd->user_data;
5305 return hci_add_adv_monitor(hdev, monitor);
5308 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5309 struct adv_monitor *m, u8 status,
5310 void *data, u16 len, u16 op)
5312 struct mgmt_pending_cmd *cmd;
5320 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5321 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5322 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5323 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5324 status = MGMT_STATUS_BUSY;
5328 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5330 status = MGMT_STATUS_NO_RESOURCES;
5335 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5336 mgmt_add_adv_patterns_monitor_complete);
5339 status = MGMT_STATUS_NO_RESOURCES;
5341 status = MGMT_STATUS_FAILED;
5346 hci_dev_unlock(hdev);
5351 hci_free_adv_monitor(hdev, m);
5352 hci_dev_unlock(hdev);
5353 return mgmt_cmd_status(sk, hdev->id, op, status);
5356 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5357 struct mgmt_adv_rssi_thresholds *rssi)
5360 m->rssi.low_threshold = rssi->low_threshold;
5361 m->rssi.low_threshold_timeout =
5362 __le16_to_cpu(rssi->low_threshold_timeout);
5363 m->rssi.high_threshold = rssi->high_threshold;
5364 m->rssi.high_threshold_timeout =
5365 __le16_to_cpu(rssi->high_threshold_timeout);
5366 m->rssi.sampling_period = rssi->sampling_period;
5368 /* Default values. These numbers are the least constricting
5369 * parameters for MSFT API to work, so it behaves as if there
5370 * are no rssi parameter to consider. May need to be changed
5371 * if other API are to be supported.
5373 m->rssi.low_threshold = -127;
5374 m->rssi.low_threshold_timeout = 60;
5375 m->rssi.high_threshold = -127;
5376 m->rssi.high_threshold_timeout = 0;
5377 m->rssi.sampling_period = 0;
5381 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5382 struct mgmt_adv_pattern *patterns)
5384 u8 offset = 0, length = 0;
5385 struct adv_pattern *p = NULL;
5388 for (i = 0; i < pattern_count; i++) {
5389 offset = patterns[i].offset;
5390 length = patterns[i].length;
5391 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5392 length > HCI_MAX_EXT_AD_LENGTH ||
5393 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5394 return MGMT_STATUS_INVALID_PARAMS;
5396 p = kmalloc(sizeof(*p), GFP_KERNEL);
5398 return MGMT_STATUS_NO_RESOURCES;
5400 p->ad_type = patterns[i].ad_type;
5401 p->offset = patterns[i].offset;
5402 p->length = patterns[i].length;
5403 memcpy(p->value, patterns[i].value, p->length);
5405 INIT_LIST_HEAD(&p->list);
5406 list_add(&p->list, &m->patterns);
5409 return MGMT_STATUS_SUCCESS;
5412 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5413 void *data, u16 len)
5415 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5416 struct adv_monitor *m = NULL;
5417 u8 status = MGMT_STATUS_SUCCESS;
5418 size_t expected_size = sizeof(*cp);
5420 BT_DBG("request for %s", hdev->name);
5422 if (len <= sizeof(*cp)) {
5423 status = MGMT_STATUS_INVALID_PARAMS;
5427 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5428 if (len != expected_size) {
5429 status = MGMT_STATUS_INVALID_PARAMS;
5433 m = kzalloc(sizeof(*m), GFP_KERNEL);
5435 status = MGMT_STATUS_NO_RESOURCES;
5439 INIT_LIST_HEAD(&m->patterns);
5441 parse_adv_monitor_rssi(m, NULL);
5442 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5445 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5446 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5449 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5450 void *data, u16 len)
5452 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5453 struct adv_monitor *m = NULL;
5454 u8 status = MGMT_STATUS_SUCCESS;
5455 size_t expected_size = sizeof(*cp);
5457 BT_DBG("request for %s", hdev->name);
5459 if (len <= sizeof(*cp)) {
5460 status = MGMT_STATUS_INVALID_PARAMS;
5464 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5465 if (len != expected_size) {
5466 status = MGMT_STATUS_INVALID_PARAMS;
5470 m = kzalloc(sizeof(*m), GFP_KERNEL);
5472 status = MGMT_STATUS_NO_RESOURCES;
5476 INIT_LIST_HEAD(&m->patterns);
5478 parse_adv_monitor_rssi(m, &cp->rssi);
5479 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5482 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5483 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5486 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5487 void *data, int status)
5489 struct mgmt_rp_remove_adv_monitor rp;
5490 struct mgmt_pending_cmd *cmd = data;
5491 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5495 rp.monitor_handle = cp->monitor_handle;
5498 hci_update_passive_scan(hdev);
5500 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5501 mgmt_status(status), &rp, sizeof(rp));
5502 mgmt_pending_remove(cmd);
5504 hci_dev_unlock(hdev);
5505 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5506 rp.monitor_handle, status);
5509 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5511 struct mgmt_pending_cmd *cmd = data;
5512 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5513 u16 handle = __le16_to_cpu(cp->monitor_handle);
5516 return hci_remove_all_adv_monitor(hdev);
5518 return hci_remove_single_adv_monitor(hdev, handle);
5521 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5522 void *data, u16 len)
5524 struct mgmt_pending_cmd *cmd;
5529 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5530 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5531 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5532 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5533 status = MGMT_STATUS_BUSY;
5537 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5539 status = MGMT_STATUS_NO_RESOURCES;
5543 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5544 mgmt_remove_adv_monitor_complete);
5547 mgmt_pending_remove(cmd);
5550 status = MGMT_STATUS_NO_RESOURCES;
5552 status = MGMT_STATUS_FAILED;
5557 hci_dev_unlock(hdev);
5562 hci_dev_unlock(hdev);
5563 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5567 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5569 struct mgmt_rp_read_local_oob_data mgmt_rp;
5570 size_t rp_size = sizeof(mgmt_rp);
5571 struct mgmt_pending_cmd *cmd = data;
5572 struct sk_buff *skb = cmd->skb;
5573 u8 status = mgmt_status(err);
5577 status = MGMT_STATUS_FAILED;
5578 else if (IS_ERR(skb))
5579 status = mgmt_status(PTR_ERR(skb));
5581 status = mgmt_status(skb->data[0]);
5584 bt_dev_dbg(hdev, "status %d", status);
5587 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5591 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5593 if (!bredr_sc_enabled(hdev)) {
5594 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5596 if (skb->len < sizeof(*rp)) {
5597 mgmt_cmd_status(cmd->sk, hdev->id,
5598 MGMT_OP_READ_LOCAL_OOB_DATA,
5599 MGMT_STATUS_FAILED);
5603 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5604 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5606 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5608 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5610 if (skb->len < sizeof(*rp)) {
5611 mgmt_cmd_status(cmd->sk, hdev->id,
5612 MGMT_OP_READ_LOCAL_OOB_DATA,
5613 MGMT_STATUS_FAILED);
5617 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5618 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5620 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5621 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5624 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5625 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5628 if (skb && !IS_ERR(skb))
5631 mgmt_pending_free(cmd);
5634 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5636 struct mgmt_pending_cmd *cmd = data;
5638 if (bredr_sc_enabled(hdev))
5639 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5641 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5643 if (IS_ERR(cmd->skb))
5644 return PTR_ERR(cmd->skb);
5649 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5650 void *data, u16 data_len)
5652 struct mgmt_pending_cmd *cmd;
5655 bt_dev_dbg(hdev, "sock %p", sk);
5659 if (!hdev_is_powered(hdev)) {
5660 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5661 MGMT_STATUS_NOT_POWERED);
5665 if (!lmp_ssp_capable(hdev)) {
5666 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5667 MGMT_STATUS_NOT_SUPPORTED);
5671 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5675 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5676 read_local_oob_data_complete);
5679 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5680 MGMT_STATUS_FAILED);
5683 mgmt_pending_free(cmd);
5687 hci_dev_unlock(hdev);
5691 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5692 void *data, u16 len)
5694 struct mgmt_addr_info *addr = data;
5697 bt_dev_dbg(hdev, "sock %p", sk);
5699 if (!bdaddr_type_is_valid(addr->type))
5700 return mgmt_cmd_complete(sk, hdev->id,
5701 MGMT_OP_ADD_REMOTE_OOB_DATA,
5702 MGMT_STATUS_INVALID_PARAMS,
5703 addr, sizeof(*addr));
5707 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5708 struct mgmt_cp_add_remote_oob_data *cp = data;
5711 if (cp->addr.type != BDADDR_BREDR) {
5712 err = mgmt_cmd_complete(sk, hdev->id,
5713 MGMT_OP_ADD_REMOTE_OOB_DATA,
5714 MGMT_STATUS_INVALID_PARAMS,
5715 &cp->addr, sizeof(cp->addr));
5719 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5720 cp->addr.type, cp->hash,
5721 cp->rand, NULL, NULL);
5723 status = MGMT_STATUS_FAILED;
5725 status = MGMT_STATUS_SUCCESS;
5727 err = mgmt_cmd_complete(sk, hdev->id,
5728 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5729 &cp->addr, sizeof(cp->addr));
5730 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5731 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5732 u8 *rand192, *hash192, *rand256, *hash256;
5735 if (bdaddr_type_is_le(cp->addr.type)) {
5736 /* Enforce zero-valued 192-bit parameters as
5737 * long as legacy SMP OOB isn't implemented.
5739 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5740 memcmp(cp->hash192, ZERO_KEY, 16)) {
5741 err = mgmt_cmd_complete(sk, hdev->id,
5742 MGMT_OP_ADD_REMOTE_OOB_DATA,
5743 MGMT_STATUS_INVALID_PARAMS,
5744 addr, sizeof(*addr));
5751 /* In case one of the P-192 values is set to zero,
5752 * then just disable OOB data for P-192.
5754 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5755 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5759 rand192 = cp->rand192;
5760 hash192 = cp->hash192;
5764 /* In case one of the P-256 values is set to zero, then just
5765 * disable OOB data for P-256.
5767 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5768 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5772 rand256 = cp->rand256;
5773 hash256 = cp->hash256;
5776 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5777 cp->addr.type, hash192, rand192,
5780 status = MGMT_STATUS_FAILED;
5782 status = MGMT_STATUS_SUCCESS;
5784 err = mgmt_cmd_complete(sk, hdev->id,
5785 MGMT_OP_ADD_REMOTE_OOB_DATA,
5786 status, &cp->addr, sizeof(cp->addr));
5788 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5790 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5791 MGMT_STATUS_INVALID_PARAMS);
5795 hci_dev_unlock(hdev);
5799 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5800 void *data, u16 len)
5802 struct mgmt_cp_remove_remote_oob_data *cp = data;
5806 bt_dev_dbg(hdev, "sock %p", sk);
5808 if (cp->addr.type != BDADDR_BREDR)
5809 return mgmt_cmd_complete(sk, hdev->id,
5810 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5811 MGMT_STATUS_INVALID_PARAMS,
5812 &cp->addr, sizeof(cp->addr));
5816 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5817 hci_remote_oob_data_clear(hdev);
5818 status = MGMT_STATUS_SUCCESS;
5822 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5824 status = MGMT_STATUS_INVALID_PARAMS;
5826 status = MGMT_STATUS_SUCCESS;
5829 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5830 status, &cp->addr, sizeof(cp->addr));
5832 hci_dev_unlock(hdev);
5836 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5838 struct mgmt_pending_cmd *cmd;
5840 bt_dev_dbg(hdev, "status %u", status);
5844 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5846 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5849 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5852 cmd->cmd_complete(cmd, mgmt_status(status));
5853 mgmt_pending_remove(cmd);
5856 hci_dev_unlock(hdev);
5859 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5860 uint8_t *mgmt_status)
5863 case DISCOV_TYPE_LE:
5864 *mgmt_status = mgmt_le_support(hdev);
5868 case DISCOV_TYPE_INTERLEAVED:
5869 *mgmt_status = mgmt_le_support(hdev);
5873 case DISCOV_TYPE_BREDR:
5874 *mgmt_status = mgmt_bredr_support(hdev);
5879 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5886 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5888 struct mgmt_pending_cmd *cmd = data;
5890 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5891 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5892 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5895 bt_dev_dbg(hdev, "err %d", err);
5897 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5899 mgmt_pending_remove(cmd);
5901 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5905 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5907 return hci_start_discovery_sync(hdev);
5910 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5911 u16 op, void *data, u16 len)
5913 struct mgmt_cp_start_discovery *cp = data;
5914 struct mgmt_pending_cmd *cmd;
5918 bt_dev_dbg(hdev, "sock %p", sk);
5922 if (!hdev_is_powered(hdev)) {
5923 err = mgmt_cmd_complete(sk, hdev->id, op,
5924 MGMT_STATUS_NOT_POWERED,
5925 &cp->type, sizeof(cp->type));
5929 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5930 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5931 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5932 &cp->type, sizeof(cp->type));
5936 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5937 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5938 &cp->type, sizeof(cp->type));
5942 /* Can't start discovery when it is paused */
5943 if (hdev->discovery_paused) {
5944 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5945 &cp->type, sizeof(cp->type));
5949 /* Clear the discovery filter first to free any previously
5950 * allocated memory for the UUID list.
5952 hci_discovery_filter_clear(hdev);
5954 hdev->discovery.type = cp->type;
5955 hdev->discovery.report_invalid_rssi = false;
5956 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5957 hdev->discovery.limited = true;
5959 hdev->discovery.limited = false;
5961 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5967 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5968 start_discovery_complete);
5970 mgmt_pending_remove(cmd);
5974 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5977 hci_dev_unlock(hdev);
5981 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5982 void *data, u16 len)
5984 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5988 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5989 void *data, u16 len)
5991 return start_discovery_internal(sk, hdev,
5992 MGMT_OP_START_LIMITED_DISCOVERY,
5996 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5997 void *data, u16 len)
5999 struct mgmt_cp_start_service_discovery *cp = data;
6000 struct mgmt_pending_cmd *cmd;
6001 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6002 u16 uuid_count, expected_len;
6006 bt_dev_dbg(hdev, "sock %p", sk);
6010 if (!hdev_is_powered(hdev)) {
6011 err = mgmt_cmd_complete(sk, hdev->id,
6012 MGMT_OP_START_SERVICE_DISCOVERY,
6013 MGMT_STATUS_NOT_POWERED,
6014 &cp->type, sizeof(cp->type));
6018 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6019 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6020 err = mgmt_cmd_complete(sk, hdev->id,
6021 MGMT_OP_START_SERVICE_DISCOVERY,
6022 MGMT_STATUS_BUSY, &cp->type,
6027 if (hdev->discovery_paused) {
6028 err = mgmt_cmd_complete(sk, hdev->id,
6029 MGMT_OP_START_SERVICE_DISCOVERY,
6030 MGMT_STATUS_BUSY, &cp->type,
6035 uuid_count = __le16_to_cpu(cp->uuid_count);
6036 if (uuid_count > max_uuid_count) {
6037 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6039 err = mgmt_cmd_complete(sk, hdev->id,
6040 MGMT_OP_START_SERVICE_DISCOVERY,
6041 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6046 expected_len = sizeof(*cp) + uuid_count * 16;
6047 if (expected_len != len) {
6048 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6050 err = mgmt_cmd_complete(sk, hdev->id,
6051 MGMT_OP_START_SERVICE_DISCOVERY,
6052 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6057 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6058 err = mgmt_cmd_complete(sk, hdev->id,
6059 MGMT_OP_START_SERVICE_DISCOVERY,
6060 status, &cp->type, sizeof(cp->type));
6064 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6071 /* Clear the discovery filter first to free any previously
6072 * allocated memory for the UUID list.
6074 hci_discovery_filter_clear(hdev);
6076 hdev->discovery.result_filtering = true;
6077 hdev->discovery.type = cp->type;
6078 hdev->discovery.rssi = cp->rssi;
6079 hdev->discovery.uuid_count = uuid_count;
6081 if (uuid_count > 0) {
6082 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6084 if (!hdev->discovery.uuids) {
6085 err = mgmt_cmd_complete(sk, hdev->id,
6086 MGMT_OP_START_SERVICE_DISCOVERY,
6088 &cp->type, sizeof(cp->type));
6089 mgmt_pending_remove(cmd);
6094 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6095 start_discovery_complete);
6097 mgmt_pending_remove(cmd);
6101 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6104 hci_dev_unlock(hdev);
6108 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6110 struct mgmt_pending_cmd *cmd;
6112 bt_dev_dbg(hdev, "status %u", status);
6116 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6118 cmd->cmd_complete(cmd, mgmt_status(status));
6119 mgmt_pending_remove(cmd);
6122 hci_dev_unlock(hdev);
6125 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6127 struct mgmt_pending_cmd *cmd = data;
6129 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6132 bt_dev_dbg(hdev, "err %d", err);
6134 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6136 mgmt_pending_remove(cmd);
6139 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6142 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6144 return hci_stop_discovery_sync(hdev);
6147 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6150 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6151 struct mgmt_pending_cmd *cmd;
6154 bt_dev_dbg(hdev, "sock %p", sk);
6158 if (!hci_discovery_active(hdev)) {
6159 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6160 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6161 sizeof(mgmt_cp->type));
6165 if (hdev->discovery.type != mgmt_cp->type) {
6166 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6167 MGMT_STATUS_INVALID_PARAMS,
6168 &mgmt_cp->type, sizeof(mgmt_cp->type));
6172 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6178 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6179 stop_discovery_complete);
6181 mgmt_pending_remove(cmd);
6185 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6188 hci_dev_unlock(hdev);
6192 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6195 struct mgmt_cp_confirm_name *cp = data;
6196 struct inquiry_entry *e;
6199 bt_dev_dbg(hdev, "sock %p", sk);
6203 if (!hci_discovery_active(hdev)) {
6204 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6205 MGMT_STATUS_FAILED, &cp->addr,
6210 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6212 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6213 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6218 if (cp->name_known) {
6219 e->name_state = NAME_KNOWN;
6222 e->name_state = NAME_NEEDED;
6223 hci_inquiry_cache_update_resolve(hdev, e);
6226 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6227 &cp->addr, sizeof(cp->addr));
6230 hci_dev_unlock(hdev);
6234 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6237 struct mgmt_cp_block_device *cp = data;
6241 bt_dev_dbg(hdev, "sock %p", sk);
6243 if (!bdaddr_type_is_valid(cp->addr.type))
6244 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6245 MGMT_STATUS_INVALID_PARAMS,
6246 &cp->addr, sizeof(cp->addr));
6250 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6253 status = MGMT_STATUS_FAILED;
6257 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6259 status = MGMT_STATUS_SUCCESS;
6262 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6263 &cp->addr, sizeof(cp->addr));
6265 hci_dev_unlock(hdev);
6270 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6273 struct mgmt_cp_unblock_device *cp = data;
6277 bt_dev_dbg(hdev, "sock %p", sk);
6279 if (!bdaddr_type_is_valid(cp->addr.type))
6280 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6281 MGMT_STATUS_INVALID_PARAMS,
6282 &cp->addr, sizeof(cp->addr));
6286 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6289 status = MGMT_STATUS_INVALID_PARAMS;
6293 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6295 status = MGMT_STATUS_SUCCESS;
6298 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6299 &cp->addr, sizeof(cp->addr));
6301 hci_dev_unlock(hdev);
6306 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6308 return hci_update_eir_sync(hdev);
6311 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6314 struct mgmt_cp_set_device_id *cp = data;
6318 bt_dev_dbg(hdev, "sock %p", sk);
6320 source = __le16_to_cpu(cp->source);
6322 if (source > 0x0002)
6323 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6324 MGMT_STATUS_INVALID_PARAMS);
6328 hdev->devid_source = source;
6329 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6330 hdev->devid_product = __le16_to_cpu(cp->product);
6331 hdev->devid_version = __le16_to_cpu(cp->version);
6333 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6336 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6338 hci_dev_unlock(hdev);
6343 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6346 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6348 bt_dev_dbg(hdev, "status %d", err);
6351 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6353 struct cmd_lookup match = { NULL, hdev };
6355 struct adv_info *adv_instance;
6356 u8 status = mgmt_status(err);
6359 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6360 cmd_status_rsp, &status);
6364 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6365 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6367 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6369 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6372 new_settings(hdev, match.sk);
6377 /* If "Set Advertising" was just disabled and instance advertising was
6378 * set up earlier, then re-enable multi-instance advertising.
6380 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6381 list_empty(&hdev->adv_instances))
6384 instance = hdev->cur_adv_instance;
6386 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6387 struct adv_info, list);
6391 instance = adv_instance->instance;
6394 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6396 enable_advertising_instance(hdev, err);
6399 static int set_adv_sync(struct hci_dev *hdev, void *data)
6401 struct mgmt_pending_cmd *cmd = data;
6402 struct mgmt_mode *cp = cmd->param;
6405 if (cp->val == 0x02)
6406 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6408 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6410 cancel_adv_timeout(hdev);
6413 /* Switch to instance "0" for the Set Advertising setting.
6414 * We cannot use update_[adv|scan_rsp]_data() here as the
6415 * HCI_ADVERTISING flag is not yet set.
6417 hdev->cur_adv_instance = 0x00;
6419 if (ext_adv_capable(hdev)) {
6420 hci_start_ext_adv_sync(hdev, 0x00);
6422 hci_update_adv_data_sync(hdev, 0x00);
6423 hci_update_scan_rsp_data_sync(hdev, 0x00);
6424 hci_enable_advertising_sync(hdev);
6427 hci_disable_advertising_sync(hdev);
6433 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6436 struct mgmt_mode *cp = data;
6437 struct mgmt_pending_cmd *cmd;
6441 bt_dev_dbg(hdev, "sock %p", sk);
6443 status = mgmt_le_support(hdev);
6445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6448 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6449 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6450 MGMT_STATUS_INVALID_PARAMS);
6452 if (hdev->advertising_paused)
6453 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6460 /* The following conditions are ones which mean that we should
6461 * not do any HCI communication but directly send a mgmt
6462 * response to user space (after toggling the flag if
6465 if (!hdev_is_powered(hdev) ||
6466 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6467 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6468 hci_dev_test_flag(hdev, HCI_MESH) ||
6469 hci_conn_num(hdev, LE_LINK) > 0 ||
6470 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6471 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6475 hdev->cur_adv_instance = 0x00;
6476 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6477 if (cp->val == 0x02)
6478 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6480 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6482 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6483 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6486 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6491 err = new_settings(hdev, sk);
6496 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6497 pending_find(MGMT_OP_SET_LE, hdev)) {
6498 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6503 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6507 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6508 set_advertising_complete);
6511 mgmt_pending_remove(cmd);
6514 hci_dev_unlock(hdev);
6518 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6519 void *data, u16 len)
6521 struct mgmt_cp_set_static_address *cp = data;
6524 bt_dev_dbg(hdev, "sock %p", sk);
6526 if (!lmp_le_capable(hdev))
6527 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6528 MGMT_STATUS_NOT_SUPPORTED);
6530 if (hdev_is_powered(hdev))
6531 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6532 MGMT_STATUS_REJECTED);
6534 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6535 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6536 return mgmt_cmd_status(sk, hdev->id,
6537 MGMT_OP_SET_STATIC_ADDRESS,
6538 MGMT_STATUS_INVALID_PARAMS);
6540 /* Two most significant bits shall be set */
6541 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6542 return mgmt_cmd_status(sk, hdev->id,
6543 MGMT_OP_SET_STATIC_ADDRESS,
6544 MGMT_STATUS_INVALID_PARAMS);
6549 bacpy(&hdev->static_addr, &cp->bdaddr);
6551 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6555 err = new_settings(hdev, sk);
6558 hci_dev_unlock(hdev);
6562 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6563 void *data, u16 len)
6565 struct mgmt_cp_set_scan_params *cp = data;
6566 __u16 interval, window;
6569 bt_dev_dbg(hdev, "sock %p", sk);
6571 if (!lmp_le_capable(hdev))
6572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6573 MGMT_STATUS_NOT_SUPPORTED);
6575 interval = __le16_to_cpu(cp->interval);
6577 if (interval < 0x0004 || interval > 0x4000)
6578 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6579 MGMT_STATUS_INVALID_PARAMS);
6581 window = __le16_to_cpu(cp->window);
6583 if (window < 0x0004 || window > 0x4000)
6584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6585 MGMT_STATUS_INVALID_PARAMS);
6587 if (window > interval)
6588 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6589 MGMT_STATUS_INVALID_PARAMS);
6593 hdev->le_scan_interval = interval;
6594 hdev->le_scan_window = window;
6596 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6599 /* If background scan is running, restart it so new parameters are
6602 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6603 hdev->discovery.state == DISCOVERY_STOPPED)
6604 hci_update_passive_scan(hdev);
6606 hci_dev_unlock(hdev);
6611 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6613 struct mgmt_pending_cmd *cmd = data;
6615 bt_dev_dbg(hdev, "err %d", err);
6618 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6621 struct mgmt_mode *cp = cmd->param;
6624 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6626 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6628 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6629 new_settings(hdev, cmd->sk);
6632 mgmt_pending_free(cmd);
6635 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6637 struct mgmt_pending_cmd *cmd = data;
6638 struct mgmt_mode *cp = cmd->param;
6640 return hci_write_fast_connectable_sync(hdev, cp->val);
6643 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6644 void *data, u16 len)
6646 struct mgmt_mode *cp = data;
6647 struct mgmt_pending_cmd *cmd;
6650 bt_dev_dbg(hdev, "sock %p", sk);
6652 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6653 hdev->hci_ver < BLUETOOTH_VER_1_2)
6654 return mgmt_cmd_status(sk, hdev->id,
6655 MGMT_OP_SET_FAST_CONNECTABLE,
6656 MGMT_STATUS_NOT_SUPPORTED);
6658 if (cp->val != 0x00 && cp->val != 0x01)
6659 return mgmt_cmd_status(sk, hdev->id,
6660 MGMT_OP_SET_FAST_CONNECTABLE,
6661 MGMT_STATUS_INVALID_PARAMS);
6665 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6666 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6670 if (!hdev_is_powered(hdev)) {
6671 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6672 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6673 new_settings(hdev, sk);
6677 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6682 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6683 fast_connectable_complete);
6686 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6687 MGMT_STATUS_FAILED);
6690 mgmt_pending_free(cmd);
6694 hci_dev_unlock(hdev);
6699 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6701 struct mgmt_pending_cmd *cmd = data;
6703 bt_dev_dbg(hdev, "err %d", err);
6706 u8 mgmt_err = mgmt_status(err);
6708 /* We need to restore the flag if related HCI commands
6711 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6713 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6715 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6716 new_settings(hdev, cmd->sk);
6719 mgmt_pending_free(cmd);
6722 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6726 status = hci_write_fast_connectable_sync(hdev, false);
6729 status = hci_update_scan_sync(hdev);
6731 /* Since only the advertising data flags will change, there
6732 * is no need to update the scan response data.
6735 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6740 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6742 struct mgmt_mode *cp = data;
6743 struct mgmt_pending_cmd *cmd;
6746 bt_dev_dbg(hdev, "sock %p", sk);
6748 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6750 MGMT_STATUS_NOT_SUPPORTED);
6752 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6753 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6754 MGMT_STATUS_REJECTED);
6756 if (cp->val != 0x00 && cp->val != 0x01)
6757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6758 MGMT_STATUS_INVALID_PARAMS);
6762 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6763 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6767 if (!hdev_is_powered(hdev)) {
6769 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6770 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6771 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6772 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6773 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6776 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6778 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6782 err = new_settings(hdev, sk);
6786 /* Reject disabling when powered on */
6788 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6789 MGMT_STATUS_REJECTED);
6792 /* When configuring a dual-mode controller to operate
6793 * with LE only and using a static address, then switching
6794 * BR/EDR back on is not allowed.
6796 * Dual-mode controllers shall operate with the public
6797 * address as its identity address for BR/EDR and LE. So
6798 * reject the attempt to create an invalid configuration.
6800 * The same restrictions applies when secure connections
6801 * has been enabled. For BR/EDR this is a controller feature
6802 * while for LE it is a host stack feature. This means that
6803 * switching BR/EDR back on when secure connections has been
6804 * enabled is not a supported transaction.
6806 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6807 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6808 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6809 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6810 MGMT_STATUS_REJECTED);
6815 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6819 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6820 set_bredr_complete);
6823 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6824 MGMT_STATUS_FAILED);
6826 mgmt_pending_free(cmd);
6831 /* We need to flip the bit already here so that
6832 * hci_req_update_adv_data generates the correct flags.
6834 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6837 hci_dev_unlock(hdev);
6841 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6843 struct mgmt_pending_cmd *cmd = data;
6844 struct mgmt_mode *cp;
6846 bt_dev_dbg(hdev, "err %d", err);
6849 u8 mgmt_err = mgmt_status(err);
6851 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6859 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6860 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6863 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6864 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6867 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6868 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6872 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6873 new_settings(hdev, cmd->sk);
6876 mgmt_pending_free(cmd);
6879 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6881 struct mgmt_pending_cmd *cmd = data;
6882 struct mgmt_mode *cp = cmd->param;
6885 /* Force write of val */
6886 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6888 return hci_write_sc_support_sync(hdev, val);
6891 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6892 void *data, u16 len)
6894 struct mgmt_mode *cp = data;
6895 struct mgmt_pending_cmd *cmd;
6899 bt_dev_dbg(hdev, "sock %p", sk);
6901 if (!lmp_sc_capable(hdev) &&
6902 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6903 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6904 MGMT_STATUS_NOT_SUPPORTED);
6906 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6907 lmp_sc_capable(hdev) &&
6908 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6910 MGMT_STATUS_REJECTED);
6912 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6913 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6914 MGMT_STATUS_INVALID_PARAMS);
6918 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6919 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6923 changed = !hci_dev_test_and_set_flag(hdev,
6925 if (cp->val == 0x02)
6926 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6928 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6930 changed = hci_dev_test_and_clear_flag(hdev,
6932 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6935 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6940 err = new_settings(hdev, sk);
6947 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6948 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6949 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6953 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6957 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6958 set_secure_conn_complete);
6961 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6962 MGMT_STATUS_FAILED);
6964 mgmt_pending_free(cmd);
6968 hci_dev_unlock(hdev);
6972 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6973 void *data, u16 len)
6975 struct mgmt_mode *cp = data;
6976 bool changed, use_changed;
6979 bt_dev_dbg(hdev, "sock %p", sk);
6981 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6983 MGMT_STATUS_INVALID_PARAMS);
6988 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6990 changed = hci_dev_test_and_clear_flag(hdev,
6991 HCI_KEEP_DEBUG_KEYS);
6993 if (cp->val == 0x02)
6994 use_changed = !hci_dev_test_and_set_flag(hdev,
6995 HCI_USE_DEBUG_KEYS);
6997 use_changed = hci_dev_test_and_clear_flag(hdev,
6998 HCI_USE_DEBUG_KEYS);
7000 if (hdev_is_powered(hdev) && use_changed &&
7001 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7002 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7003 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7004 sizeof(mode), &mode);
7007 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7012 err = new_settings(hdev, sk);
7015 hci_dev_unlock(hdev);
7019 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7022 struct mgmt_cp_set_privacy *cp = cp_data;
7026 bt_dev_dbg(hdev, "sock %p", sk);
7028 if (!lmp_le_capable(hdev))
7029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7030 MGMT_STATUS_NOT_SUPPORTED);
7032 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7033 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7034 MGMT_STATUS_INVALID_PARAMS);
7036 if (hdev_is_powered(hdev))
7037 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7038 MGMT_STATUS_REJECTED);
7042 /* If user space supports this command it is also expected to
7043 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7045 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7048 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7049 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7050 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7051 hci_adv_instances_set_rpa_expired(hdev, true);
7052 if (cp->privacy == 0x02)
7053 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7055 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7057 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7058 memset(hdev->irk, 0, sizeof(hdev->irk));
7059 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7060 hci_adv_instances_set_rpa_expired(hdev, false);
7061 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7064 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7069 err = new_settings(hdev, sk);
7072 hci_dev_unlock(hdev);
7076 static bool irk_is_valid(struct mgmt_irk_info *irk)
7078 switch (irk->addr.type) {
7079 case BDADDR_LE_PUBLIC:
7082 case BDADDR_LE_RANDOM:
7083 /* Two most significant bits shall be set */
7084 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7092 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7095 struct mgmt_cp_load_irks *cp = cp_data;
7096 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7097 sizeof(struct mgmt_irk_info));
7098 u16 irk_count, expected_len;
7101 bt_dev_dbg(hdev, "sock %p", sk);
7103 if (!lmp_le_capable(hdev))
7104 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7105 MGMT_STATUS_NOT_SUPPORTED);
7107 irk_count = __le16_to_cpu(cp->irk_count);
7108 if (irk_count > max_irk_count) {
7109 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7111 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7112 MGMT_STATUS_INVALID_PARAMS);
7115 expected_len = struct_size(cp, irks, irk_count);
7116 if (expected_len != len) {
7117 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7119 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7120 MGMT_STATUS_INVALID_PARAMS);
7123 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7125 for (i = 0; i < irk_count; i++) {
7126 struct mgmt_irk_info *key = &cp->irks[i];
7128 if (!irk_is_valid(key))
7129 return mgmt_cmd_status(sk, hdev->id,
7131 MGMT_STATUS_INVALID_PARAMS);
7136 hci_smp_irks_clear(hdev);
7138 for (i = 0; i < irk_count; i++) {
7139 struct mgmt_irk_info *irk = &cp->irks[i];
7140 u8 addr_type = le_addr_type(irk->addr.type);
7142 if (hci_is_blocked_key(hdev,
7143 HCI_BLOCKED_KEY_TYPE_IRK,
7145 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7150 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7151 if (irk->addr.type == BDADDR_BREDR)
7152 addr_type = BDADDR_BREDR;
7154 hci_add_irk(hdev, &irk->addr.bdaddr,
7155 addr_type, irk->val,
7159 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7161 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7163 hci_dev_unlock(hdev);
7168 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7170 if (key->initiator != 0x00 && key->initiator != 0x01)
7173 switch (key->addr.type) {
7174 case BDADDR_LE_PUBLIC:
7177 case BDADDR_LE_RANDOM:
7178 /* Two most significant bits shall be set */
7179 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7187 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7188 void *cp_data, u16 len)
7190 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7191 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7192 sizeof(struct mgmt_ltk_info));
7193 u16 key_count, expected_len;
7196 bt_dev_dbg(hdev, "sock %p", sk);
7198 if (!lmp_le_capable(hdev))
7199 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7200 MGMT_STATUS_NOT_SUPPORTED);
7202 key_count = __le16_to_cpu(cp->key_count);
7203 if (key_count > max_key_count) {
7204 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7206 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7207 MGMT_STATUS_INVALID_PARAMS);
7210 expected_len = struct_size(cp, keys, key_count);
7211 if (expected_len != len) {
7212 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7214 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7215 MGMT_STATUS_INVALID_PARAMS);
7218 bt_dev_dbg(hdev, "key_count %u", key_count);
7220 for (i = 0; i < key_count; i++) {
7221 struct mgmt_ltk_info *key = &cp->keys[i];
7223 if (!ltk_is_valid(key))
7224 return mgmt_cmd_status(sk, hdev->id,
7225 MGMT_OP_LOAD_LONG_TERM_KEYS,
7226 MGMT_STATUS_INVALID_PARAMS);
7231 hci_smp_ltks_clear(hdev);
7233 for (i = 0; i < key_count; i++) {
7234 struct mgmt_ltk_info *key = &cp->keys[i];
7235 u8 type, authenticated;
7236 u8 addr_type = le_addr_type(key->addr.type);
7238 if (hci_is_blocked_key(hdev,
7239 HCI_BLOCKED_KEY_TYPE_LTK,
7241 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7246 switch (key->type) {
7247 case MGMT_LTK_UNAUTHENTICATED:
7248 authenticated = 0x00;
7249 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7251 case MGMT_LTK_AUTHENTICATED:
7252 authenticated = 0x01;
7253 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7255 case MGMT_LTK_P256_UNAUTH:
7256 authenticated = 0x00;
7257 type = SMP_LTK_P256;
7259 case MGMT_LTK_P256_AUTH:
7260 authenticated = 0x01;
7261 type = SMP_LTK_P256;
7263 case MGMT_LTK_P256_DEBUG:
7264 authenticated = 0x00;
7265 type = SMP_LTK_P256_DEBUG;
7271 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7272 if (key->addr.type == BDADDR_BREDR)
7273 addr_type = BDADDR_BREDR;
7275 hci_add_ltk(hdev, &key->addr.bdaddr,
7276 addr_type, type, authenticated,
7277 key->val, key->enc_size, key->ediv, key->rand);
7280 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7283 hci_dev_unlock(hdev);
7288 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7290 struct mgmt_pending_cmd *cmd = data;
7291 struct hci_conn *conn = cmd->user_data;
7292 struct mgmt_cp_get_conn_info *cp = cmd->param;
7293 struct mgmt_rp_get_conn_info rp;
7296 bt_dev_dbg(hdev, "err %d", err);
7298 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7300 status = mgmt_status(err);
7301 if (status == MGMT_STATUS_SUCCESS) {
7302 rp.rssi = conn->rssi;
7303 rp.tx_power = conn->tx_power;
7304 rp.max_tx_power = conn->max_tx_power;
7306 rp.rssi = HCI_RSSI_INVALID;
7307 rp.tx_power = HCI_TX_POWER_INVALID;
7308 rp.max_tx_power = HCI_TX_POWER_INVALID;
7311 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7314 mgmt_pending_free(cmd);
7317 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7319 struct mgmt_pending_cmd *cmd = data;
7320 struct mgmt_cp_get_conn_info *cp = cmd->param;
7321 struct hci_conn *conn;
7325 /* Make sure we are still connected */
7326 if (cp->addr.type == BDADDR_BREDR)
7327 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7330 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7332 if (!conn || conn->state != BT_CONNECTED)
7333 return MGMT_STATUS_NOT_CONNECTED;
7335 cmd->user_data = conn;
7336 handle = cpu_to_le16(conn->handle);
7338 /* Refresh RSSI each time */
7339 err = hci_read_rssi_sync(hdev, handle);
7341 /* For LE links TX power does not change thus we don't need to
7342 * query for it once value is known.
7344 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7345 conn->tx_power == HCI_TX_POWER_INVALID))
7346 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7348 /* Max TX power needs to be read only once per connection */
7349 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7350 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7355 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7358 struct mgmt_cp_get_conn_info *cp = data;
7359 struct mgmt_rp_get_conn_info rp;
7360 struct hci_conn *conn;
7361 unsigned long conn_info_age;
7364 bt_dev_dbg(hdev, "sock %p", sk);
7366 memset(&rp, 0, sizeof(rp));
7367 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7368 rp.addr.type = cp->addr.type;
7370 if (!bdaddr_type_is_valid(cp->addr.type))
7371 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7372 MGMT_STATUS_INVALID_PARAMS,
7377 if (!hdev_is_powered(hdev)) {
7378 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7379 MGMT_STATUS_NOT_POWERED, &rp,
7384 if (cp->addr.type == BDADDR_BREDR)
7385 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7388 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7390 if (!conn || conn->state != BT_CONNECTED) {
7391 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7392 MGMT_STATUS_NOT_CONNECTED, &rp,
7397 /* To avoid client trying to guess when to poll again for information we
7398 * calculate conn info age as random value between min/max set in hdev.
7400 conn_info_age = hdev->conn_info_min_age +
7401 prandom_u32_max(hdev->conn_info_max_age -
7402 hdev->conn_info_min_age);
7404 /* Query controller to refresh cached values if they are too old or were
7407 if (time_after(jiffies, conn->conn_info_timestamp +
7408 msecs_to_jiffies(conn_info_age)) ||
7409 !conn->conn_info_timestamp) {
7410 struct mgmt_pending_cmd *cmd;
7412 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7417 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7418 cmd, get_conn_info_complete);
7422 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7423 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7426 mgmt_pending_free(cmd);
7431 conn->conn_info_timestamp = jiffies;
7433 /* Cache is valid, just reply with values cached in hci_conn */
7434 rp.rssi = conn->rssi;
7435 rp.tx_power = conn->tx_power;
7436 rp.max_tx_power = conn->max_tx_power;
7438 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7439 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7443 hci_dev_unlock(hdev);
7447 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7449 struct mgmt_pending_cmd *cmd = data;
7450 struct mgmt_cp_get_clock_info *cp = cmd->param;
7451 struct mgmt_rp_get_clock_info rp;
7452 struct hci_conn *conn = cmd->user_data;
7453 u8 status = mgmt_status(err);
7455 bt_dev_dbg(hdev, "err %d", err);
7457 memset(&rp, 0, sizeof(rp));
7458 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7459 rp.addr.type = cp->addr.type;
7464 rp.local_clock = cpu_to_le32(hdev->clock);
7467 rp.piconet_clock = cpu_to_le32(conn->clock);
7468 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7472 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7475 mgmt_pending_free(cmd);
7478 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7480 struct mgmt_pending_cmd *cmd = data;
7481 struct mgmt_cp_get_clock_info *cp = cmd->param;
7482 struct hci_cp_read_clock hci_cp;
7483 struct hci_conn *conn;
7485 memset(&hci_cp, 0, sizeof(hci_cp));
7486 hci_read_clock_sync(hdev, &hci_cp);
7488 /* Make sure connection still exists */
7489 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7490 if (!conn || conn->state != BT_CONNECTED)
7491 return MGMT_STATUS_NOT_CONNECTED;
7493 cmd->user_data = conn;
7494 hci_cp.handle = cpu_to_le16(conn->handle);
7495 hci_cp.which = 0x01; /* Piconet clock */
7497 return hci_read_clock_sync(hdev, &hci_cp);
7500 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7503 struct mgmt_cp_get_clock_info *cp = data;
7504 struct mgmt_rp_get_clock_info rp;
7505 struct mgmt_pending_cmd *cmd;
7506 struct hci_conn *conn;
7509 bt_dev_dbg(hdev, "sock %p", sk);
7511 memset(&rp, 0, sizeof(rp));
7512 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7513 rp.addr.type = cp->addr.type;
7515 if (cp->addr.type != BDADDR_BREDR)
7516 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7517 MGMT_STATUS_INVALID_PARAMS,
7522 if (!hdev_is_powered(hdev)) {
7523 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7524 MGMT_STATUS_NOT_POWERED, &rp,
7529 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7530 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7532 if (!conn || conn->state != BT_CONNECTED) {
7533 err = mgmt_cmd_complete(sk, hdev->id,
7534 MGMT_OP_GET_CLOCK_INFO,
7535 MGMT_STATUS_NOT_CONNECTED,
7543 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7547 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7548 get_clock_info_complete);
7551 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7552 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7555 mgmt_pending_free(cmd);
7560 hci_dev_unlock(hdev);
7564 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7566 struct hci_conn *conn;
7568 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7572 if (conn->dst_type != type)
7575 if (conn->state != BT_CONNECTED)
7581 /* This function requires the caller holds hdev->lock */
7582 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7583 u8 addr_type, u8 auto_connect)
7585 struct hci_conn_params *params;
7587 params = hci_conn_params_add(hdev, addr, addr_type);
7591 if (params->auto_connect == auto_connect)
7594 hci_pend_le_list_del_init(params);
7596 switch (auto_connect) {
7597 case HCI_AUTO_CONN_DISABLED:
7598 case HCI_AUTO_CONN_LINK_LOSS:
7599 /* If auto connect is being disabled when we're trying to
7600 * connect to device, keep connecting.
7602 if (params->explicit_connect)
7603 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7605 case HCI_AUTO_CONN_REPORT:
7606 if (params->explicit_connect)
7607 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7609 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7611 case HCI_AUTO_CONN_DIRECT:
7612 case HCI_AUTO_CONN_ALWAYS:
7613 if (!is_connected(hdev, addr, addr_type))
7614 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7618 params->auto_connect = auto_connect;
7620 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7621 addr, addr_type, auto_connect);
7626 static void device_added(struct sock *sk, struct hci_dev *hdev,
7627 bdaddr_t *bdaddr, u8 type, u8 action)
7629 struct mgmt_ev_device_added ev;
7631 bacpy(&ev.addr.bdaddr, bdaddr);
7632 ev.addr.type = type;
7635 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7638 static int add_device_sync(struct hci_dev *hdev, void *data)
7640 return hci_update_passive_scan_sync(hdev);
7643 static int add_device(struct sock *sk, struct hci_dev *hdev,
7644 void *data, u16 len)
7646 struct mgmt_cp_add_device *cp = data;
7647 u8 auto_conn, addr_type;
7648 struct hci_conn_params *params;
7650 u32 current_flags = 0;
7651 u32 supported_flags;
7653 bt_dev_dbg(hdev, "sock %p", sk);
7655 if (!bdaddr_type_is_valid(cp->addr.type) ||
7656 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7657 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7658 MGMT_STATUS_INVALID_PARAMS,
7659 &cp->addr, sizeof(cp->addr));
7661 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7662 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7663 MGMT_STATUS_INVALID_PARAMS,
7664 &cp->addr, sizeof(cp->addr));
7668 if (cp->addr.type == BDADDR_BREDR) {
7669 /* Only incoming connections action is supported for now */
7670 if (cp->action != 0x01) {
7671 err = mgmt_cmd_complete(sk, hdev->id,
7673 MGMT_STATUS_INVALID_PARAMS,
7674 &cp->addr, sizeof(cp->addr));
7678 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7684 hci_update_scan(hdev);
7689 addr_type = le_addr_type(cp->addr.type);
7691 if (cp->action == 0x02)
7692 auto_conn = HCI_AUTO_CONN_ALWAYS;
7693 else if (cp->action == 0x01)
7694 auto_conn = HCI_AUTO_CONN_DIRECT;
7696 auto_conn = HCI_AUTO_CONN_REPORT;
7698 /* Kernel internally uses conn_params with resolvable private
7699 * address, but Add Device allows only identity addresses.
7700 * Make sure it is enforced before calling
7701 * hci_conn_params_lookup.
7703 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7704 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7705 MGMT_STATUS_INVALID_PARAMS,
7706 &cp->addr, sizeof(cp->addr));
7710 /* If the connection parameters don't exist for this device,
7711 * they will be created and configured with defaults.
7713 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7715 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7716 MGMT_STATUS_FAILED, &cp->addr,
7720 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7723 current_flags = params->flags;
7726 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7731 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7732 supported_flags = hdev->conn_flags;
7733 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7734 supported_flags, current_flags);
7736 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7737 MGMT_STATUS_SUCCESS, &cp->addr,
7741 hci_dev_unlock(hdev);
7745 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7746 bdaddr_t *bdaddr, u8 type)
7748 struct mgmt_ev_device_removed ev;
7750 bacpy(&ev.addr.bdaddr, bdaddr);
7751 ev.addr.type = type;
7753 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7756 static int remove_device_sync(struct hci_dev *hdev, void *data)
7758 return hci_update_passive_scan_sync(hdev);
7761 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7762 void *data, u16 len)
7764 struct mgmt_cp_remove_device *cp = data;
7767 bt_dev_dbg(hdev, "sock %p", sk);
7771 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7772 struct hci_conn_params *params;
7775 if (!bdaddr_type_is_valid(cp->addr.type)) {
7776 err = mgmt_cmd_complete(sk, hdev->id,
7777 MGMT_OP_REMOVE_DEVICE,
7778 MGMT_STATUS_INVALID_PARAMS,
7779 &cp->addr, sizeof(cp->addr));
7783 if (cp->addr.type == BDADDR_BREDR) {
7784 err = hci_bdaddr_list_del(&hdev->accept_list,
7788 err = mgmt_cmd_complete(sk, hdev->id,
7789 MGMT_OP_REMOVE_DEVICE,
7790 MGMT_STATUS_INVALID_PARAMS,
7796 hci_update_scan(hdev);
7798 device_removed(sk, hdev, &cp->addr.bdaddr,
7803 addr_type = le_addr_type(cp->addr.type);
7805 /* Kernel internally uses conn_params with resolvable private
7806 * address, but Remove Device allows only identity addresses.
7807 * Make sure it is enforced before calling
7808 * hci_conn_params_lookup.
7810 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7811 err = mgmt_cmd_complete(sk, hdev->id,
7812 MGMT_OP_REMOVE_DEVICE,
7813 MGMT_STATUS_INVALID_PARAMS,
7814 &cp->addr, sizeof(cp->addr));
7818 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7821 err = mgmt_cmd_complete(sk, hdev->id,
7822 MGMT_OP_REMOVE_DEVICE,
7823 MGMT_STATUS_INVALID_PARAMS,
7824 &cp->addr, sizeof(cp->addr));
7828 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7829 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7830 err = mgmt_cmd_complete(sk, hdev->id,
7831 MGMT_OP_REMOVE_DEVICE,
7832 MGMT_STATUS_INVALID_PARAMS,
7833 &cp->addr, sizeof(cp->addr));
7837 hci_conn_params_free(params);
7839 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7841 struct hci_conn_params *p, *tmp;
7842 struct bdaddr_list *b, *btmp;
7844 if (cp->addr.type) {
7845 err = mgmt_cmd_complete(sk, hdev->id,
7846 MGMT_OP_REMOVE_DEVICE,
7847 MGMT_STATUS_INVALID_PARAMS,
7848 &cp->addr, sizeof(cp->addr));
7852 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7853 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7858 hci_update_scan(hdev);
7860 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7861 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7863 device_removed(sk, hdev, &p->addr, p->addr_type);
7864 if (p->explicit_connect) {
7865 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7868 hci_conn_params_free(p);
7871 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7874 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7877 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7878 MGMT_STATUS_SUCCESS, &cp->addr,
7881 hci_dev_unlock(hdev);
7885 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7888 struct mgmt_cp_load_conn_param *cp = data;
7889 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7890 sizeof(struct mgmt_conn_param));
7891 u16 param_count, expected_len;
7894 if (!lmp_le_capable(hdev))
7895 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7896 MGMT_STATUS_NOT_SUPPORTED);
7898 param_count = __le16_to_cpu(cp->param_count);
7899 if (param_count > max_param_count) {
7900 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7902 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7903 MGMT_STATUS_INVALID_PARAMS);
7906 expected_len = struct_size(cp, params, param_count);
7907 if (expected_len != len) {
7908 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7910 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7911 MGMT_STATUS_INVALID_PARAMS);
7914 bt_dev_dbg(hdev, "param_count %u", param_count);
7918 hci_conn_params_clear_disabled(hdev);
7920 for (i = 0; i < param_count; i++) {
7921 struct mgmt_conn_param *param = &cp->params[i];
7922 struct hci_conn_params *hci_param;
7923 u16 min, max, latency, timeout;
7926 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7929 if (param->addr.type == BDADDR_LE_PUBLIC) {
7930 addr_type = ADDR_LE_DEV_PUBLIC;
7931 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7932 addr_type = ADDR_LE_DEV_RANDOM;
7934 bt_dev_err(hdev, "ignoring invalid connection parameters");
7938 min = le16_to_cpu(param->min_interval);
7939 max = le16_to_cpu(param->max_interval);
7940 latency = le16_to_cpu(param->latency);
7941 timeout = le16_to_cpu(param->timeout);
7943 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7944 min, max, latency, timeout);
7946 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7947 bt_dev_err(hdev, "ignoring invalid connection parameters");
7951 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
7954 bt_dev_err(hdev, "failed to add connection parameters");
7958 hci_param->conn_min_interval = min;
7959 hci_param->conn_max_interval = max;
7960 hci_param->conn_latency = latency;
7961 hci_param->supervision_timeout = timeout;
7964 hci_dev_unlock(hdev);
7966 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7970 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7971 void *data, u16 len)
7973 struct mgmt_cp_set_external_config *cp = data;
7977 bt_dev_dbg(hdev, "sock %p", sk);
7979 if (hdev_is_powered(hdev))
7980 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7981 MGMT_STATUS_REJECTED);
7983 if (cp->config != 0x00 && cp->config != 0x01)
7984 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7985 MGMT_STATUS_INVALID_PARAMS);
7987 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7989 MGMT_STATUS_NOT_SUPPORTED);
7994 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7996 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7998 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8005 err = new_options(hdev, sk);
8007 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8008 mgmt_index_removed(hdev);
8010 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8011 hci_dev_set_flag(hdev, HCI_CONFIG);
8012 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8014 queue_work(hdev->req_workqueue, &hdev->power_on);
8016 set_bit(HCI_RAW, &hdev->flags);
8017 mgmt_index_added(hdev);
8022 hci_dev_unlock(hdev);
8026 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8027 void *data, u16 len)
8029 struct mgmt_cp_set_public_address *cp = data;
8033 bt_dev_dbg(hdev, "sock %p", sk);
8035 if (hdev_is_powered(hdev))
8036 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8037 MGMT_STATUS_REJECTED);
8039 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8040 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8041 MGMT_STATUS_INVALID_PARAMS);
8043 if (!hdev->set_bdaddr)
8044 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8045 MGMT_STATUS_NOT_SUPPORTED);
8049 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8050 bacpy(&hdev->public_addr, &cp->bdaddr);
8052 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8059 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8060 err = new_options(hdev, sk);
8062 if (is_configured(hdev)) {
8063 mgmt_index_removed(hdev);
8065 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8067 hci_dev_set_flag(hdev, HCI_CONFIG);
8068 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8070 queue_work(hdev->req_workqueue, &hdev->power_on);
8074 hci_dev_unlock(hdev);
8078 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8081 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8082 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8083 u8 *h192, *r192, *h256, *r256;
8084 struct mgmt_pending_cmd *cmd = data;
8085 struct sk_buff *skb = cmd->skb;
8086 u8 status = mgmt_status(err);
8089 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8094 status = MGMT_STATUS_FAILED;
8095 else if (IS_ERR(skb))
8096 status = mgmt_status(PTR_ERR(skb));
8098 status = mgmt_status(skb->data[0]);
8101 bt_dev_dbg(hdev, "status %u", status);
8103 mgmt_cp = cmd->param;
8106 status = mgmt_status(status);
8113 } else if (!bredr_sc_enabled(hdev)) {
8114 struct hci_rp_read_local_oob_data *rp;
8116 if (skb->len != sizeof(*rp)) {
8117 status = MGMT_STATUS_FAILED;
8120 status = MGMT_STATUS_SUCCESS;
8121 rp = (void *)skb->data;
8123 eir_len = 5 + 18 + 18;
8130 struct hci_rp_read_local_oob_ext_data *rp;
8132 if (skb->len != sizeof(*rp)) {
8133 status = MGMT_STATUS_FAILED;
8136 status = MGMT_STATUS_SUCCESS;
8137 rp = (void *)skb->data;
8139 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8140 eir_len = 5 + 18 + 18;
8144 eir_len = 5 + 18 + 18 + 18 + 18;
8154 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8161 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8162 hdev->dev_class, 3);
8165 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8166 EIR_SSP_HASH_C192, h192, 16);
8167 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8168 EIR_SSP_RAND_R192, r192, 16);
8172 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8173 EIR_SSP_HASH_C256, h256, 16);
8174 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8175 EIR_SSP_RAND_R256, r256, 16);
8179 mgmt_rp->type = mgmt_cp->type;
8180 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8182 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8183 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8184 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8185 if (err < 0 || status)
8188 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8190 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8191 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8192 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8194 if (skb && !IS_ERR(skb))
8198 mgmt_pending_remove(cmd);
8201 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8202 struct mgmt_cp_read_local_oob_ext_data *cp)
8204 struct mgmt_pending_cmd *cmd;
8207 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8212 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8213 read_local_oob_ext_data_complete);
8216 mgmt_pending_remove(cmd);
8223 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8224 void *data, u16 data_len)
8226 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8227 struct mgmt_rp_read_local_oob_ext_data *rp;
8230 u8 status, flags, role, addr[7], hash[16], rand[16];
8233 bt_dev_dbg(hdev, "sock %p", sk);
8235 if (hdev_is_powered(hdev)) {
8237 case BIT(BDADDR_BREDR):
8238 status = mgmt_bredr_support(hdev);
8244 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8245 status = mgmt_le_support(hdev);
8249 eir_len = 9 + 3 + 18 + 18 + 3;
8252 status = MGMT_STATUS_INVALID_PARAMS;
8257 status = MGMT_STATUS_NOT_POWERED;
8261 rp_len = sizeof(*rp) + eir_len;
8262 rp = kmalloc(rp_len, GFP_ATOMIC);
8266 if (!status && !lmp_ssp_capable(hdev)) {
8267 status = MGMT_STATUS_NOT_SUPPORTED;
8278 case BIT(BDADDR_BREDR):
8279 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8280 err = read_local_ssp_oob_req(hdev, sk, cp);
8281 hci_dev_unlock(hdev);
8285 status = MGMT_STATUS_FAILED;
8288 eir_len = eir_append_data(rp->eir, eir_len,
8290 hdev->dev_class, 3);
8293 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8294 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8295 smp_generate_oob(hdev, hash, rand) < 0) {
8296 hci_dev_unlock(hdev);
8297 status = MGMT_STATUS_FAILED;
8301 /* This should return the active RPA, but since the RPA
8302 * is only programmed on demand, it is really hard to fill
8303 * this in at the moment. For now disallow retrieving
8304 * local out-of-band data when privacy is in use.
8306 * Returning the identity address will not help here since
8307 * pairing happens before the identity resolving key is
8308 * known and thus the connection establishment happens
8309 * based on the RPA and not the identity address.
8311 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8312 hci_dev_unlock(hdev);
8313 status = MGMT_STATUS_REJECTED;
8317 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8318 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8319 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8320 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8321 memcpy(addr, &hdev->static_addr, 6);
8324 memcpy(addr, &hdev->bdaddr, 6);
8328 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8329 addr, sizeof(addr));
8331 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8336 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8337 &role, sizeof(role));
8339 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8340 eir_len = eir_append_data(rp->eir, eir_len,
8342 hash, sizeof(hash));
8344 eir_len = eir_append_data(rp->eir, eir_len,
8346 rand, sizeof(rand));
8349 flags = mgmt_get_adv_discov_flags(hdev);
8351 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8352 flags |= LE_AD_NO_BREDR;
8354 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8355 &flags, sizeof(flags));
8359 hci_dev_unlock(hdev);
8361 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8363 status = MGMT_STATUS_SUCCESS;
8366 rp->type = cp->type;
8367 rp->eir_len = cpu_to_le16(eir_len);
8369 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8370 status, rp, sizeof(*rp) + eir_len);
8371 if (err < 0 || status)
8374 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8375 rp, sizeof(*rp) + eir_len,
8376 HCI_MGMT_OOB_DATA_EVENTS, sk);
8384 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8388 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8389 flags |= MGMT_ADV_FLAG_DISCOV;
8390 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8391 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8392 flags |= MGMT_ADV_FLAG_APPEARANCE;
8393 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8394 flags |= MGMT_ADV_PARAM_DURATION;
8395 flags |= MGMT_ADV_PARAM_TIMEOUT;
8396 flags |= MGMT_ADV_PARAM_INTERVALS;
8397 flags |= MGMT_ADV_PARAM_TX_POWER;
8398 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8400 /* In extended adv TX_POWER returned from Set Adv Param
8401 * will be always valid.
8403 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8404 flags |= MGMT_ADV_FLAG_TX_POWER;
8406 if (ext_adv_capable(hdev)) {
8407 flags |= MGMT_ADV_FLAG_SEC_1M;
8408 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8409 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8411 if (hdev->le_features[1] & HCI_LE_PHY_2M)
8412 flags |= MGMT_ADV_FLAG_SEC_2M;
8414 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8415 flags |= MGMT_ADV_FLAG_SEC_CODED;
8421 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8422 void *data, u16 data_len)
8424 struct mgmt_rp_read_adv_features *rp;
8427 struct adv_info *adv_instance;
8428 u32 supported_flags;
8431 bt_dev_dbg(hdev, "sock %p", sk);
8433 if (!lmp_le_capable(hdev))
8434 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8435 MGMT_STATUS_REJECTED);
8439 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8440 rp = kmalloc(rp_len, GFP_ATOMIC);
8442 hci_dev_unlock(hdev);
8446 supported_flags = get_supported_adv_flags(hdev);
8448 rp->supported_flags = cpu_to_le32(supported_flags);
8449 rp->max_adv_data_len = max_adv_len(hdev);
8450 rp->max_scan_rsp_len = max_adv_len(hdev);
8451 rp->max_instances = hdev->le_num_of_adv_sets;
8452 rp->num_instances = hdev->adv_instance_cnt;
8454 instance = rp->instance;
8455 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8456 /* Only instances 1-le_num_of_adv_sets are externally visible */
8457 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8458 *instance = adv_instance->instance;
8461 rp->num_instances--;
8466 hci_dev_unlock(hdev);
8468 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8469 MGMT_STATUS_SUCCESS, rp, rp_len);
8476 static u8 calculate_name_len(struct hci_dev *hdev)
8478 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8480 return eir_append_local_name(hdev, buf, 0);
8483 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8486 u8 max_len = max_adv_len(hdev);
8489 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8490 MGMT_ADV_FLAG_LIMITED_DISCOV |
8491 MGMT_ADV_FLAG_MANAGED_FLAGS))
8494 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8497 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8498 max_len -= calculate_name_len(hdev);
8500 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8507 static bool flags_managed(u32 adv_flags)
8509 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8510 MGMT_ADV_FLAG_LIMITED_DISCOV |
8511 MGMT_ADV_FLAG_MANAGED_FLAGS);
8514 static bool tx_power_managed(u32 adv_flags)
8516 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8519 static bool name_managed(u32 adv_flags)
8521 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8524 static bool appearance_managed(u32 adv_flags)
8526 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8529 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8530 u8 len, bool is_adv_data)
8535 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8540 /* Make sure that the data is correctly formatted. */
8541 for (i = 0; i < len; i += (cur_len + 1)) {
8547 if (data[i + 1] == EIR_FLAGS &&
8548 (!is_adv_data || flags_managed(adv_flags)))
8551 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8554 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8557 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8560 if (data[i + 1] == EIR_APPEARANCE &&
8561 appearance_managed(adv_flags))
8564 /* If the current field length would exceed the total data
8565 * length, then it's invalid.
8567 if (i + cur_len >= len)
8574 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8576 u32 supported_flags, phy_flags;
8578 /* The current implementation only supports a subset of the specified
8579 * flags. Also need to check mutual exclusiveness of sec flags.
8581 supported_flags = get_supported_adv_flags(hdev);
8582 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8583 if (adv_flags & ~supported_flags ||
8584 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8590 static bool adv_busy(struct hci_dev *hdev)
8592 return pending_find(MGMT_OP_SET_LE, hdev);
8595 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8598 struct adv_info *adv, *n;
8600 bt_dev_dbg(hdev, "err %d", err);
8604 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8611 adv->pending = false;
8615 instance = adv->instance;
8617 if (hdev->cur_adv_instance == instance)
8618 cancel_adv_timeout(hdev);
8620 hci_remove_adv_instance(hdev, instance);
8621 mgmt_advertising_removed(sk, hdev, instance);
8624 hci_dev_unlock(hdev);
8627 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8629 struct mgmt_pending_cmd *cmd = data;
8630 struct mgmt_cp_add_advertising *cp = cmd->param;
8631 struct mgmt_rp_add_advertising rp;
8633 memset(&rp, 0, sizeof(rp));
8635 rp.instance = cp->instance;
8638 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8641 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8642 mgmt_status(err), &rp, sizeof(rp));
8644 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8646 mgmt_pending_free(cmd);
8649 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8651 struct mgmt_pending_cmd *cmd = data;
8652 struct mgmt_cp_add_advertising *cp = cmd->param;
8654 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8657 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8658 void *data, u16 data_len)
8660 struct mgmt_cp_add_advertising *cp = data;
8661 struct mgmt_rp_add_advertising rp;
8664 u16 timeout, duration;
8665 unsigned int prev_instance_cnt;
8666 u8 schedule_instance = 0;
8667 struct adv_info *adv, *next_instance;
8669 struct mgmt_pending_cmd *cmd;
8671 bt_dev_dbg(hdev, "sock %p", sk);
8673 status = mgmt_le_support(hdev);
8675 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8678 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8679 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8680 MGMT_STATUS_INVALID_PARAMS);
8682 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8683 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8684 MGMT_STATUS_INVALID_PARAMS);
8686 flags = __le32_to_cpu(cp->flags);
8687 timeout = __le16_to_cpu(cp->timeout);
8688 duration = __le16_to_cpu(cp->duration);
8690 if (!requested_adv_flags_are_valid(hdev, flags))
8691 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8692 MGMT_STATUS_INVALID_PARAMS);
8696 if (timeout && !hdev_is_powered(hdev)) {
8697 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8698 MGMT_STATUS_REJECTED);
8702 if (adv_busy(hdev)) {
8703 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8708 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8709 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8710 cp->scan_rsp_len, false)) {
8711 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8712 MGMT_STATUS_INVALID_PARAMS);
8716 prev_instance_cnt = hdev->adv_instance_cnt;
8718 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8719 cp->adv_data_len, cp->data,
8721 cp->data + cp->adv_data_len,
8723 HCI_ADV_TX_POWER_NO_PREFERENCE,
8724 hdev->le_adv_min_interval,
8725 hdev->le_adv_max_interval, 0);
8727 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8728 MGMT_STATUS_FAILED);
8732 /* Only trigger an advertising added event if a new instance was
8735 if (hdev->adv_instance_cnt > prev_instance_cnt)
8736 mgmt_advertising_added(sk, hdev, cp->instance);
8738 if (hdev->cur_adv_instance == cp->instance) {
8739 /* If the currently advertised instance is being changed then
8740 * cancel the current advertising and schedule the next
8741 * instance. If there is only one instance then the overridden
8742 * advertising data will be visible right away.
8744 cancel_adv_timeout(hdev);
8746 next_instance = hci_get_next_instance(hdev, cp->instance);
8748 schedule_instance = next_instance->instance;
8749 } else if (!hdev->adv_instance_timeout) {
8750 /* Immediately advertise the new instance if no other
8751 * instance is currently being advertised.
8753 schedule_instance = cp->instance;
8756 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8757 * there is no instance to be advertised then we have no HCI
8758 * communication to make. Simply return.
8760 if (!hdev_is_powered(hdev) ||
8761 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8762 !schedule_instance) {
8763 rp.instance = cp->instance;
8764 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8765 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8769 /* We're good to go, update advertising data, parameters, and start
8772 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8779 cp->instance = schedule_instance;
8781 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8782 add_advertising_complete);
8784 mgmt_pending_free(cmd);
8787 hci_dev_unlock(hdev);
8792 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8795 struct mgmt_pending_cmd *cmd = data;
8796 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8797 struct mgmt_rp_add_ext_adv_params rp;
8798 struct adv_info *adv;
8801 BT_DBG("%s", hdev->name);
8805 adv = hci_find_adv_instance(hdev, cp->instance);
8809 rp.instance = cp->instance;
8810 rp.tx_power = adv->tx_power;
8812 /* While we're at it, inform userspace of the available space for this
8813 * advertisement, given the flags that will be used.
8815 flags = __le32_to_cpu(cp->flags);
8816 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8817 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8820 /* If this advertisement was previously advertising and we
8821 * failed to update it, we signal that it has been removed and
8822 * delete its structure
8825 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8827 hci_remove_adv_instance(hdev, cp->instance);
8829 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8832 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8833 mgmt_status(err), &rp, sizeof(rp));
8838 mgmt_pending_free(cmd);
8840 hci_dev_unlock(hdev);
8843 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8845 struct mgmt_pending_cmd *cmd = data;
8846 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8848 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8851 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8852 void *data, u16 data_len)
8854 struct mgmt_cp_add_ext_adv_params *cp = data;
8855 struct mgmt_rp_add_ext_adv_params rp;
8856 struct mgmt_pending_cmd *cmd = NULL;
8857 struct adv_info *adv;
8858 u32 flags, min_interval, max_interval;
8859 u16 timeout, duration;
8864 BT_DBG("%s", hdev->name);
8866 status = mgmt_le_support(hdev);
8868 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8871 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8872 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8873 MGMT_STATUS_INVALID_PARAMS);
8875 /* The purpose of breaking add_advertising into two separate MGMT calls
8876 * for params and data is to allow more parameters to be added to this
8877 * structure in the future. For this reason, we verify that we have the
8878 * bare minimum structure we know of when the interface was defined. Any
8879 * extra parameters we don't know about will be ignored in this request.
8881 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8882 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8883 MGMT_STATUS_INVALID_PARAMS);
8885 flags = __le32_to_cpu(cp->flags);
8887 if (!requested_adv_flags_are_valid(hdev, flags))
8888 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8889 MGMT_STATUS_INVALID_PARAMS);
8893 /* In new interface, we require that we are powered to register */
8894 if (!hdev_is_powered(hdev)) {
8895 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8896 MGMT_STATUS_REJECTED);
8900 if (adv_busy(hdev)) {
8901 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8906 /* Parse defined parameters from request, use defaults otherwise */
8907 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8908 __le16_to_cpu(cp->timeout) : 0;
8910 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8911 __le16_to_cpu(cp->duration) :
8912 hdev->def_multi_adv_rotation_duration;
8914 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8915 __le32_to_cpu(cp->min_interval) :
8916 hdev->le_adv_min_interval;
8918 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8919 __le32_to_cpu(cp->max_interval) :
8920 hdev->le_adv_max_interval;
8922 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8924 HCI_ADV_TX_POWER_NO_PREFERENCE;
8926 /* Create advertising instance with no advertising or response data */
8927 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8928 timeout, duration, tx_power, min_interval,
8932 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8933 MGMT_STATUS_FAILED);
8937 /* Submit request for advertising params if ext adv available */
8938 if (ext_adv_capable(hdev)) {
8939 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8943 hci_remove_adv_instance(hdev, cp->instance);
8947 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8948 add_ext_adv_params_complete);
8950 mgmt_pending_free(cmd);
8952 rp.instance = cp->instance;
8953 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8954 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8955 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8956 err = mgmt_cmd_complete(sk, hdev->id,
8957 MGMT_OP_ADD_EXT_ADV_PARAMS,
8958 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8962 hci_dev_unlock(hdev);
8967 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8969 struct mgmt_pending_cmd *cmd = data;
8970 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8971 struct mgmt_rp_add_advertising rp;
8973 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8975 memset(&rp, 0, sizeof(rp));
8977 rp.instance = cp->instance;
8980 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8983 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8984 mgmt_status(err), &rp, sizeof(rp));
8986 mgmt_pending_free(cmd);
8989 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8991 struct mgmt_pending_cmd *cmd = data;
8992 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8995 if (ext_adv_capable(hdev)) {
8996 err = hci_update_adv_data_sync(hdev, cp->instance);
9000 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9004 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9007 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9010 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9013 struct mgmt_cp_add_ext_adv_data *cp = data;
9014 struct mgmt_rp_add_ext_adv_data rp;
9015 u8 schedule_instance = 0;
9016 struct adv_info *next_instance;
9017 struct adv_info *adv_instance;
9019 struct mgmt_pending_cmd *cmd;
9021 BT_DBG("%s", hdev->name);
9025 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9027 if (!adv_instance) {
9028 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9029 MGMT_STATUS_INVALID_PARAMS);
9033 /* In new interface, we require that we are powered to register */
9034 if (!hdev_is_powered(hdev)) {
9035 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9036 MGMT_STATUS_REJECTED);
9037 goto clear_new_instance;
9040 if (adv_busy(hdev)) {
9041 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9043 goto clear_new_instance;
9046 /* Validate new data */
9047 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9048 cp->adv_data_len, true) ||
9049 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9050 cp->adv_data_len, cp->scan_rsp_len, false)) {
9051 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9052 MGMT_STATUS_INVALID_PARAMS);
9053 goto clear_new_instance;
9056 /* Set the data in the advertising instance */
9057 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9058 cp->data, cp->scan_rsp_len,
9059 cp->data + cp->adv_data_len);
9061 /* If using software rotation, determine next instance to use */
9062 if (hdev->cur_adv_instance == cp->instance) {
9063 /* If the currently advertised instance is being changed
9064 * then cancel the current advertising and schedule the
9065 * next instance. If there is only one instance then the
9066 * overridden advertising data will be visible right
9069 cancel_adv_timeout(hdev);
9071 next_instance = hci_get_next_instance(hdev, cp->instance);
9073 schedule_instance = next_instance->instance;
9074 } else if (!hdev->adv_instance_timeout) {
9075 /* Immediately advertise the new instance if no other
9076 * instance is currently being advertised.
9078 schedule_instance = cp->instance;
9081 /* If the HCI_ADVERTISING flag is set or there is no instance to
9082 * be advertised then we have no HCI communication to make.
9085 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9086 if (adv_instance->pending) {
9087 mgmt_advertising_added(sk, hdev, cp->instance);
9088 adv_instance->pending = false;
9090 rp.instance = cp->instance;
9091 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9092 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9096 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9100 goto clear_new_instance;
9103 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9104 add_ext_adv_data_complete);
9106 mgmt_pending_free(cmd);
9107 goto clear_new_instance;
9110 /* We were successful in updating data, so trigger advertising_added
9111 * event if this is an instance that wasn't previously advertising. If
9112 * a failure occurs in the requests we initiated, we will remove the
9113 * instance again in add_advertising_complete
9115 if (adv_instance->pending)
9116 mgmt_advertising_added(sk, hdev, cp->instance);
9121 hci_remove_adv_instance(hdev, cp->instance);
9124 hci_dev_unlock(hdev);
9129 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9132 struct mgmt_pending_cmd *cmd = data;
9133 struct mgmt_cp_remove_advertising *cp = cmd->param;
9134 struct mgmt_rp_remove_advertising rp;
9136 bt_dev_dbg(hdev, "err %d", err);
9138 memset(&rp, 0, sizeof(rp));
9139 rp.instance = cp->instance;
9142 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9145 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9146 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9148 mgmt_pending_free(cmd);
9151 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9153 struct mgmt_pending_cmd *cmd = data;
9154 struct mgmt_cp_remove_advertising *cp = cmd->param;
9157 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9161 if (list_empty(&hdev->adv_instances))
9162 err = hci_disable_advertising_sync(hdev);
9167 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9168 void *data, u16 data_len)
9170 struct mgmt_cp_remove_advertising *cp = data;
9171 struct mgmt_pending_cmd *cmd;
9174 bt_dev_dbg(hdev, "sock %p", sk);
9178 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9179 err = mgmt_cmd_status(sk, hdev->id,
9180 MGMT_OP_REMOVE_ADVERTISING,
9181 MGMT_STATUS_INVALID_PARAMS);
9185 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9186 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9191 if (list_empty(&hdev->adv_instances)) {
9192 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9193 MGMT_STATUS_INVALID_PARAMS);
9197 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9204 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9205 remove_advertising_complete);
9207 mgmt_pending_free(cmd);
9210 hci_dev_unlock(hdev);
9215 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9216 void *data, u16 data_len)
9218 struct mgmt_cp_get_adv_size_info *cp = data;
9219 struct mgmt_rp_get_adv_size_info rp;
9220 u32 flags, supported_flags;
9222 bt_dev_dbg(hdev, "sock %p", sk);
9224 if (!lmp_le_capable(hdev))
9225 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9226 MGMT_STATUS_REJECTED);
9228 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9229 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9230 MGMT_STATUS_INVALID_PARAMS);
9232 flags = __le32_to_cpu(cp->flags);
9234 /* The current implementation only supports a subset of the specified
9237 supported_flags = get_supported_adv_flags(hdev);
9238 if (flags & ~supported_flags)
9239 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9240 MGMT_STATUS_INVALID_PARAMS);
9242 rp.instance = cp->instance;
9243 rp.flags = cp->flags;
9244 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9245 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9247 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9248 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9251 static const struct hci_mgmt_handler mgmt_handlers[] = {
9252 { NULL }, /* 0x0000 (no command) */
9253 { read_version, MGMT_READ_VERSION_SIZE,
9255 HCI_MGMT_UNTRUSTED },
9256 { read_commands, MGMT_READ_COMMANDS_SIZE,
9258 HCI_MGMT_UNTRUSTED },
9259 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9261 HCI_MGMT_UNTRUSTED },
9262 { read_controller_info, MGMT_READ_INFO_SIZE,
9263 HCI_MGMT_UNTRUSTED },
9264 { set_powered, MGMT_SETTING_SIZE },
9265 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9266 { set_connectable, MGMT_SETTING_SIZE },
9267 { set_fast_connectable, MGMT_SETTING_SIZE },
9268 { set_bondable, MGMT_SETTING_SIZE },
9269 { set_link_security, MGMT_SETTING_SIZE },
9270 { set_ssp, MGMT_SETTING_SIZE },
9271 { set_hs, MGMT_SETTING_SIZE },
9272 { set_le, MGMT_SETTING_SIZE },
9273 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9274 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9275 { add_uuid, MGMT_ADD_UUID_SIZE },
9276 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9277 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9279 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9281 { disconnect, MGMT_DISCONNECT_SIZE },
9282 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9283 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9284 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9285 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9286 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9287 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9288 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9289 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9290 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9291 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9292 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9293 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9294 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9296 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9297 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9298 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9299 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9300 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9301 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9302 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9303 { set_advertising, MGMT_SETTING_SIZE },
9304 { set_bredr, MGMT_SETTING_SIZE },
9305 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9306 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9307 { set_secure_conn, MGMT_SETTING_SIZE },
9308 { set_debug_keys, MGMT_SETTING_SIZE },
9309 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9310 { load_irks, MGMT_LOAD_IRKS_SIZE,
9312 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9313 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9314 { add_device, MGMT_ADD_DEVICE_SIZE },
9315 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9316 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9318 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9320 HCI_MGMT_UNTRUSTED },
9321 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9322 HCI_MGMT_UNCONFIGURED |
9323 HCI_MGMT_UNTRUSTED },
9324 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9325 HCI_MGMT_UNCONFIGURED },
9326 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9327 HCI_MGMT_UNCONFIGURED },
9328 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9330 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9331 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9333 HCI_MGMT_UNTRUSTED },
9334 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9335 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9337 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9338 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9339 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9340 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9341 HCI_MGMT_UNTRUSTED },
9342 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9343 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9344 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9345 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9347 { set_wideband_speech, MGMT_SETTING_SIZE },
9348 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9349 HCI_MGMT_UNTRUSTED },
9350 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9351 HCI_MGMT_UNTRUSTED |
9352 HCI_MGMT_HDEV_OPTIONAL },
9353 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9355 HCI_MGMT_HDEV_OPTIONAL },
9356 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9357 HCI_MGMT_UNTRUSTED },
9358 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9360 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9361 HCI_MGMT_UNTRUSTED },
9362 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9364 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9365 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9366 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9367 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9369 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9370 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9372 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9374 { add_adv_patterns_monitor_rssi,
9375 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9377 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9379 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9380 { mesh_send, MGMT_MESH_SEND_SIZE,
9382 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9385 void mgmt_index_added(struct hci_dev *hdev)
9387 struct mgmt_ev_ext_index ev;
9389 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9392 switch (hdev->dev_type) {
9394 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9395 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9396 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9399 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9400 HCI_MGMT_INDEX_EVENTS);
9413 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9414 HCI_MGMT_EXT_INDEX_EVENTS);
9417 void mgmt_index_removed(struct hci_dev *hdev)
9419 struct mgmt_ev_ext_index ev;
9420 u8 status = MGMT_STATUS_INVALID_INDEX;
9422 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9425 switch (hdev->dev_type) {
9427 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9429 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9430 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9431 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9434 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9435 HCI_MGMT_INDEX_EVENTS);
9448 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9449 HCI_MGMT_EXT_INDEX_EVENTS);
9451 /* Cancel any remaining timed work */
9452 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9454 cancel_delayed_work_sync(&hdev->discov_off);
9455 cancel_delayed_work_sync(&hdev->service_cache);
9456 cancel_delayed_work_sync(&hdev->rpa_expired);
9459 void mgmt_power_on(struct hci_dev *hdev, int err)
9461 struct cmd_lookup match = { NULL, hdev };
9463 bt_dev_dbg(hdev, "err %d", err);
9468 restart_le_actions(hdev);
9469 hci_update_passive_scan(hdev);
9472 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9474 new_settings(hdev, match.sk);
9479 hci_dev_unlock(hdev);
9482 void __mgmt_power_off(struct hci_dev *hdev)
9484 struct cmd_lookup match = { NULL, hdev };
9485 u8 status, zero_cod[] = { 0, 0, 0 };
9487 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9489 /* If the power off is because of hdev unregistration let
9490 * use the appropriate INVALID_INDEX status. Otherwise use
9491 * NOT_POWERED. We cover both scenarios here since later in
9492 * mgmt_index_removed() any hci_conn callbacks will have already
9493 * been triggered, potentially causing misleading DISCONNECTED
9496 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9497 status = MGMT_STATUS_INVALID_INDEX;
9499 status = MGMT_STATUS_NOT_POWERED;
9501 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9503 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9504 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9505 zero_cod, sizeof(zero_cod),
9506 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9507 ext_info_changed(hdev, NULL);
9510 new_settings(hdev, match.sk);
9516 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9518 struct mgmt_pending_cmd *cmd;
9521 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9525 if (err == -ERFKILL)
9526 status = MGMT_STATUS_RFKILLED;
9528 status = MGMT_STATUS_FAILED;
9530 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9532 mgmt_pending_remove(cmd);
9535 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9538 struct mgmt_ev_new_link_key ev;
9540 memset(&ev, 0, sizeof(ev));
9542 ev.store_hint = persistent;
9543 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9544 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9545 ev.key.type = key->type;
9546 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9547 ev.key.pin_len = key->pin_len;
9549 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9552 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9554 switch (ltk->type) {
9556 case SMP_LTK_RESPONDER:
9557 if (ltk->authenticated)
9558 return MGMT_LTK_AUTHENTICATED;
9559 return MGMT_LTK_UNAUTHENTICATED;
9561 if (ltk->authenticated)
9562 return MGMT_LTK_P256_AUTH;
9563 return MGMT_LTK_P256_UNAUTH;
9564 case SMP_LTK_P256_DEBUG:
9565 return MGMT_LTK_P256_DEBUG;
9568 return MGMT_LTK_UNAUTHENTICATED;
9571 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9573 struct mgmt_ev_new_long_term_key ev;
9575 memset(&ev, 0, sizeof(ev));
9577 /* Devices using resolvable or non-resolvable random addresses
9578 * without providing an identity resolving key don't require
9579 * to store long term keys. Their addresses will change the
9582 * Only when a remote device provides an identity address
9583 * make sure the long term key is stored. If the remote
9584 * identity is known, the long term keys are internally
9585 * mapped to the identity address. So allow static random
9586 * and public addresses here.
9588 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9589 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9590 ev.store_hint = 0x00;
9592 ev.store_hint = persistent;
9594 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9595 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9596 ev.key.type = mgmt_ltk_type(key);
9597 ev.key.enc_size = key->enc_size;
9598 ev.key.ediv = key->ediv;
9599 ev.key.rand = key->rand;
9601 if (key->type == SMP_LTK)
9602 ev.key.initiator = 1;
9604 /* Make sure we copy only the significant bytes based on the
9605 * encryption key size, and set the rest of the value to zeroes.
9607 memcpy(ev.key.val, key->val, key->enc_size);
9608 memset(ev.key.val + key->enc_size, 0,
9609 sizeof(ev.key.val) - key->enc_size);
9611 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9614 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9616 struct mgmt_ev_new_irk ev;
9618 memset(&ev, 0, sizeof(ev));
9620 ev.store_hint = persistent;
9622 bacpy(&ev.rpa, &irk->rpa);
9623 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9624 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
9625 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9627 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9630 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9633 struct mgmt_ev_new_csrk ev;
9635 memset(&ev, 0, sizeof(ev));
9637 /* Devices using resolvable or non-resolvable random addresses
9638 * without providing an identity resolving key don't require
9639 * to store signature resolving keys. Their addresses will change
9640 * the next time around.
9642 * Only when a remote device provides an identity address
9643 * make sure the signature resolving key is stored. So allow
9644 * static random and public addresses here.
9646 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9647 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9648 ev.store_hint = 0x00;
9650 ev.store_hint = persistent;
9652 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9653 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9654 ev.key.type = csrk->type;
9655 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9657 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9660 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9661 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9662 u16 max_interval, u16 latency, u16 timeout)
9664 struct mgmt_ev_new_conn_param ev;
9666 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9669 memset(&ev, 0, sizeof(ev));
9670 bacpy(&ev.addr.bdaddr, bdaddr);
9671 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9672 ev.store_hint = store_hint;
9673 ev.min_interval = cpu_to_le16(min_interval);
9674 ev.max_interval = cpu_to_le16(max_interval);
9675 ev.latency = cpu_to_le16(latency);
9676 ev.timeout = cpu_to_le16(timeout);
9678 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9681 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9682 u8 *name, u8 name_len)
9684 struct sk_buff *skb;
9685 struct mgmt_ev_device_connected *ev;
9689 /* allocate buff for LE or BR/EDR adv */
9690 if (conn->le_adv_data_len > 0)
9691 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9692 sizeof(*ev) + conn->le_adv_data_len);
9694 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9695 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9696 eir_precalc_len(sizeof(conn->dev_class)));
9698 ev = skb_put(skb, sizeof(*ev));
9699 bacpy(&ev->addr.bdaddr, &conn->dst);
9700 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9703 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9705 ev->flags = __cpu_to_le32(flags);
9707 /* We must ensure that the EIR Data fields are ordered and
9708 * unique. Keep it simple for now and avoid the problem by not
9709 * adding any BR/EDR data to the LE adv.
9711 if (conn->le_adv_data_len > 0) {
9712 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9713 eir_len = conn->le_adv_data_len;
9716 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9718 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9719 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9720 conn->dev_class, sizeof(conn->dev_class));
9723 ev->eir_len = cpu_to_le16(eir_len);
9725 mgmt_event_skb(skb, NULL);
9728 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9730 struct sock **sk = data;
9732 cmd->cmd_complete(cmd, 0);
9737 mgmt_pending_remove(cmd);
9740 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9742 struct hci_dev *hdev = data;
9743 struct mgmt_cp_unpair_device *cp = cmd->param;
9745 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9747 cmd->cmd_complete(cmd, 0);
9748 mgmt_pending_remove(cmd);
9751 bool mgmt_powering_down(struct hci_dev *hdev)
9753 struct mgmt_pending_cmd *cmd;
9754 struct mgmt_mode *cp;
9756 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9767 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9768 u8 link_type, u8 addr_type, u8 reason,
9769 bool mgmt_connected)
9771 struct mgmt_ev_device_disconnected ev;
9772 struct sock *sk = NULL;
9774 if (!mgmt_connected)
9777 if (link_type != ACL_LINK && link_type != LE_LINK)
9780 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9782 bacpy(&ev.addr.bdaddr, bdaddr);
9783 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9786 /* Report disconnects due to suspend */
9787 if (hdev->suspended)
9788 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9790 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9795 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9799 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9800 u8 link_type, u8 addr_type, u8 status)
9802 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9803 struct mgmt_cp_disconnect *cp;
9804 struct mgmt_pending_cmd *cmd;
9806 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9809 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9815 if (bacmp(bdaddr, &cp->addr.bdaddr))
9818 if (cp->addr.type != bdaddr_type)
9821 cmd->cmd_complete(cmd, mgmt_status(status));
9822 mgmt_pending_remove(cmd);
9825 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9826 u8 addr_type, u8 status)
9828 struct mgmt_ev_connect_failed ev;
9830 bacpy(&ev.addr.bdaddr, bdaddr);
9831 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9832 ev.status = mgmt_status(status);
9834 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9837 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9839 struct mgmt_ev_pin_code_request ev;
9841 bacpy(&ev.addr.bdaddr, bdaddr);
9842 ev.addr.type = BDADDR_BREDR;
9845 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9848 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9851 struct mgmt_pending_cmd *cmd;
9853 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9857 cmd->cmd_complete(cmd, mgmt_status(status));
9858 mgmt_pending_remove(cmd);
9861 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9864 struct mgmt_pending_cmd *cmd;
9866 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9870 cmd->cmd_complete(cmd, mgmt_status(status));
9871 mgmt_pending_remove(cmd);
9874 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9875 u8 link_type, u8 addr_type, u32 value,
9878 struct mgmt_ev_user_confirm_request ev;
9880 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9882 bacpy(&ev.addr.bdaddr, bdaddr);
9883 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9884 ev.confirm_hint = confirm_hint;
9885 ev.value = cpu_to_le32(value);
9887 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9891 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9892 u8 link_type, u8 addr_type)
9894 struct mgmt_ev_user_passkey_request ev;
9896 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9898 bacpy(&ev.addr.bdaddr, bdaddr);
9899 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9901 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9905 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9906 u8 link_type, u8 addr_type, u8 status,
9909 struct mgmt_pending_cmd *cmd;
9911 cmd = pending_find(opcode, hdev);
9915 cmd->cmd_complete(cmd, mgmt_status(status));
9916 mgmt_pending_remove(cmd);
9921 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9922 u8 link_type, u8 addr_type, u8 status)
9924 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9925 status, MGMT_OP_USER_CONFIRM_REPLY);
9928 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9929 u8 link_type, u8 addr_type, u8 status)
9931 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9933 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9936 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9937 u8 link_type, u8 addr_type, u8 status)
9939 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9940 status, MGMT_OP_USER_PASSKEY_REPLY);
9943 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9944 u8 link_type, u8 addr_type, u8 status)
9946 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9948 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9951 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9952 u8 link_type, u8 addr_type, u32 passkey,
9955 struct mgmt_ev_passkey_notify ev;
9957 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9959 bacpy(&ev.addr.bdaddr, bdaddr);
9960 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9961 ev.passkey = __cpu_to_le32(passkey);
9962 ev.entered = entered;
9964 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9967 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9969 struct mgmt_ev_auth_failed ev;
9970 struct mgmt_pending_cmd *cmd;
9971 u8 status = mgmt_status(hci_status);
9973 bacpy(&ev.addr.bdaddr, &conn->dst);
9974 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9977 cmd = find_pairing(conn);
9979 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9980 cmd ? cmd->sk : NULL);
9983 cmd->cmd_complete(cmd, status);
9984 mgmt_pending_remove(cmd);
9988 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9990 struct cmd_lookup match = { NULL, hdev };
9994 u8 mgmt_err = mgmt_status(status);
9995 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9996 cmd_status_rsp, &mgmt_err);
10000 if (test_bit(HCI_AUTH, &hdev->flags))
10001 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10003 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10005 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10009 new_settings(hdev, match.sk);
10012 sock_put(match.sk);
10015 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10017 struct cmd_lookup *match = data;
10019 if (match->sk == NULL) {
10020 match->sk = cmd->sk;
10021 sock_hold(match->sk);
10025 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10028 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10030 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10031 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10032 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10035 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10036 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10037 ext_info_changed(hdev, NULL);
10041 sock_put(match.sk);
10044 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10046 struct mgmt_cp_set_local_name ev;
10047 struct mgmt_pending_cmd *cmd;
10052 memset(&ev, 0, sizeof(ev));
10053 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10054 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10056 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10058 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10060 /* If this is a HCI command related to powering on the
10061 * HCI dev don't send any mgmt signals.
10063 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10067 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10068 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10069 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10072 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10076 for (i = 0; i < uuid_count; i++) {
10077 if (!memcmp(uuid, uuids[i], 16))
10084 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10088 while (parsed < eir_len) {
10089 u8 field_len = eir[0];
10093 if (field_len == 0)
10096 if (eir_len - parsed < field_len + 1)
10100 case EIR_UUID16_ALL:
10101 case EIR_UUID16_SOME:
10102 for (i = 0; i + 3 <= field_len; i += 2) {
10103 memcpy(uuid, bluetooth_base_uuid, 16);
10104 uuid[13] = eir[i + 3];
10105 uuid[12] = eir[i + 2];
10106 if (has_uuid(uuid, uuid_count, uuids))
10110 case EIR_UUID32_ALL:
10111 case EIR_UUID32_SOME:
10112 for (i = 0; i + 5 <= field_len; i += 4) {
10113 memcpy(uuid, bluetooth_base_uuid, 16);
10114 uuid[15] = eir[i + 5];
10115 uuid[14] = eir[i + 4];
10116 uuid[13] = eir[i + 3];
10117 uuid[12] = eir[i + 2];
10118 if (has_uuid(uuid, uuid_count, uuids))
10122 case EIR_UUID128_ALL:
10123 case EIR_UUID128_SOME:
10124 for (i = 0; i + 17 <= field_len; i += 16) {
10125 memcpy(uuid, eir + i + 2, 16);
10126 if (has_uuid(uuid, uuid_count, uuids))
10132 parsed += field_len + 1;
10133 eir += field_len + 1;
10139 static void restart_le_scan(struct hci_dev *hdev)
10141 /* If controller is not scanning we are done. */
10142 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
10145 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
10146 hdev->discovery.scan_start +
10147 hdev->discovery.scan_duration))
10150 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
10151 DISCOV_LE_RESTART_DELAY);
10154 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10155 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10157 /* If a RSSI threshold has been specified, and
10158 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10159 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10160 * is set, let it through for further processing, as we might need to
10161 * restart the scan.
10163 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10164 * the results are also dropped.
10166 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10167 (rssi == HCI_RSSI_INVALID ||
10168 (rssi < hdev->discovery.rssi &&
10169 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10172 if (hdev->discovery.uuid_count != 0) {
10173 /* If a list of UUIDs is provided in filter, results with no
10174 * matching UUID should be dropped.
10176 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10177 hdev->discovery.uuids) &&
10178 !eir_has_uuids(scan_rsp, scan_rsp_len,
10179 hdev->discovery.uuid_count,
10180 hdev->discovery.uuids))
10184 /* If duplicate filtering does not report RSSI changes, then restart
10185 * scanning to ensure updated result with updated RSSI values.
10187 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10188 restart_le_scan(hdev);
10190 /* Validate RSSI value against the RSSI threshold once more. */
10191 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10192 rssi < hdev->discovery.rssi)
10199 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10200 bdaddr_t *bdaddr, u8 addr_type)
10202 struct mgmt_ev_adv_monitor_device_lost ev;
10204 ev.monitor_handle = cpu_to_le16(handle);
10205 bacpy(&ev.addr.bdaddr, bdaddr);
10206 ev.addr.type = addr_type;
10208 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10212 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10213 struct sk_buff *skb,
10214 struct sock *skip_sk,
10217 struct sk_buff *advmon_skb;
10218 size_t advmon_skb_len;
10219 __le16 *monitor_handle;
10224 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10225 sizeof(struct mgmt_ev_device_found)) + skb->len;
10226 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10231 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10232 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10233 * store monitor_handle of the matched monitor.
10235 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10236 *monitor_handle = cpu_to_le16(handle);
10237 skb_put_data(advmon_skb, skb->data, skb->len);
10239 mgmt_event_skb(advmon_skb, skip_sk);
10242 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10243 bdaddr_t *bdaddr, bool report_device,
10244 struct sk_buff *skb,
10245 struct sock *skip_sk)
10247 struct monitored_device *dev, *tmp;
10248 bool matched = false;
10249 bool notified = false;
10251 /* We have received the Advertisement Report because:
10252 * 1. the kernel has initiated active discovery
10253 * 2. if not, we have pend_le_reports > 0 in which case we are doing
10255 * 3. if none of the above is true, we have one or more active
10256 * Advertisement Monitor
10258 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10259 * and report ONLY one advertisement per device for the matched Monitor
10260 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10262 * For case 3, since we are not active scanning and all advertisements
10263 * received are due to a matched Advertisement Monitor, report all
10264 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10266 if (report_device && !hdev->advmon_pend_notify) {
10267 mgmt_event_skb(skb, skip_sk);
10271 hdev->advmon_pend_notify = false;
10273 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10274 if (!bacmp(&dev->bdaddr, bdaddr)) {
10277 if (!dev->notified) {
10278 mgmt_send_adv_monitor_device_found(hdev, skb,
10282 dev->notified = true;
10286 if (!dev->notified)
10287 hdev->advmon_pend_notify = true;
10290 if (!report_device &&
10291 ((matched && !notified) || !msft_monitor_supported(hdev))) {
10292 /* Handle 0 indicates that we are not active scanning and this
10293 * is a subsequent advertisement report for an already matched
10294 * Advertisement Monitor or the controller offloading support
10295 * is not available.
10297 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10301 mgmt_event_skb(skb, skip_sk);
10306 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10307 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10308 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10311 struct sk_buff *skb;
10312 struct mgmt_ev_mesh_device_found *ev;
10315 if (!hdev->mesh_ad_types[0])
10318 /* Scan for requested AD types */
10320 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10321 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10322 if (!hdev->mesh_ad_types[j])
10325 if (hdev->mesh_ad_types[j] == eir[i + 1])
10331 if (scan_rsp_len > 0) {
10332 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10333 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10334 if (!hdev->mesh_ad_types[j])
10337 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10346 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10347 sizeof(*ev) + eir_len + scan_rsp_len);
10351 ev = skb_put(skb, sizeof(*ev));
10353 bacpy(&ev->addr.bdaddr, bdaddr);
10354 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10356 ev->flags = cpu_to_le32(flags);
10357 ev->instant = cpu_to_le64(instant);
10360 /* Copy EIR or advertising data into event */
10361 skb_put_data(skb, eir, eir_len);
10363 if (scan_rsp_len > 0)
10364 /* Append scan response data to event */
10365 skb_put_data(skb, scan_rsp, scan_rsp_len);
10367 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10369 mgmt_event_skb(skb, NULL);
10372 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10373 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10374 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10377 struct sk_buff *skb;
10378 struct mgmt_ev_device_found *ev;
10379 bool report_device = hci_discovery_active(hdev);
10381 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10382 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10383 eir, eir_len, scan_rsp, scan_rsp_len,
10386 /* Don't send events for a non-kernel initiated discovery. With
10387 * LE one exception is if we have pend_le_reports > 0 in which
10388 * case we're doing passive scanning and want these events.
10390 if (!hci_discovery_active(hdev)) {
10391 if (link_type == ACL_LINK)
10393 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10394 report_device = true;
10395 else if (!hci_is_adv_monitoring(hdev))
10399 if (hdev->discovery.result_filtering) {
10400 /* We are using service discovery */
10401 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10406 if (hdev->discovery.limited) {
10407 /* Check for limited discoverable bit */
10409 if (!(dev_class[1] & 0x20))
10412 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10413 if (!flags || !(flags[0] & LE_AD_LIMITED))
10418 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10419 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10420 sizeof(*ev) + eir_len + scan_rsp_len + 5);
10424 ev = skb_put(skb, sizeof(*ev));
10426 /* In case of device discovery with BR/EDR devices (pre 1.2), the
10427 * RSSI value was reported as 0 when not available. This behavior
10428 * is kept when using device discovery. This is required for full
10429 * backwards compatibility with the API.
10431 * However when using service discovery, the value 127 will be
10432 * returned when the RSSI is not available.
10434 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10435 link_type == ACL_LINK)
10438 bacpy(&ev->addr.bdaddr, bdaddr);
10439 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10441 ev->flags = cpu_to_le32(flags);
10444 /* Copy EIR or advertising data into event */
10445 skb_put_data(skb, eir, eir_len);
10447 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10450 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10452 skb_put_data(skb, eir_cod, sizeof(eir_cod));
10455 if (scan_rsp_len > 0)
10456 /* Append scan response data to event */
10457 skb_put_data(skb, scan_rsp, scan_rsp_len);
10459 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10461 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10464 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10465 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10467 struct sk_buff *skb;
10468 struct mgmt_ev_device_found *ev;
10472 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10473 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10475 ev = skb_put(skb, sizeof(*ev));
10476 bacpy(&ev->addr.bdaddr, bdaddr);
10477 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10481 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10483 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10485 ev->eir_len = cpu_to_le16(eir_len);
10486 ev->flags = cpu_to_le32(flags);
10488 mgmt_event_skb(skb, NULL);
10491 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10493 struct mgmt_ev_discovering ev;
10495 bt_dev_dbg(hdev, "discovering %u", discovering);
10497 memset(&ev, 0, sizeof(ev));
10498 ev.type = hdev->discovery.type;
10499 ev.discovering = discovering;
10501 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10504 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10506 struct mgmt_ev_controller_suspend ev;
10508 ev.suspend_state = state;
10509 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10512 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10515 struct mgmt_ev_controller_resume ev;
10517 ev.wake_reason = reason;
10519 bacpy(&ev.addr.bdaddr, bdaddr);
10520 ev.addr.type = addr_type;
10522 memset(&ev.addr, 0, sizeof(ev.addr));
10525 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10528 static struct hci_mgmt_chan chan = {
10529 .channel = HCI_CHANNEL_CONTROL,
10530 .handler_count = ARRAY_SIZE(mgmt_handlers),
10531 .handlers = mgmt_handlers,
10532 .hdev_init = mgmt_init_hdev,
10535 int mgmt_init(void)
10537 return hci_mgmt_chan_register(&chan);
10540 void mgmt_exit(void)
10542 hci_mgmt_chan_unregister(&chan);
10545 void mgmt_cleanup(struct sock *sk)
10547 struct mgmt_mesh_tx *mesh_tx;
10548 struct hci_dev *hdev;
10550 read_lock(&hci_dev_list_lock);
10552 list_for_each_entry(hdev, &hci_dev_list, list) {
10554 mesh_tx = mgmt_mesh_next(hdev, sk);
10557 mesh_send_complete(hdev, mesh_tx, true);
10561 read_unlock(&hci_dev_list_lock);