2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
/* Management interface protocol version advertised via Read Version. */
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 22
/* Opcodes a trusted (privileged) mgmt socket may issue; reported by
 * MGMT_OP_READ_COMMANDS.
 * NOTE(review): extraction artifact — every line carries a stray leading
 * number and several entries/braces were dropped; restore from upstream
 * before compiling.
 */
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
55 MGMT_OP_SET_LINK_SECURITY,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
113 MGMT_OP_GET_PHY_CONFIGURATION,
114 MGMT_OP_SET_PHY_CONFIGURATION,
115 MGMT_OP_SET_BLOCKED_KEYS,
116 MGMT_OP_SET_WIDEBAND_SPEECH,
117 MGMT_OP_READ_CONTROLLER_CAP,
118 MGMT_OP_READ_EXP_FEATURES_INFO,
119 MGMT_OP_SET_EXP_FEATURE,
120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 MGMT_OP_GET_DEVICE_FLAGS,
125 MGMT_OP_SET_DEVICE_FLAGS,
126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 MGMT_OP_REMOVE_ADV_MONITOR,
129 MGMT_OP_ADD_EXT_ADV_PARAMS,
130 MGMT_OP_ADD_EXT_ADV_DATA,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 MGMT_OP_SET_MESH_RECEIVER,
133 MGMT_OP_MESH_READ_FEATURES,
135 MGMT_OP_MESH_SEND_CANCEL,
/* Events delivered to trusted mgmt sockets; reported by
 * MGMT_OP_READ_COMMANDS alongside the command table above.
 * NOTE(review): same extraction damage — stray leading numbers and some
 * dropped entries.
 */
138 static const u16 mgmt_events[] = {
139 MGMT_EV_CONTROLLER_ERROR,
141 MGMT_EV_INDEX_REMOVED,
142 MGMT_EV_NEW_SETTINGS,
143 MGMT_EV_CLASS_OF_DEV_CHANGED,
144 MGMT_EV_LOCAL_NAME_CHANGED,
145 MGMT_EV_NEW_LINK_KEY,
146 MGMT_EV_NEW_LONG_TERM_KEY,
147 MGMT_EV_DEVICE_CONNECTED,
148 MGMT_EV_DEVICE_DISCONNECTED,
149 MGMT_EV_CONNECT_FAILED,
150 MGMT_EV_PIN_CODE_REQUEST,
151 MGMT_EV_USER_CONFIRM_REQUEST,
152 MGMT_EV_USER_PASSKEY_REQUEST,
154 MGMT_EV_DEVICE_FOUND,
156 MGMT_EV_DEVICE_BLOCKED,
157 MGMT_EV_DEVICE_UNBLOCKED,
158 MGMT_EV_DEVICE_UNPAIRED,
159 MGMT_EV_PASSKEY_NOTIFY,
162 MGMT_EV_DEVICE_ADDED,
163 MGMT_EV_DEVICE_REMOVED,
164 MGMT_EV_NEW_CONN_PARAM,
165 MGMT_EV_UNCONF_INDEX_ADDED,
166 MGMT_EV_UNCONF_INDEX_REMOVED,
167 MGMT_EV_NEW_CONFIG_OPTIONS,
168 MGMT_EV_EXT_INDEX_ADDED,
169 MGMT_EV_EXT_INDEX_REMOVED,
170 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 MGMT_EV_ADVERTISING_ADDED,
172 MGMT_EV_ADVERTISING_REMOVED,
173 MGMT_EV_EXT_INFO_CHANGED,
174 MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 MGMT_EV_EXP_FEATURE_CHANGED,
176 MGMT_EV_DEVICE_FLAGS_CHANGED,
177 MGMT_EV_ADV_MONITOR_ADDED,
178 MGMT_EV_ADV_MONITOR_REMOVED,
179 MGMT_EV_CONTROLLER_SUSPEND,
180 MGMT_EV_CONTROLLER_RESUME,
181 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
/* Read-only opcodes an UNtrusted mgmt socket may issue — information
 * queries only, nothing that mutates controller state.
 */
185 static const u16 mgmt_untrusted_commands[] = {
186 MGMT_OP_READ_INDEX_LIST,
188 MGMT_OP_READ_UNCONF_INDEX_LIST,
189 MGMT_OP_READ_CONFIG_INFO,
190 MGMT_OP_READ_EXT_INDEX_LIST,
191 MGMT_OP_READ_EXT_INFO,
192 MGMT_OP_READ_CONTROLLER_CAP,
193 MGMT_OP_READ_EXP_FEATURES_INFO,
194 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Events an UNtrusted mgmt socket is allowed to receive — index and
 * informational change notifications only (no key/pairing material).
 */
198 static const u16 mgmt_untrusted_events[] = {
200 MGMT_EV_INDEX_REMOVED,
201 MGMT_EV_NEW_SETTINGS,
202 MGMT_EV_CLASS_OF_DEV_CHANGED,
203 MGMT_EV_LOCAL_NAME_CHANGED,
204 MGMT_EV_UNCONF_INDEX_ADDED,
205 MGMT_EV_UNCONF_INDEX_REMOVED,
206 MGMT_EV_NEW_CONFIG_OPTIONS,
207 MGMT_EV_EXT_INDEX_ADDED,
208 MGMT_EV_EXT_INDEX_REMOVED,
209 MGMT_EV_EXT_INFO_CHANGED,
210 MGMT_EV_EXP_FEATURE_CHANGED,
/* How long the UUID/class service cache stays dirty before it is flushed
 * to the controller (2 seconds, in jiffies).
 */
213 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 16 bytes of zeros — used to recognize an all-zero (invalid) link key. */
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
218 /* HCI to MGMT error code conversion table */
/* Maps HCI status codes (table index = HCI code, see Core Spec Vol 1
 * Part F) to MGMT_STATUS_* values; consulted by mgmt_status() below.
 * NOTE(review): extraction artifact — stray leading numbers; missing rows
 * must be restored from upstream since the mapping is index-sensitive.
 */
219 static const u8 mgmt_status_table[] = {
221 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
222 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
223 MGMT_STATUS_FAILED, /* Hardware Failure */
224 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
225 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
226 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
227 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
228 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
229 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
230 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
231 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
232 MGMT_STATUS_BUSY, /* Command Disallowed */
233 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
234 MGMT_STATUS_REJECTED, /* Rejected Security */
235 MGMT_STATUS_REJECTED, /* Rejected Personal */
236 MGMT_STATUS_TIMEOUT, /* Host Timeout */
237 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
238 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
239 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
240 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
241 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
242 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
243 MGMT_STATUS_BUSY, /* Repeated Attempts */
244 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
245 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
246 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
247 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
248 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
249 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
250 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
251 MGMT_STATUS_FAILED, /* Unspecified Error */
252 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
253 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
254 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
255 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
256 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
257 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
258 MGMT_STATUS_FAILED, /* Unit Link Key Used */
259 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
260 MGMT_STATUS_TIMEOUT, /* Instant Passed */
261 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
262 MGMT_STATUS_FAILED, /* Transaction Collision */
263 MGMT_STATUS_FAILED, /* Reserved for future use */
264 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
265 MGMT_STATUS_REJECTED, /* QoS Rejected */
266 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
267 MGMT_STATUS_REJECTED, /* Insufficient Security */
268 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
269 MGMT_STATUS_FAILED, /* Reserved for future use */
270 MGMT_STATUS_BUSY, /* Role Switch Pending */
271 MGMT_STATUS_FAILED, /* Reserved for future use */
272 MGMT_STATUS_FAILED, /* Slot Violation */
273 MGMT_STATUS_FAILED, /* Role Switch Failed */
274 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
275 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
276 MGMT_STATUS_BUSY, /* Host Busy Pairing */
277 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
278 MGMT_STATUS_BUSY, /* Controller Busy */
279 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
280 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
281 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
282 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
283 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
286 static u8 mgmt_errno_status(int err)
290 return MGMT_STATUS_SUCCESS;
292 return MGMT_STATUS_REJECTED;
294 return MGMT_STATUS_INVALID_PARAMS;
296 return MGMT_STATUS_NOT_SUPPORTED;
298 return MGMT_STATUS_BUSY;
300 return MGMT_STATUS_AUTH_FAILED;
302 return MGMT_STATUS_NO_RESOURCES;
304 return MGMT_STATUS_ALREADY_CONNECTED;
306 return MGMT_STATUS_DISCONNECTED;
309 return MGMT_STATUS_FAILED;
312 static u8 mgmt_status(int err)
315 return mgmt_errno_status(err);
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
320 return MGMT_STATUS_FAILED;
323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
326 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331 u16 len, int flag, struct sock *skip_sk)
333 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338 struct sock *skip_sk)
340 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 HCI_SOCK_TRUSTED, skip_sk);
344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
346 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
350 static u8 le_addr_type(u8 mgmt_addr_type)
352 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 return ADDR_LE_DEV_PUBLIC;
355 return ADDR_LE_DEV_RANDOM;
358 void mgmt_fill_version_info(void *ver)
360 struct mgmt_rp_read_version *rp = ver;
362 rp->version = MGMT_VERSION;
363 rp->revision = cpu_to_le16(MGMT_REVISION);
366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
369 struct mgmt_rp_read_version rp;
371 bt_dev_dbg(hdev, "sock %p", sk);
373 mgmt_fill_version_info(&rp);
375 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: reply with the opcode/event tables,
 * picking the trusted or untrusted variants based on the socket's
 * HCI_SOCK_TRUSTED flag. Opcodes are written little-endian and possibly
 * unaligned into the flexible reply buffer.
 * NOTE(review): extraction artifact — stray leading numbers; the
 * kmalloc-failure check, else-branches' braces and the kfree/return tail
 * were dropped and must be restored before compiling.
 */
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
382 struct mgmt_rp_read_commands *rp;
383 u16 num_commands, num_events;
387 bt_dev_dbg(hdev, "sock %p", sk);
389 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 num_commands = ARRAY_SIZE(mgmt_commands);
391 num_events = ARRAY_SIZE(mgmt_events);
393 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 num_events = ARRAY_SIZE(mgmt_untrusted_events);
397 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
399 rp = kmalloc(rp_size, GFP_KERNEL);
403 rp->num_commands = cpu_to_le16(num_commands);
404 rp->num_events = cpu_to_le16(num_events);
406 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 __le16 *opcode = rp->opcodes;
409 for (i = 0; i < num_commands; i++, opcode++)
410 put_unaligned_le16(mgmt_commands[i], opcode);
412 for (i = 0; i < num_events; i++, opcode++)
413 put_unaligned_le16(mgmt_events[i], opcode);
415 __le16 *opcode = rp->opcodes;
417 for (i = 0; i < num_commands; i++, opcode++)
418 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
420 for (i = 0; i < num_events; i++, opcode++)
421 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all configured
 * primary controllers. Walks hci_dev_list twice under the read lock —
 * once to size the reply, once to fill it — skipping devices still in
 * SETUP/CONFIG, in user-channel mode, or raw-only.
 * NOTE(review): extraction artifact — stray leading numbers; kmalloc
 * failure path and final kfree/return were dropped.
 */
431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
434 struct mgmt_rp_read_index_list *rp;
440 bt_dev_dbg(hdev, "sock %p", sk);
442 read_lock(&hci_dev_list_lock);
/* First pass: count eligible controllers to size the reply. */
445 list_for_each_entry(d, &hci_dev_list, list) {
446 if (d->dev_type == HCI_PRIMARY &&
447 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
451 rp_len = sizeof(*rp) + (2 * count);
452 rp = kmalloc(rp_len, GFP_ATOMIC);
454 read_unlock(&hci_dev_list_lock);
/* Second pass: fill indices; count may shrink if flags changed. */
459 list_for_each_entry(d, &hci_dev_list, list) {
460 if (hci_dev_test_flag(d, HCI_SETUP) ||
461 hci_dev_test_flag(d, HCI_CONFIG) ||
462 hci_dev_test_flag(d, HCI_USER_CHANNEL))
465 /* Devices marked as raw-only are neither configured
466 * nor unconfigured controllers.
468 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
471 if (d->dev_type == HCI_PRIMARY &&
472 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
473 rp->index[count++] = cpu_to_le16(d->id);
474 bt_dev_dbg(hdev, "Added hci%u", d->id);
478 rp->num_controllers = cpu_to_le16(count);
479 rp_len = sizeof(*rp) + (2 * count);
481 read_unlock(&hci_dev_list_lock);
483 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass walk as
 * read_index_list() but selecting UNCONFIGURED primary controllers
 * instead of configured ones.
 * NOTE(review): extraction artifact — stray leading numbers; kmalloc
 * failure path and final kfree/return were dropped.
 */
491 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
492 void *data, u16 data_len)
494 struct mgmt_rp_read_unconf_index_list *rp;
500 bt_dev_dbg(hdev, "sock %p", sk);
502 read_lock(&hci_dev_list_lock);
505 list_for_each_entry(d, &hci_dev_list, list) {
506 if (d->dev_type == HCI_PRIMARY &&
507 hci_dev_test_flag(d, HCI_UNCONFIGURED))
511 rp_len = sizeof(*rp) + (2 * count);
512 rp = kmalloc(rp_len, GFP_ATOMIC);
514 read_unlock(&hci_dev_list_lock);
519 list_for_each_entry(d, &hci_dev_list, list) {
520 if (hci_dev_test_flag(d, HCI_SETUP) ||
521 hci_dev_test_flag(d, HCI_CONFIG) ||
522 hci_dev_test_flag(d, HCI_USER_CHANNEL))
525 /* Devices marked as raw-only are neither configured
526 * nor unconfigured controllers.
528 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
531 if (d->dev_type == HCI_PRIMARY &&
532 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
533 rp->index[count++] = cpu_to_le16(d->id);
534 bt_dev_dbg(hdev, "Added hci%u", d->id);
538 rp->num_controllers = cpu_to_le16(count);
539 rp_len = sizeof(*rp) + (2 * count);
541 read_unlock(&hci_dev_list_lock);
543 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
544 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list primary AND AMP controllers
 * with a per-entry type (0x00 configured primary, 0x01 unconfigured
 * primary, 0x02 AMP) and bus. Calling this once switches the socket to
 * extended index events only.
 * NOTE(review): extraction artifact — stray leading numbers; kmalloc
 * failure path, an unknown-type `continue`, and kfree/return tail were
 * dropped.
 */
551 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
552 void *data, u16 data_len)
554 struct mgmt_rp_read_ext_index_list *rp;
559 bt_dev_dbg(hdev, "sock %p", sk);
561 read_lock(&hci_dev_list_lock);
564 list_for_each_entry(d, &hci_dev_list, list) {
565 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
569 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
571 read_unlock(&hci_dev_list_lock);
576 list_for_each_entry(d, &hci_dev_list, list) {
577 if (hci_dev_test_flag(d, HCI_SETUP) ||
578 hci_dev_test_flag(d, HCI_CONFIG) ||
579 hci_dev_test_flag(d, HCI_USER_CHANNEL))
582 /* Devices marked as raw-only are neither configured
583 * nor unconfigured controllers.
585 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
588 if (d->dev_type == HCI_PRIMARY) {
589 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
590 rp->entry[count].type = 0x01;
592 rp->entry[count].type = 0x00;
593 } else if (d->dev_type == HCI_AMP) {
594 rp->entry[count].type = 0x02;
599 rp->entry[count].bus = d->bus;
600 rp->entry[count++].index = cpu_to_le16(d->id);
601 bt_dev_dbg(hdev, "Added hci%u", d->id);
604 rp->num_controllers = cpu_to_le16(count);
606 read_unlock(&hci_dev_list_lock);
608 /* If this command is called at least once, then all the
609 * default index and unconfigured index events are disabled
610 * and from now on only extended index events are used.
612 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
613 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
614 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
616 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
617 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
618 struct_size(rp, entry, count));
625 static bool is_configured(struct hci_dev *hdev)
627 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
631 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 !bacmp(&hdev->public_addr, BDADDR_ANY))
639 static __le32 get_missing_options(struct hci_dev *hdev)
643 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645 options |= MGMT_OPTION_EXTERNAL_CONFIG;
647 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649 !bacmp(&hdev->public_addr, BDADDR_ANY))
650 options |= MGMT_OPTION_PUBLIC_ADDRESS;
652 return cpu_to_le32(options);
655 static int new_options(struct hci_dev *hdev, struct sock *skip)
657 __le32 options = get_missing_options(hdev);
659 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
660 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
663 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
665 __le32 options = get_missing_options(hdev);
667 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
671 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
672 void *data, u16 data_len)
674 struct mgmt_rp_read_config_info rp;
677 bt_dev_dbg(hdev, "sock %p", sk);
681 memset(&rp, 0, sizeof(rp));
682 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
684 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
685 options |= MGMT_OPTION_EXTERNAL_CONFIG;
687 if (hdev->set_bdaddr)
688 options |= MGMT_OPTION_PUBLIC_ADDRESS;
690 rp.supported_options = cpu_to_le32(options);
691 rp.missing_options = get_missing_options(hdev);
693 hci_dev_unlock(hdev);
695 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the MGMT_PHY_* bitmask of PHYs this controller supports, derived
 * from LMP features (BR/EDR slot and EDR rate capabilities) and LE
 * features (2M and Coded PHYs).
 * NOTE(review): extraction artifact — stray leading numbers; closing
 * braces of the nested capability blocks were dropped.
 */
699 static u32 get_supported_phys(struct hci_dev *hdev)
701 u32 supported_phys = 0;
703 if (lmp_bredr_capable(hdev)) {
704 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
706 if (hdev->features[0][0] & LMP_3SLOT)
707 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
709 if (hdev->features[0][0] & LMP_5SLOT)
710 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
712 if (lmp_edr_2m_capable(hdev)) {
713 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
715 if (lmp_edr_3slot_capable(hdev))
716 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
718 if (lmp_edr_5slot_capable(hdev))
719 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
721 if (lmp_edr_3m_capable(hdev)) {
722 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
724 if (lmp_edr_3slot_capable(hdev))
725 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
727 if (lmp_edr_5slot_capable(hdev))
728 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
733 if (lmp_le_capable(hdev)) {
734 supported_phys |= MGMT_PHY_LE_1M_TX;
735 supported_phys |= MGMT_PHY_LE_1M_RX;
737 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
738 supported_phys |= MGMT_PHY_LE_2M_TX;
739 supported_phys |= MGMT_PHY_LE_2M_RX;
742 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
743 supported_phys |= MGMT_PHY_LE_CODED_TX;
744 supported_phys |= MGMT_PHY_LE_CODED_RX;
748 return supported_phys;
/* Build the MGMT_PHY_* bitmask of currently-selected PHYs. For BR/EDR
 * the selection is inferred from hdev->pkt_type: EDR packet-type bits
 * are "disabled" bits, so a CLEARED HCI_2DHx/3DHx bit means that rate
 * and slot count is in use. For LE it mirrors the default TX/RX PHY
 * preference masks.
 * NOTE(review): extraction artifact — stray leading numbers and dropped
 * closing braces.
 */
751 static u32 get_selected_phys(struct hci_dev *hdev)
753 u32 selected_phys = 0;
755 if (lmp_bredr_capable(hdev)) {
756 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
758 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
759 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
761 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
762 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
764 if (lmp_edr_2m_capable(hdev)) {
765 if (!(hdev->pkt_type & HCI_2DH1))
766 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
768 if (lmp_edr_3slot_capable(hdev) &&
769 !(hdev->pkt_type & HCI_2DH3))
770 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
772 if (lmp_edr_5slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH5))
774 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
776 if (lmp_edr_3m_capable(hdev)) {
777 if (!(hdev->pkt_type & HCI_3DH1))
778 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
780 if (lmp_edr_3slot_capable(hdev) &&
781 !(hdev->pkt_type & HCI_3DH3))
782 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
784 if (lmp_edr_5slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH5))
786 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
791 if (lmp_le_capable(hdev)) {
792 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
793 selected_phys |= MGMT_PHY_LE_1M_TX;
795 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
796 selected_phys |= MGMT_PHY_LE_1M_RX;
798 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
799 selected_phys |= MGMT_PHY_LE_2M_TX;
801 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
802 selected_phys |= MGMT_PHY_LE_2M_RX;
804 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
805 selected_phys |= MGMT_PHY_LE_CODED_TX;
807 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
808 selected_phys |= MGMT_PHY_LE_CODED_RX;
811 return selected_phys;
814 static u32 get_configurable_phys(struct hci_dev *hdev)
816 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the MGMT_SETTING_* mask of settings this controller can support,
 * based on LMP/LE capability bits and quirks. Reported in the
 * Read Controller Information reply.
 * NOTE(review): extraction artifact — stray leading numbers; the
 * `u32 settings = 0;` declaration, some condition continuation lines and
 * the final return were dropped.
 */
820 static u32 get_supported_settings(struct hci_dev *hdev)
824 settings |= MGMT_SETTING_POWERED;
825 settings |= MGMT_SETTING_BONDABLE;
826 settings |= MGMT_SETTING_DEBUG_KEYS;
827 settings |= MGMT_SETTING_CONNECTABLE;
828 settings |= MGMT_SETTING_DISCOVERABLE;
830 if (lmp_bredr_capable(hdev)) {
831 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832 settings |= MGMT_SETTING_FAST_CONNECTABLE;
833 settings |= MGMT_SETTING_BREDR;
834 settings |= MGMT_SETTING_LINK_SECURITY;
836 if (lmp_ssp_capable(hdev)) {
837 settings |= MGMT_SETTING_SSP;
840 if (lmp_sc_capable(hdev))
841 settings |= MGMT_SETTING_SECURE_CONN;
843 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
845 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
848 if (lmp_le_capable(hdev)) {
849 settings |= MGMT_SETTING_LE;
850 settings |= MGMT_SETTING_SECURE_CONN;
851 settings |= MGMT_SETTING_PRIVACY;
852 settings |= MGMT_SETTING_STATIC_ADDRESS;
853 settings |= MGMT_SETTING_ADVERTISING;
856 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
858 settings |= MGMT_SETTING_CONFIGURATION;
860 if (cis_central_capable(hdev))
861 settings |= MGMT_SETTING_CIS_CENTRAL;
863 if (cis_peripheral_capable(hdev))
864 settings |= MGMT_SETTING_CIS_PERIPHERAL;
866 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the MGMT_SETTING_* mask of settings that are currently ACTIVE,
 * by sampling the hdev flag bits. Companion to get_supported_settings().
 * NOTE(review): extraction artifact — stray leading numbers; the
 * `u32 settings = 0;` declaration and final return were dropped.
 */
871 static u32 get_current_settings(struct hci_dev *hdev)
875 if (hdev_is_powered(hdev))
876 settings |= MGMT_SETTING_POWERED;
878 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
879 settings |= MGMT_SETTING_CONNECTABLE;
881 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
882 settings |= MGMT_SETTING_FAST_CONNECTABLE;
884 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
885 settings |= MGMT_SETTING_DISCOVERABLE;
887 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
888 settings |= MGMT_SETTING_BONDABLE;
890 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
891 settings |= MGMT_SETTING_BREDR;
893 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
894 settings |= MGMT_SETTING_LE;
896 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
897 settings |= MGMT_SETTING_LINK_SECURITY;
899 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
900 settings |= MGMT_SETTING_SSP;
902 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
903 settings |= MGMT_SETTING_ADVERTISING;
905 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
906 settings |= MGMT_SETTING_SECURE_CONN;
908 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
909 settings |= MGMT_SETTING_DEBUG_KEYS;
911 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
912 settings |= MGMT_SETTING_PRIVACY;
914 /* The current setting for static address has two purposes. The
915 * first is to indicate if the static address will be used and
916 * the second is to indicate if it is actually set.
918 * This means if the static address is not configured, this flag
919 * will never be set. If the address is configured, then if the
920 * address is actually used decides if the flag is set or not.
922 * For single mode LE only controllers and dual-mode controllers
923 * with BR/EDR disabled, the existence of the static address will
926 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
927 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
928 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
929 if (bacmp(&hdev->static_addr, BDADDR_ANY))
930 settings |= MGMT_SETTING_STATIC_ADDRESS;
933 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
934 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
936 if (cis_central_capable(hdev))
937 settings |= MGMT_SETTING_CIS_CENTRAL;
939 if (cis_peripheral_capable(hdev))
940 settings |= MGMT_SETTING_CIS_PERIPHERAL;
942 if (bis_capable(hdev))
943 settings |= MGMT_SETTING_ISO_BROADCASTER;
945 if (sync_recv_capable(hdev))
946 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
951 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
953 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
956 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
958 struct mgmt_pending_cmd *cmd;
960 /* If there's a pending mgmt command the flags will not yet have
961 * their final values, so check for this first.
963 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
965 struct mgmt_mode *cp = cmd->param;
967 return LE_AD_GENERAL;
968 else if (cp->val == 0x02)
969 return LE_AD_LIMITED;
971 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
972 return LE_AD_LIMITED;
973 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
974 return LE_AD_GENERAL;
980 bool mgmt_get_connectable(struct hci_dev *hdev)
982 struct mgmt_pending_cmd *cmd;
984 /* If there's a pending mgmt command the flag will not yet have
985 * it's final value, so check for this first.
987 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
989 struct mgmt_mode *cp = cmd->param;
994 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* hci_cmd_sync callback: flush the cached EIR data and class of device
 * to the controller. Always succeeds.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1005 static void service_cache_off(struct work_struct *work)
1007 struct hci_dev *hdev = container_of(work, struct hci_dev,
1008 service_cache.work);
1010 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1013 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1016 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1018 /* The generation of a new RPA and programming it into the
1019 * controller happens in the hci_req_enable_advertising()
1022 if (ext_adv_capable(hdev))
1023 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1025 return hci_enable_advertising_sync(hdev);
1028 static void rpa_expired(struct work_struct *work)
1030 struct hci_dev *hdev = container_of(work, struct hci_dev,
1033 bt_dev_dbg(hdev, "");
1035 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1037 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1040 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1043 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1045 static void discov_off(struct work_struct *work)
1047 struct hci_dev *hdev = container_of(work, struct hci_dev,
1050 bt_dev_dbg(hdev, "");
1054 /* When discoverable timeout triggers, then just make sure
1055 * the limited discoverable flag is cleared. Even in the case
1056 * of a timeout triggered from general discoverable, it is
1057 * safe to unconditionally clear the flag.
1059 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1060 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1061 hdev->discov_timeout = 0;
1063 hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
1065 mgmt_new_settings(hdev);
1067 hci_dev_unlock(hdev);
1070 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1072 static void mesh_send_complete(struct hci_dev *hdev,
1073 struct mgmt_mesh_tx *mesh_tx, bool silent)
1075 u8 handle = mesh_tx->handle;
1078 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1079 sizeof(handle), NULL);
1081 mgmt_mesh_remove(mesh_tx);
1084 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1086 struct mgmt_mesh_tx *mesh_tx;
1088 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1089 hci_disable_advertising_sync(hdev);
1090 mesh_tx = mgmt_mesh_next(hdev, NULL);
1093 mesh_send_complete(hdev, mesh_tx, false);
1098 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1099 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1100 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1102 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1107 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1108 mesh_send_start_complete);
1111 mesh_send_complete(hdev, mesh_tx, false);
1113 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1116 static void mesh_send_done(struct work_struct *work)
1118 struct hci_dev *hdev = container_of(work, struct hci_dev,
1119 mesh_send_done.work);
1121 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1124 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1127 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1129 if (hci_dev_test_flag(hdev, HCI_MGMT))
1132 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1134 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1135 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1136 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1137 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1139 /* Non-mgmt controlled devices get this bit set
1140 * implicitly so that pairing works for them, however
1141 * for mgmt we require user-space to explicitly enable
1144 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1146 hci_dev_set_flag(hdev, HCI_MGMT);
1149 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1150 void *data, u16 data_len)
1152 struct mgmt_rp_read_info rp;
1154 bt_dev_dbg(hdev, "sock %p", sk);
1158 memset(&rp, 0, sizeof(rp));
1160 bacpy(&rp.bdaddr, &hdev->bdaddr);
1162 rp.version = hdev->hci_ver;
1163 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1165 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1166 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1168 memcpy(rp.dev_class, hdev->dev_class, 3);
1170 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1171 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1173 hci_dev_unlock(hdev);
1175 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1179 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1184 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1185 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1186 hdev->dev_class, 3);
1188 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1189 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1192 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1193 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1194 hdev->dev_name, name_len);
1196 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1197 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1198 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like Read Info but with the class,
 * appearance and names packed as EIR data after the fixed fields.
 * Calling it once switches the socket from class/name events to the
 * single Ext Info Changed event.
 * NOTE(review): extraction artifact — stray leading numbers; the `buf`
 * array declaration (its size is not visible here), hci_dev_lock() and
 * some brace lines were dropped.
 */
1203 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1204 void *data, u16 data_len)
1207 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1210 bt_dev_dbg(hdev, "sock %p", sk);
1212 memset(&buf, 0, sizeof(buf));
1216 bacpy(&rp->bdaddr, &hdev->bdaddr);
1218 rp->version = hdev->hci_ver;
1219 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1221 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1222 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1225 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1226 rp->eir_len = cpu_to_le16(eir_len);
1228 hci_dev_unlock(hdev);
1230 /* If this command is called at least once, then the events
1231 * for class of device and local name changes are disabled
1232 * and only the new extended controller information event
1235 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1236 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1237 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1239 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1240 sizeof(*rp) + eir_len);
/* Emit MGMT_EV_EXT_INFO_CHANGED (with freshly built EIR data) to all
 * sockets that opted into extended-info events, except @skip.
 * NOTE(review): buf declaration is elided in this excerpt.
 */
1243 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1246 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1249 memset(buf, 0, sizeof(buf));
1251 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1252 ev->eir_len = cpu_to_le16(eir_len);
1254 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1255 sizeof(*ev) + eir_len,
1256 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Complete @opcode successfully with the current settings bitmask as
 * the reply payload (common success path for SET_* commands).
 */
1259 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1261 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1263 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Notify mgmt listeners that advertising instance @instance was added;
 * @sk (the originator) is skipped by mgmt_event().
 */
1267 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1269 struct mgmt_ev_advertising_added ev;
1271 ev.instance = instance;
1273 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Notify mgmt listeners that advertising instance @instance was removed;
 * @sk (the originator) is skipped by mgmt_event().
 */
1276 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1279 struct mgmt_ev_advertising_removed ev;
1281 ev.instance = instance;
1283 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Stop a pending per-instance advertising expiry: clear the recorded
 * timeout first so concurrent readers see it as inactive, then cancel
 * the delayed work.
 */
1286 static void cancel_adv_timeout(struct hci_dev *hdev)
1288 if (hdev->adv_instance_timeout) {
1289 hdev->adv_instance_timeout = 0;
1290 cancel_delayed_work(&hdev->adv_instance_expire);
1294 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry back onto the pending
 * auto-connect / report lists according to its auto_connect policy,
 * e.g. after a power cycle.
 */
1295 static void restart_le_actions(struct hci_dev *hdev)
1297 struct hci_conn_params *p;
1299 list_for_each_entry(p, &hdev->le_conn_params, list) {
1300 /* Needed for AUTO_OFF case where might not "really"
1301 * have been powered off.
/* Detach from whichever pend list the entry is currently on before
 * re-adding it below.
 */
1303 hci_pend_le_list_del_init(p);
1305 switch (p->auto_connect) {
1306 case HCI_AUTO_CONN_DIRECT:
1307 case HCI_AUTO_CONN_ALWAYS:
1308 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1310 case HCI_AUTO_CONN_REPORT:
1311 hci_pend_le_list_add(p, &hdev->pend_le_reports);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets subscribed to setting events, except @skip.
 */
1319 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1321 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1323 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1324 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* hci_cmd_sync completion for MGMT_OP_SET_POWERED: on success resume LE
 * auto-connect actions and passive scan, reply with the new settings;
 * on failure reply with a status.  Several lines (err check, labels)
 * are elided in this excerpt.
 */
1327 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1329 struct mgmt_pending_cmd *cmd = data;
1330 struct mgmt_mode *cp;
1332 /* Make sure cmd still outstanding. */
1333 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1338 bt_dev_dbg(hdev, "err %d", err);
1343 restart_le_actions(hdev);
1344 hci_update_passive_scan(hdev);
1345 hci_dev_unlock(hdev);
1348 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1350 /* Only call new_setting for power on as power off is deferred
1351 * to hdev->power_off work which does call hci_dev_do_close.
1354 new_settings(hdev, cmd->sk);
1356 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1360 mgmt_pending_remove(cmd);
/* hci_cmd_sync callback: apply the requested power state (cp->val) via
 * hci_set_powered_sync().
 */
1363 static int set_powered_sync(struct hci_dev *hdev, void *data)
1365 struct mgmt_pending_cmd *cmd = data;
1366 struct mgmt_mode *cp = cmd->param;
1368 BT_DBG("%s", hdev->name);
1370 return hci_set_powered_sync(hdev, cp->val);
/* MGMT_OP_SET_POWERED handler: validate the request, reject while a
 * power-down or a duplicate command is in flight, short-circuit when
 * the power state already matches, otherwise queue/submit the change
 * asynchronously.  Several lines (lock, gotos) are elided here.
 */
1373 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1376 struct mgmt_mode *cp = data;
1377 struct mgmt_pending_cmd *cmd;
1380 bt_dev_dbg(hdev, "sock %p", sk);
/* Only 0x00 (off) and 0x01 (on) are valid parameter values */
1382 if (cp->val != 0x00 && cp->val != 0x01)
1383 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1384 MGMT_STATUS_INVALID_PARAMS);
1389 if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
1390 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Only one SET_POWERED may be outstanding at a time */
1396 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1397 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Already in the requested state: reply immediately, no HCI work */
1402 if (!!cp->val == hdev_is_powered(hdev)) {
1403 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1407 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1413 /* Cancel potentially blocking sync operation before power off */
1414 if (cp->val == 0x00) {
1415 hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
1416 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1417 mgmt_set_powered_complete);
1419 /* Use hci_cmd_sync_submit since hdev might not be running */
1420 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1421 mgmt_set_powered_complete);
1425 mgmt_pending_remove(cmd);
1428 hci_dev_unlock(hdev);
/* Public wrapper: broadcast the current settings to every subscriber
 * (no socket skipped).
 */
1432 int mgmt_new_settings(struct hci_dev *hdev)
1434 return new_settings(hdev, NULL);
/* NOTE(review): fragment of struct cmd_lookup (struct head and other
 * members elided in this excerpt); it is used below via match->hdev
 * and match->sk — confirm full definition against the original file.
 */
1439 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with the
 * current settings, remember the first socket in the lookup (with a
 * reference taken) so the caller can skip it when broadcasting, and
 * free the pending command.
 */
1443 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1445 struct cmd_lookup *match = data;
1447 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1449 list_del(&cmd->list);
1451 if (match->sk == NULL) {
1452 match->sk = cmd->sk;
1453 sock_hold(match->sk);
1456 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data and remove it.
 */
1459 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1463 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1464 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: prefer the command's own
 * cmd_complete handler when one is set; otherwise fall back to a plain
 * status response via cmd_status_rsp().
 */
1467 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1469 if (cmd->cmd_complete) {
1472 cmd->cmd_complete(cmd, *status);
1473 mgmt_pending_remove(cmd);
1478 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's own parameters back
 * as the reply payload.
 */
1481 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1483 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1484 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1487 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1489 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1490 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, SUCCESS
 * otherwise.
 */
1493 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1495 if (!lmp_bredr_capable(hdev))
1496 return MGMT_STATUS_NOT_SUPPORTED;
1497 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1498 return MGMT_STATUS_REJECTED;
1500 return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled, SUCCESS otherwise.
 */
1503 static u8 mgmt_le_support(struct hci_dev *hdev)
1505 if (!lmp_le_capable(hdev))
1506 return MGMT_STATUS_NOT_SUPPORTED;
1507 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1508 return MGMT_STATUS_REJECTED;
1510 return MGMT_STATUS_SUCCESS;
/* hci_cmd_sync completion for MGMT_OP_SET_DISCOVERABLE: on error report
 * the status and drop limited-discoverable; on success (re)arm the
 * discoverable timeout if one is configured and reply with the new
 * settings.  Lock/goto lines are elided in this excerpt.
 */
1513 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1516 struct mgmt_pending_cmd *cmd = data;
1518 bt_dev_dbg(hdev, "err %d", err);
1520 /* Make sure cmd still outstanding. */
1521 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1527 u8 mgmt_err = mgmt_status(err);
1528 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1529 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the discov_off timer now that discoverable mode is active */
1533 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1534 hdev->discov_timeout > 0) {
1535 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1536 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1539 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1540 new_settings(hdev, cmd->sk);
1543 mgmt_pending_remove(cmd);
1544 hci_dev_unlock(hdev);
/* hci_cmd_sync callback: push the discoverable state (already recorded
 * in hdev flags by set_discoverable()) down to the controller.
 */
1547 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1549 BT_DBG("%s", hdev->name);
1551 return hci_update_discoverable_sync(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  Validates val (0x00 off, 0x01
 * general, 0x02 limited) and the timeout rules, handles the
 * powered-off and timeout-only-change fast paths in place, and
 * otherwise updates the flags and queues set_discoverable_sync().
 * Lock/goto/failed lines are elided in this excerpt.
 */
1554 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1557 struct mgmt_cp_set_discoverable *cp = data;
1558 struct mgmt_pending_cmd *cmd;
1562 bt_dev_dbg(hdev, "sock %p", sk);
/* Discoverable requires at least one of LE or BR/EDR to be enabled */
1564 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1565 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1567 MGMT_STATUS_REJECTED);
1569 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1570 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1571 MGMT_STATUS_INVALID_PARAMS);
1573 timeout = __le16_to_cpu(cp->timeout);
1575 /* Disabling discoverable requires that no timeout is set,
1576 * and enabling limited discoverable requires a timeout.
1578 if ((cp->val == 0x00 && timeout > 0) ||
1579 (cp->val == 0x02 && timeout == 0))
1580 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1581 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while powered off */
1585 if (!hdev_is_powered(hdev) && timeout > 0) {
1586 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1587 MGMT_STATUS_NOT_POWERED);
/* Serialize against other discoverable/connectable changes */
1591 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1592 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1593 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable */
1598 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1599 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 MGMT_STATUS_REJECTED);
1604 if (hdev->advertising_paused) {
1605 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Powered off: just toggle the flag and report — no HCI traffic */
1610 if (!hdev_is_powered(hdev)) {
1611 bool changed = false;
1613 /* Setting limited discoverable when powered off is
1614 * not a valid operation since it requires a timeout
1615 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1617 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1618 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1622 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1627 err = new_settings(hdev, sk);
1632 /* If the current mode is the same, then just update the timeout
1633 * value with the new value. And if only the timeout gets updated,
1634 * then no need for any HCI transactions.
1636 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1637 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1638 HCI_LIMITED_DISCOVERABLE)) {
1639 cancel_delayed_work(&hdev->discov_off);
1640 hdev->discov_timeout = timeout;
1642 if (cp->val && hdev->discov_timeout > 0) {
1643 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1644 queue_delayed_work(hdev->req_workqueue,
1645 &hdev->discov_off, to);
1648 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1652 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1658 /* Cancel any potential discoverable timeout that might be
1659 * still active and store new timeout value. The arming of
1660 * the timeout happens in the complete handler.
1662 cancel_delayed_work(&hdev->discov_off);
1663 hdev->discov_timeout = timeout;
1666 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1668 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1670 /* Limited discoverable mode */
1671 if (cp->val == 0x02)
1672 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1674 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1676 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1677 mgmt_set_discoverable_complete);
1680 mgmt_pending_remove(cmd);
1683 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_SET_CONNECTABLE: report the error
 * status on failure, otherwise reply with the new settings and
 * broadcast them.  Lock/goto lines are elided in this excerpt.
 */
1687 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1690 struct mgmt_pending_cmd *cmd = data;
1692 bt_dev_dbg(hdev, "err %d", err);
1694 /* Make sure cmd still outstanding. */
1695 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1701 u8 mgmt_err = mgmt_status(err);
1702 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1706 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1707 new_settings(hdev, cmd->sk);
1710 mgmt_pending_remove(cmd);
1712 hci_dev_unlock(hdev);
/* Apply a connectable change purely at the flag level (used when the
 * controller is powered off).  Clearing connectable also clears
 * discoverable.  Replies, refreshes scan state and broadcasts new
 * settings when something changed.
 */
1715 static int set_connectable_update_settings(struct hci_dev *hdev,
1716 struct sock *sk, u8 val)
1718 bool changed = false;
1721 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1725 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1727 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1728 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1731 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1736 hci_update_scan(hdev);
1737 hci_update_passive_scan(hdev);
1738 return new_settings(hdev, sk);
/* hci_cmd_sync callback: push the connectable state (already recorded
 * in hdev flags by set_connectable()) down to the controller.
 */
1744 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1746 BT_DBG("%s", hdev->name);
1748 return hci_update_connectable_sync(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: validate, take the flag-only path
 * when powered off, reject while a conflicting command is pending,
 * otherwise update flags and queue set_connectable_sync().
 * Lock/goto/failed lines are elided in this excerpt.
 */
1751 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1754 struct mgmt_mode *cp = data;
1755 struct mgmt_pending_cmd *cmd;
1758 bt_dev_dbg(hdev, "sock %p", sk);
1760 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1761 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1763 MGMT_STATUS_REJECTED);
1765 if (cp->val != 0x00 && cp->val != 0x01)
1766 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1767 MGMT_STATUS_INVALID_PARAMS);
1771 if (!hdev_is_powered(hdev)) {
1772 err = set_connectable_update_settings(hdev, sk, cp->val);
1776 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1777 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1783 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1790 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off also tears down discoverable state */
1792 if (hdev->discov_timeout > 0)
1793 cancel_delayed_work(&hdev->discov_off);
1795 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1796 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1797 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1800 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1801 mgmt_set_connectable_complete);
1804 mgmt_pending_remove(cmd);
1807 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: flag-only toggle of HCI_BONDABLE; when
 * the flag actually changed, refresh discoverable (the advertising
 * address may depend on bondable in limited privacy mode) and
 * broadcast new settings.  Lock/goto lines are elided in this excerpt.
 */
1811 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1814 struct mgmt_mode *cp = data;
1818 bt_dev_dbg(hdev, "sock %p", sk);
1820 if (cp->val != 0x00 && cp->val != 0x01)
1821 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1822 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear report whether the flag actually flipped */
1827 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1829 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1831 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1836 /* In limited privacy mode the change of bondable mode
1837 * may affect the local advertising address.
1839 hci_update_discoverable(hdev);
1841 err = new_settings(hdev, sk);
1845 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: BR/EDR-only.  Powered off is a
 * flag-only change; powered on sends HCI_OP_WRITE_AUTH_ENABLE unless
 * the HCI_AUTH state already matches.  Lock/goto lines are elided in
 * this excerpt.
 */
1849 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1852 struct mgmt_mode *cp = data;
1853 struct mgmt_pending_cmd *cmd;
1857 bt_dev_dbg(hdev, "sock %p", sk);
1859 status = mgmt_bredr_support(hdev);
1861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1864 if (cp->val != 0x00 && cp->val != 0x01)
1865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1866 MGMT_STATUS_INVALID_PARAMS);
1870 if (!hdev_is_powered(hdev)) {
1871 bool changed = false;
1873 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1874 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1878 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1883 err = new_settings(hdev, sk);
1888 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1889 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches: no HCI command needed */
1896 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1897 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1901 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1907 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1909 mgmt_pending_remove(cmd);
1914 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_SET_SSP: on error roll back the
 * SSP flag state and fail all pending SET_SSP commands; on success
 * sync the flag, answer all pending commands with current settings and
 * refresh the EIR.  Lock/goto lines are elided in this excerpt.
 */
1918 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1920 struct cmd_lookup match = { NULL, hdev };
1921 struct mgmt_pending_cmd *cmd = data;
1922 struct mgmt_mode *cp = cmd->param;
1923 u8 enable = cp->val;
1926 /* Make sure cmd still outstanding. */
1927 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1931 u8 mgmt_err = mgmt_status(err);
1933 if (enable && hci_dev_test_and_clear_flag(hdev,
1935 new_settings(hdev, NULL);
1938 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1944 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1946 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1949 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1952 new_settings(hdev, match.sk);
1957 hci_update_eir_sync(hdev);
/* hci_cmd_sync callback for SET_SSP: optimistically set HCI_SSP_ENABLED
 * when enabling, write the SSP mode to the controller, and undo the
 * flag if the write failed.
 */
1960 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1962 struct mgmt_pending_cmd *cmd = data;
1963 struct mgmt_mode *cp = cmd->param;
1964 bool changed = false;
1968 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1970 err = hci_write_ssp_mode_sync(hdev, cp->val);
/* Roll back the optimistic flag change on failure */
1972 if (!err && changed)
1973 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP-capable hardware.
 * Powered off is a flag-only change; otherwise the change is queued
 * via set_ssp_sync() unless already pending or already in the
 * requested state.  Lock/goto lines are elided in this excerpt.
 */
1978 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1980 struct mgmt_mode *cp = data;
1981 struct mgmt_pending_cmd *cmd;
1985 bt_dev_dbg(hdev, "sock %p", sk);
1987 status = mgmt_bredr_support(hdev);
1989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1991 if (!lmp_ssp_capable(hdev))
1992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1993 MGMT_STATUS_NOT_SUPPORTED);
1995 if (cp->val != 0x00 && cp->val != 0x01)
1996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1997 MGMT_STATUS_INVALID_PARAMS);
2001 if (!hdev_is_powered(hdev)) {
2005 changed = !hci_dev_test_and_set_flag(hdev,
2008 changed = hci_dev_test_and_clear_flag(hdev,
2012 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2017 err = new_settings(hdev, sk);
2022 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2028 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2029 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2033 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2037 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2041 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2042 MGMT_STATUS_FAILED);
2045 mgmt_pending_remove(cmd);
2049 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: High Speed support has been removed, so this
 * unconditionally returns NOT_SUPPORTED.
 */
2053 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2055 bt_dev_dbg(hdev, "sock %p", sk);
2057 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2058 MGMT_STATUS_NOT_SUPPORTED);
/* hci_cmd_sync completion for MGMT_OP_SET_LE: fail all pending SET_LE
 * commands on error, otherwise answer them with current settings and
 * broadcast the change.  Some lines are elided in this excerpt.
 */
2061 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2063 struct cmd_lookup match = { NULL, hdev };
2064 u8 status = mgmt_status(err);
2066 bt_dev_dbg(hdev, "err %d", err);
2069 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2074 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2076 new_settings(hdev, match.sk);
/* hci_cmd_sync callback for SET_LE: when disabling, tear down all
 * advertising instances first; when enabling, set HCI_LE_ENABLED.
 * Then write the LE host support setting and, if LE ended up enabled,
 * refresh the default advertising/scan-response data and the passive
 * scan.  Several lines are elided in this excerpt.
 */
2082 static int set_le_sync(struct hci_dev *hdev, void *data)
2084 struct mgmt_pending_cmd *cmd = data;
2085 struct mgmt_mode *cp = cmd->param;
2090 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2092 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2093 hci_disable_advertising_sync(hdev);
2095 if (ext_adv_capable(hdev))
2096 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2098 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2101 err = hci_write_le_host_supported_sync(hdev, val, 0);
2103 /* Make sure the controller has a good default for
2104 * advertising data. Restrict the update to when LE
2105 * has actually been enabled. During power on, the
2106 * update in powered_update_hci will take care of it.
2108 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2109 if (ext_adv_capable(hdev)) {
2112 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2114 hci_update_scan_rsp_data_sync(hdev, 0x00);
2116 hci_update_adv_data_sync(hdev, 0x00);
2117 hci_update_scan_rsp_data_sync(hdev, 0x00);
2120 hci_update_passive_scan(hdev);
/* hci_cmd_sync completion for MGMT_OP_SET_MESH_RECEIVER: on error fail
 * every pending SET_MESH_RECEIVER command; on success remove this
 * pending entry and complete the command with an empty reply.
 * Some lines are elided in this excerpt.
 */
2126 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2128 struct mgmt_pending_cmd *cmd = data;
2129 u8 status = mgmt_status(err);
2130 struct sock *sk = cmd->sk;
2133 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2134 cmd_status_rsp, &status);
2138 mgmt_pending_remove(cmd);
2139 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
/* hci_cmd_sync callback for SET_MESH_RECEIVER: record the requested
 * mesh AD-type filter list and HCI_MESH flag, then re-evaluate passive
 * scanning.  Some lines are elided in this excerpt.
 */
2142 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2144 struct mgmt_pending_cmd *cmd = data;
2145 struct mgmt_cp_set_mesh *cp = cmd->param;
2146 size_t len = cmd->param_len;
2148 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2151 hci_dev_set_flag(hdev, HCI_MESH);
2153 hci_dev_clear_flag(hdev, HCI_MESH);
/* NOTE(review): len here still includes the fixed cp header in the
 * visible code — the copy below appears to rely on elided adjustment
 * lines; confirm against the original file.
 */
2157 /* If filters don't fit, forward all adv pkts */
2158 if (len <= sizeof(hdev->mesh_ad_types))
2159 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2161 hci_update_passive_scan_sync(hdev);
/* MGMT_OP_SET_MESH_RECEIVER handler: requires LE and the mesh
 * experimental flag; validates enable and queues set_mesh_sync().
 * Lock/goto lines are elided in this excerpt.
 */
2165 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2167 struct mgmt_cp_set_mesh *cp = data;
2168 struct mgmt_pending_cmd *cmd;
2171 bt_dev_dbg(hdev, "sock %p", sk);
2173 if (!lmp_le_capable(hdev) ||
2174 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2175 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2176 MGMT_STATUS_NOT_SUPPORTED);
2178 if (cp->enable != 0x00 && cp->enable != 0x01)
2179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2180 MGMT_STATUS_INVALID_PARAMS);
2184 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2188 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2192 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2193 MGMT_STATUS_FAILED);
2196 mgmt_pending_remove(cmd);
2199 hci_dev_unlock(hdev);
/* Completion for a queued mesh transmission start: on error mark the
 * transmission as failed; on success schedule the mesh_send_done work
 * after cnt * 25 ms (the expected advertising duration).
 * Some lines are elided in this excerpt.
 */
2203 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2205 struct mgmt_mesh_tx *mesh_tx = data;
2206 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2207 unsigned long mesh_send_interval;
2208 u8 mgmt_err = mgmt_status(err);
2210 /* Report any errors here, but don't report completion */
2213 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2214 /* Send Complete Error Code for handle */
2215 mesh_send_complete(hdev, mesh_tx, false);
2219 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2220 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2221 mesh_send_interval);
/* hci_cmd_sync callback that actually starts a mesh transmission:
 * creates a dedicated advertising instance (one past the controller's
 * normal set range) carrying the mesh payload and schedules it,
 * preempting the current instance if needed.  Several lines are elided
 * in this excerpt.
 */
2224 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2226 struct mgmt_mesh_tx *mesh_tx = data;
2227 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2228 struct adv_info *adv, *next_instance;
/* Mesh uses a reserved instance number just above the HW adv sets */
2229 u8 instance = hdev->le_num_of_adv_sets + 1;
2230 u16 timeout, duration;
2233 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2234 return MGMT_STATUS_BUSY;
2237 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2238 adv = hci_add_adv_instance(hdev, instance, 0,
2239 send->adv_data_len, send->adv_data,
2242 HCI_ADV_TX_POWER_NO_PREFERENCE,
2243 hdev->le_adv_min_interval,
2244 hdev->le_adv_max_interval,
2248 mesh_tx->instance = instance;
2252 if (hdev->cur_adv_instance == instance) {
2253 /* If the currently advertised instance is being changed then
2254 * cancel the current advertising and schedule the next
2255 * instance. If there is only one instance then the overridden
2256 * advertising data will be visible right away.
2258 cancel_adv_timeout(hdev);
2260 next_instance = hci_get_next_instance(hdev, instance);
2262 instance = next_instance->instance;
2265 } else if (hdev->adv_instance_timeout) {
2266 /* Immediately advertise the new instance if no other, or
2267 * let it go naturally from queue if ADV is already happening
2273 return hci_schedule_adv_instance_sync(hdev, instance, true);
/* mgmt_mesh_foreach() callback: collect each outstanding mesh TX handle
 * into the read_features reply, bounded by max_handles.
 */
2278 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2280 struct mgmt_rp_mesh_read_features *rp = data;
2282 if (rp->used_handles >= rp->max_handles)
2285 rp->handles[rp->used_handles++] = mesh_tx->handle;
/* MGMT_OP_MESH_READ_FEATURES handler: reply with the index, maximum
 * handle count (only when LE is enabled) and the caller's currently
 * used mesh TX handles.  Lock line is elided in this excerpt.
 */
2288 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2289 void *data, u16 len)
2291 struct mgmt_rp_mesh_read_features rp;
2293 if (!lmp_le_capable(hdev) ||
2294 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2295 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2296 MGMT_STATUS_NOT_SUPPORTED);
2298 memset(&rp, 0, sizeof(rp));
2299 rp.index = cpu_to_le16(hdev->id);
2300 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2301 rp.max_handles = MESH_HANDLES_MAX;
2306 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
/* Trim the reply to only the handles actually filled in */
2308 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2309 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2311 hci_dev_unlock(hdev);
/* hci_cmd_sync callback for MESH_SEND_CANCEL: handle 0 cancels all of
 * the caller's outstanding mesh transmissions; a specific handle is
 * cancelled only if it belongs to the calling socket.  Some lines are
 * elided in this excerpt.
 */
2315 static int send_cancel(struct hci_dev *hdev, void *data)
2317 struct mgmt_pending_cmd *cmd = data;
2318 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2319 struct mgmt_mesh_tx *mesh_tx;
2321 if (!cancel->handle) {
2323 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2326 mesh_send_complete(hdev, mesh_tx, false);
2329 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
/* Only the socket that queued a transmission may cancel it */
2331 if (mesh_tx && mesh_tx->sk == cmd->sk)
2332 mesh_send_complete(hdev, mesh_tx, false);
2335 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2337 mgmt_pending_free(cmd);
/* MGMT_OP_MESH_SEND_CANCEL handler: requires LE + mesh experimental
 * flag and LE enabled; queues send_cancel().  Lock/goto lines are
 * elided in this excerpt.
 */
2342 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2343 void *data, u16 len)
2345 struct mgmt_pending_cmd *cmd;
2348 if (!lmp_le_capable(hdev) ||
2349 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2351 MGMT_STATUS_NOT_SUPPORTED);
2353 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2355 MGMT_STATUS_REJECTED);
2358 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2362 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2365 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2366 MGMT_STATUS_FAILED);
2369 mgmt_pending_free(cmd);
2372 hci_dev_unlock(hdev);
/* MGMT_OP_MESH_SEND handler: validate capability and payload length
 * (must exceed the fixed header but carry at most 31 bytes of adv
 * data), enforce the per-device handle limit, register the
 * transmission and queue mesh_send_sync().  On success the assigned
 * handle is returned to the caller.  Lock/goto lines are elided in
 * this excerpt.
 */
2376 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2378 struct mgmt_mesh_tx *mesh_tx;
2379 struct mgmt_cp_mesh_send *send = data;
2380 struct mgmt_rp_mesh_read_features rp;
2384 if (!lmp_le_capable(hdev) ||
2385 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2386 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2387 MGMT_STATUS_NOT_SUPPORTED);
2388 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2389 len <= MGMT_MESH_SEND_SIZE ||
2390 len > (MGMT_MESH_SEND_SIZE + 31))
2391 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2392 MGMT_STATUS_REJECTED);
/* Reuse the read_features reply struct to count in-use handles */
2396 memset(&rp, 0, sizeof(rp));
2397 rp.max_handles = MESH_HANDLES_MAX;
2399 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2401 if (rp.max_handles <= rp.used_handles) {
2402 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2407 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2408 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2413 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2414 mesh_send_start_complete);
2417 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2418 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2419 MGMT_STATUS_FAILED);
2423 mgmt_mesh_remove(mesh_tx);
2426 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2428 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2429 &mesh_tx->handle, 1);
2433 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: LE cannot be switched off on LE-only
 * configurations; powered off or no-op changes are handled at the flag
 * level; otherwise the change is queued via set_le_sync().
 * Lock/goto lines are elided in this excerpt.
 */
2437 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2439 struct mgmt_mode *cp = data;
2440 struct mgmt_pending_cmd *cmd;
2444 bt_dev_dbg(hdev, "sock %p", sk);
2446 if (!lmp_le_capable(hdev))
2447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2448 MGMT_STATUS_NOT_SUPPORTED);
2450 if (cp->val != 0x00 && cp->val != 0x01)
2451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2452 MGMT_STATUS_INVALID_PARAMS);
2454 /* Bluetooth single mode LE only controllers or dual-mode
2455 * controllers configured as LE only devices, do not allow
2456 * switching LE off. These have either LE enabled explicitly
2457 * or BR/EDR has been previously switched off.
2459 * When trying to enable an already enabled LE, then gracefully
2460 * send a positive response. Trying to disable it however will
2461 * result into rejection.
2463 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2464 if (cp->val == 0x01)
2465 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2467 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2468 MGMT_STATUS_REJECTED);
2474 enabled = lmp_host_le_capable(hdev);
2476 if (!hdev_is_powered(hdev) || val == enabled) {
2477 bool changed = false;
2479 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2480 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE also implicitly disables LE advertising */
2484 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2485 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2489 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2494 err = new_settings(hdev, sk);
2499 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2500 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2501 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2506 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2510 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2514 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2515 MGMT_STATUS_FAILED);
2518 mgmt_pending_remove(cmd);
2522 hci_dev_unlock(hdev);
2526 /* This is a helper function to test for pending mgmt commands that can
2527 * cause CoD or EIR HCI commands. We can only allow one such pending
2528 * mgmt command at a time since otherwise we cannot easily track what
2529 * the current values are, will be, and based on that calculate if a new
2530 * HCI command needs to be sent and if yes with what value.
/* Returns whether any UUID/class/powered command is already pending
 * (the true/false return lines are elided in this excerpt).
 */
2532 static bool pending_eir_or_class(struct hci_dev *hdev)
2534 struct mgmt_pending_cmd *cmd;
2536 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2537 switch (cmd->opcode) {
2538 case MGMT_OP_ADD_UUID:
2539 case MGMT_OP_REMOVE_UUID:
2540 case MGMT_OP_SET_DEV_CLASS:
2541 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored
 * little-endian; bytes 12..15 hold the 16/32-bit short UUID value.
 */
2549 static const u8 bluetooth_base_uuid[] = {
2550 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2551 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID by size: if its low 12 bytes differ from the
 * Bluetooth Base UUID it is a full 128-bit UUID; otherwise the value
 * at bytes 12..15 decides 16 vs 32 bits.  NOTE(review): the return
 * statements are elided in this excerpt — confirm the 128/32/16
 * mapping against the original file.
 */
2554 static u8 get_uuid_size(const u8 *uuid)
2558 if (memcmp(uuid, bluetooth_base_uuid, 12))
2561 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for UUID/class commands: reply with the (possibly
 * updated) 3-byte class of device and free the pending entry.
 */
2568 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2570 struct mgmt_pending_cmd *cmd = data;
2572 bt_dev_dbg(hdev, "err %d", err);
2574 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2575 mgmt_status(err), hdev->dev_class, 3);
2577 mgmt_pending_free(cmd);
/* hci_cmd_sync callback for ADD_UUID: refresh class of device, then the
 * EIR (error check between the two is elided in this excerpt).
 */
2580 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2584 err = hci_update_class_sync(hdev);
2588 return hci_update_eir_sync(hdev);
/* MGMT_OP_ADD_UUID handler: reject while another EIR/class command is
 * pending, record the new UUID (with service hint and size) on
 * hdev->uuids, and submit add_uuid_sync().  Lock/goto lines are elided
 * in this excerpt.
 */
2591 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2593 struct mgmt_cp_add_uuid *cp = data;
2594 struct mgmt_pending_cmd *cmd;
2595 struct bt_uuid *uuid;
2598 bt_dev_dbg(hdev, "sock %p", sk);
2602 if (pending_eir_or_class(hdev)) {
2603 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2608 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2614 memcpy(uuid->uuid, cp->uuid, 16);
2615 uuid->svc_hint = cp->svc_hint;
2616 uuid->size = get_uuid_size(cp->uuid);
2618 list_add_tail(&uuid->list, &hdev->uuids);
2620 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2626 /* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
2627 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2629 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2630 mgmt_class_complete);
2632 mgmt_pending_free(cmd);
2637 hci_dev_unlock(hdev);
/* Arm the service-cache delayed work when powered and not already
 * cached; the boolean result tells the caller whether caching was
 * activated (return lines elided in this excerpt).
 */
2641 static bool enable_service_cache(struct hci_dev *hdev)
2643 if (!hdev_is_powered(hdev))
2646 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2647 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_cmd_sync callback for REMOVE_UUID: refresh class of device, then
 * the EIR (error check between the two is elided in this excerpt).
 */
2655 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2659 err = hci_update_class_sync(hdev);
2663 return hci_update_eir_sync(hdev);
/* MGMT_OP_REMOVE_UUID handler: the all-zero UUID clears every UUID
 * (completing immediately if the service cache absorbs the update);
 * otherwise remove all matching entries, failing with INVALID_PARAMS
 * when none matched, and submit remove_uuid_sync().  Lock/goto lines
 * are elided in this excerpt.
 */
2666 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2669 struct mgmt_cp_remove_uuid *cp = data;
2670 struct mgmt_pending_cmd *cmd;
2671 struct bt_uuid *match, *tmp;
2672 static const u8 bt_uuid_any[] = {
2673 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2677 bt_dev_dbg(hdev, "sock %p", sk);
2681 if (pending_eir_or_class(hdev)) {
2682 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID means "remove everything" */
2687 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2688 hci_uuids_clear(hdev);
2690 if (enable_service_cache(hdev)) {
2691 err = mgmt_cmd_complete(sk, hdev->id,
2692 MGMT_OP_REMOVE_UUID,
2693 0, hdev->dev_class, 3);
2702 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2703 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2706 list_del(&match->list);
2712 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2713 MGMT_STATUS_INVALID_PARAMS);
2718 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2724 /* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
2725 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2727 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2728 mgmt_class_complete);
2730 mgmt_pending_free(cmd);
2733 hci_dev_unlock(hdev);
2737 static int set_class_sync(struct hci_dev *hdev, void *data)
2741 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2742 cancel_delayed_work_sync(&hdev->service_cache);
2743 err = hci_update_eir_sync(hdev);
2749 return hci_update_class_sync(hdev);
2752 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2755 struct mgmt_cp_set_dev_class *cp = data;
2756 struct mgmt_pending_cmd *cmd;
2759 bt_dev_dbg(hdev, "sock %p", sk);
2761 if (!lmp_bredr_capable(hdev))
2762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2763 MGMT_STATUS_NOT_SUPPORTED);
2767 if (pending_eir_or_class(hdev)) {
2768 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2773 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2774 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2775 MGMT_STATUS_INVALID_PARAMS);
2779 hdev->major_class = cp->major;
2780 hdev->minor_class = cp->minor;
2782 if (!hdev_is_powered(hdev)) {
2783 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2784 hdev->dev_class, 3);
2788 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2794 /* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
2795 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2797 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2798 mgmt_class_complete);
2800 mgmt_pending_free(cmd);
2803 hci_dev_unlock(hdev);
2807 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2810 struct mgmt_cp_load_link_keys *cp = data;
2811 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2812 sizeof(struct mgmt_link_key_info));
2813 u16 key_count, expected_len;
2817 bt_dev_dbg(hdev, "sock %p", sk);
2819 if (!lmp_bredr_capable(hdev))
2820 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2821 MGMT_STATUS_NOT_SUPPORTED);
2823 key_count = __le16_to_cpu(cp->key_count);
2824 if (key_count > max_key_count) {
2825 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2827 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2828 MGMT_STATUS_INVALID_PARAMS);
2831 expected_len = struct_size(cp, keys, key_count);
2832 if (expected_len != len) {
2833 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2835 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2836 MGMT_STATUS_INVALID_PARAMS);
2839 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2840 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2841 MGMT_STATUS_INVALID_PARAMS);
2843 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2846 for (i = 0; i < key_count; i++) {
2847 struct mgmt_link_key_info *key = &cp->keys[i];
2849 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2850 if (key->type > 0x08)
2851 return mgmt_cmd_status(sk, hdev->id,
2852 MGMT_OP_LOAD_LINK_KEYS,
2853 MGMT_STATUS_INVALID_PARAMS);
2858 hci_link_keys_clear(hdev);
2861 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2863 changed = hci_dev_test_and_clear_flag(hdev,
2864 HCI_KEEP_DEBUG_KEYS);
2867 new_settings(hdev, NULL);
2869 for (i = 0; i < key_count; i++) {
2870 struct mgmt_link_key_info *key = &cp->keys[i];
2872 if (hci_is_blocked_key(hdev,
2873 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2875 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2880 /* Always ignore debug keys and require a new pairing if
2881 * the user wants to use them.
2883 if (key->type == HCI_LK_DEBUG_COMBINATION)
2886 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2887 key->type, key->pin_len, NULL);
2890 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2892 hci_dev_unlock(hdev);
2897 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2898 u8 addr_type, struct sock *skip_sk)
2900 struct mgmt_ev_device_unpaired ev;
2902 bacpy(&ev.addr.bdaddr, bdaddr);
2903 ev.addr.type = addr_type;
2905 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2909 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2911 struct mgmt_pending_cmd *cmd = data;
2912 struct mgmt_cp_unpair_device *cp = cmd->param;
2915 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2917 cmd->cmd_complete(cmd, err);
2918 mgmt_pending_free(cmd);
2921 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2923 struct mgmt_pending_cmd *cmd = data;
2924 struct mgmt_cp_unpair_device *cp = cmd->param;
2925 struct hci_conn *conn;
2927 if (cp->addr.type == BDADDR_BREDR)
2928 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2931 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2932 le_addr_type(cp->addr.type));
2937 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2940 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2943 struct mgmt_cp_unpair_device *cp = data;
2944 struct mgmt_rp_unpair_device rp;
2945 struct hci_conn_params *params;
2946 struct mgmt_pending_cmd *cmd;
2947 struct hci_conn *conn;
2951 memset(&rp, 0, sizeof(rp));
2952 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2953 rp.addr.type = cp->addr.type;
2955 if (!bdaddr_type_is_valid(cp->addr.type))
2956 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2957 MGMT_STATUS_INVALID_PARAMS,
2960 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2961 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2962 MGMT_STATUS_INVALID_PARAMS,
2967 if (!hdev_is_powered(hdev)) {
2968 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2969 MGMT_STATUS_NOT_POWERED, &rp,
2974 if (cp->addr.type == BDADDR_BREDR) {
2975 /* If disconnection is requested, then look up the
2976 * connection. If the remote device is connected, it
2977 * will be later used to terminate the link.
2979 * Setting it to NULL explicitly will cause no
2980 * termination of the link.
2983 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2988 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2990 err = mgmt_cmd_complete(sk, hdev->id,
2991 MGMT_OP_UNPAIR_DEVICE,
2992 MGMT_STATUS_NOT_PAIRED, &rp,
3000 /* LE address type */
3001 addr_type = le_addr_type(cp->addr.type);
3003 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3004 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3006 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3007 MGMT_STATUS_NOT_PAIRED, &rp,
3012 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3014 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3019 /* Defer clearing up the connection parameters until closing to
3020 * give a chance of keeping them if a repairing happens.
3022 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3024 /* Disable auto-connection parameters if present */
3025 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3027 if (params->explicit_connect)
3028 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3030 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3033 /* If disconnection is not requested, then clear the connection
3034 * variable so that the link is not terminated.
3036 if (!cp->disconnect)
3040 /* If the connection variable is set, then termination of the
3041 * link is requested.
3044 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3046 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3050 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3057 cmd->cmd_complete = addr_cmd_complete;
3059 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3060 unpair_device_complete);
3062 mgmt_pending_free(cmd);
3065 hci_dev_unlock(hdev);
3069 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3072 struct mgmt_cp_disconnect *cp = data;
3073 struct mgmt_rp_disconnect rp;
3074 struct mgmt_pending_cmd *cmd;
3075 struct hci_conn *conn;
3078 bt_dev_dbg(hdev, "sock %p", sk);
3080 memset(&rp, 0, sizeof(rp));
3081 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3082 rp.addr.type = cp->addr.type;
3084 if (!bdaddr_type_is_valid(cp->addr.type))
3085 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3086 MGMT_STATUS_INVALID_PARAMS,
3091 if (!test_bit(HCI_UP, &hdev->flags)) {
3092 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3093 MGMT_STATUS_NOT_POWERED, &rp,
3098 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3099 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3100 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3104 if (cp->addr.type == BDADDR_BREDR)
3105 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3108 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3109 le_addr_type(cp->addr.type));
3111 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3112 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3113 MGMT_STATUS_NOT_CONNECTED, &rp,
3118 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3124 cmd->cmd_complete = generic_cmd_complete;
3126 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3128 mgmt_pending_remove(cmd);
3131 hci_dev_unlock(hdev);
3135 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3137 switch (link_type) {
3140 switch (addr_type) {
3141 case ADDR_LE_DEV_PUBLIC:
3142 return BDADDR_LE_PUBLIC;
3145 /* Fallback to LE Random address type */
3146 return BDADDR_LE_RANDOM;
3150 /* Fallback to BR/EDR type */
3151 return BDADDR_BREDR;
3155 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3158 struct mgmt_rp_get_connections *rp;
3163 bt_dev_dbg(hdev, "sock %p", sk);
3167 if (!hdev_is_powered(hdev)) {
3168 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3169 MGMT_STATUS_NOT_POWERED);
3174 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3175 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3179 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3186 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3187 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3189 bacpy(&rp->addr[i].bdaddr, &c->dst);
3190 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3191 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3196 rp->conn_count = cpu_to_le16(i);
3198 /* Recalculate length in case of filtered SCO connections, etc */
3199 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3200 struct_size(rp, addr, i));
3205 hci_dev_unlock(hdev);
3209 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3210 struct mgmt_cp_pin_code_neg_reply *cp)
3212 struct mgmt_pending_cmd *cmd;
3215 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3220 cmd->cmd_complete = addr_cmd_complete;
3222 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3223 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3225 mgmt_pending_remove(cmd);
3230 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3233 struct hci_conn *conn;
3234 struct mgmt_cp_pin_code_reply *cp = data;
3235 struct hci_cp_pin_code_reply reply;
3236 struct mgmt_pending_cmd *cmd;
3239 bt_dev_dbg(hdev, "sock %p", sk);
3243 if (!hdev_is_powered(hdev)) {
3244 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3245 MGMT_STATUS_NOT_POWERED);
3249 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3251 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3252 MGMT_STATUS_NOT_CONNECTED);
3256 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3257 struct mgmt_cp_pin_code_neg_reply ncp;
3259 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3261 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3263 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3265 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3266 MGMT_STATUS_INVALID_PARAMS);
3271 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3277 cmd->cmd_complete = addr_cmd_complete;
3279 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3280 reply.pin_len = cp->pin_len;
3281 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3283 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3285 mgmt_pending_remove(cmd);
3288 hci_dev_unlock(hdev);
3292 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3295 struct mgmt_cp_set_io_capability *cp = data;
3297 bt_dev_dbg(hdev, "sock %p", sk);
3299 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3300 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3301 MGMT_STATUS_INVALID_PARAMS);
3305 hdev->io_capability = cp->io_capability;
3307 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3309 hci_dev_unlock(hdev);
3311 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3315 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3317 struct hci_dev *hdev = conn->hdev;
3318 struct mgmt_pending_cmd *cmd;
3320 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3321 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3324 if (cmd->user_data != conn)
3333 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3335 struct mgmt_rp_pair_device rp;
3336 struct hci_conn *conn = cmd->user_data;
3339 bacpy(&rp.addr.bdaddr, &conn->dst);
3340 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3342 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3343 status, &rp, sizeof(rp));
3345 /* So we don't get further callbacks for this connection */
3346 conn->connect_cfm_cb = NULL;
3347 conn->security_cfm_cb = NULL;
3348 conn->disconn_cfm_cb = NULL;
3350 hci_conn_drop(conn);
3352 /* The device is paired so there is no need to remove
3353 * its connection parameters anymore.
3355 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3362 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3364 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3365 struct mgmt_pending_cmd *cmd;
3367 cmd = find_pairing(conn);
3369 cmd->cmd_complete(cmd, status);
3370 mgmt_pending_remove(cmd);
3374 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3376 struct mgmt_pending_cmd *cmd;
3378 BT_DBG("status %u", status);
3380 cmd = find_pairing(conn);
3382 BT_DBG("Unable to find a pending command");
3386 cmd->cmd_complete(cmd, mgmt_status(status));
3387 mgmt_pending_remove(cmd);
3390 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3392 struct mgmt_pending_cmd *cmd;
3394 BT_DBG("status %u", status);
3399 cmd = find_pairing(conn);
3401 BT_DBG("Unable to find a pending command");
3405 cmd->cmd_complete(cmd, mgmt_status(status));
3406 mgmt_pending_remove(cmd);
3409 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3412 struct mgmt_cp_pair_device *cp = data;
3413 struct mgmt_rp_pair_device rp;
3414 struct mgmt_pending_cmd *cmd;
3415 u8 sec_level, auth_type;
3416 struct hci_conn *conn;
3419 bt_dev_dbg(hdev, "sock %p", sk);
3421 memset(&rp, 0, sizeof(rp));
3422 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3423 rp.addr.type = cp->addr.type;
3425 if (!bdaddr_type_is_valid(cp->addr.type))
3426 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3427 MGMT_STATUS_INVALID_PARAMS,
3430 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3431 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3432 MGMT_STATUS_INVALID_PARAMS,
3437 if (!hdev_is_powered(hdev)) {
3438 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3439 MGMT_STATUS_NOT_POWERED, &rp,
3444 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3445 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3446 MGMT_STATUS_ALREADY_PAIRED, &rp,
3451 sec_level = BT_SECURITY_MEDIUM;
3452 auth_type = HCI_AT_DEDICATED_BONDING;
3454 if (cp->addr.type == BDADDR_BREDR) {
3455 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3456 auth_type, CONN_REASON_PAIR_DEVICE,
3457 HCI_ACL_CONN_TIMEOUT);
3459 u8 addr_type = le_addr_type(cp->addr.type);
3460 struct hci_conn_params *p;
3462 /* When pairing a new device, it is expected to remember
3463 * this device for future connections. Adding the connection
3464 * parameter information ahead of time allows tracking
3465 * of the peripheral preferred values and will speed up any
3466 * further connection establishment.
3468 * If connection parameters already exist, then they
3469 * will be kept and this function does nothing.
3471 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3473 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3474 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3476 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3477 sec_level, HCI_LE_CONN_TIMEOUT,
3478 CONN_REASON_PAIR_DEVICE);
3484 if (PTR_ERR(conn) == -EBUSY)
3485 status = MGMT_STATUS_BUSY;
3486 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3487 status = MGMT_STATUS_NOT_SUPPORTED;
3488 else if (PTR_ERR(conn) == -ECONNREFUSED)
3489 status = MGMT_STATUS_REJECTED;
3491 status = MGMT_STATUS_CONNECT_FAILED;
3493 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3494 status, &rp, sizeof(rp));
3498 if (conn->connect_cfm_cb) {
3499 hci_conn_drop(conn);
3500 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3501 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3505 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3508 hci_conn_drop(conn);
3512 cmd->cmd_complete = pairing_complete;
3514 /* For LE, just connecting isn't a proof that the pairing finished */
3515 if (cp->addr.type == BDADDR_BREDR) {
3516 conn->connect_cfm_cb = pairing_complete_cb;
3517 conn->security_cfm_cb = pairing_complete_cb;
3518 conn->disconn_cfm_cb = pairing_complete_cb;
3520 conn->connect_cfm_cb = le_pairing_complete_cb;
3521 conn->security_cfm_cb = le_pairing_complete_cb;
3522 conn->disconn_cfm_cb = le_pairing_complete_cb;
3525 conn->io_capability = cp->io_cap;
3526 cmd->user_data = hci_conn_get(conn);
3528 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3529 hci_conn_security(conn, sec_level, auth_type, true)) {
3530 cmd->cmd_complete(cmd, 0);
3531 mgmt_pending_remove(cmd);
3537 hci_dev_unlock(hdev);
3541 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3544 struct mgmt_addr_info *addr = data;
3545 struct mgmt_pending_cmd *cmd;
3546 struct hci_conn *conn;
3549 bt_dev_dbg(hdev, "sock %p", sk);
3553 if (!hdev_is_powered(hdev)) {
3554 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3555 MGMT_STATUS_NOT_POWERED);
3559 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3561 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3562 MGMT_STATUS_INVALID_PARAMS);
3566 conn = cmd->user_data;
3568 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3569 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3570 MGMT_STATUS_INVALID_PARAMS);
3574 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3575 mgmt_pending_remove(cmd);
3577 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3578 addr, sizeof(*addr));
3580 /* Since user doesn't want to proceed with the connection, abort any
3581 * ongoing pairing and then terminate the link if it was created
3582 * because of the pair device action.
3584 if (addr->type == BDADDR_BREDR)
3585 hci_remove_link_key(hdev, &addr->bdaddr);
3587 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3588 le_addr_type(addr->type));
3590 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3591 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3594 hci_dev_unlock(hdev);
3598 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3599 struct mgmt_addr_info *addr, u16 mgmt_op,
3600 u16 hci_op, __le32 passkey)
3602 struct mgmt_pending_cmd *cmd;
3603 struct hci_conn *conn;
3608 if (!hdev_is_powered(hdev)) {
3609 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3610 MGMT_STATUS_NOT_POWERED, addr,
3615 if (addr->type == BDADDR_BREDR)
3616 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3618 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3619 le_addr_type(addr->type));
3622 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3623 MGMT_STATUS_NOT_CONNECTED, addr,
3628 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3629 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3631 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3632 MGMT_STATUS_SUCCESS, addr,
3635 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3636 MGMT_STATUS_FAILED, addr,
3642 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3648 cmd->cmd_complete = addr_cmd_complete;
3650 /* Continue with pairing via HCI */
3651 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3652 struct hci_cp_user_passkey_reply cp;
3654 bacpy(&cp.bdaddr, &addr->bdaddr);
3655 cp.passkey = passkey;
3656 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3658 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3662 mgmt_pending_remove(cmd);
3665 hci_dev_unlock(hdev);
3669 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3670 void *data, u16 len)
3672 struct mgmt_cp_pin_code_neg_reply *cp = data;
3674 bt_dev_dbg(hdev, "sock %p", sk);
3676 return user_pairing_resp(sk, hdev, &cp->addr,
3677 MGMT_OP_PIN_CODE_NEG_REPLY,
3678 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3681 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3684 struct mgmt_cp_user_confirm_reply *cp = data;
3686 bt_dev_dbg(hdev, "sock %p", sk);
3688 if (len != sizeof(*cp))
3689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3690 MGMT_STATUS_INVALID_PARAMS);
3692 return user_pairing_resp(sk, hdev, &cp->addr,
3693 MGMT_OP_USER_CONFIRM_REPLY,
3694 HCI_OP_USER_CONFIRM_REPLY, 0);
3697 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3698 void *data, u16 len)
3700 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3702 bt_dev_dbg(hdev, "sock %p", sk);
3704 return user_pairing_resp(sk, hdev, &cp->addr,
3705 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3706 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3709 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3712 struct mgmt_cp_user_passkey_reply *cp = data;
3714 bt_dev_dbg(hdev, "sock %p", sk);
3716 return user_pairing_resp(sk, hdev, &cp->addr,
3717 MGMT_OP_USER_PASSKEY_REPLY,
3718 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3721 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3722 void *data, u16 len)
3724 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3726 bt_dev_dbg(hdev, "sock %p", sk);
3728 return user_pairing_resp(sk, hdev, &cp->addr,
3729 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3730 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3733 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3735 struct adv_info *adv_instance;
3737 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3741 /* stop if current instance doesn't need to be changed */
3742 if (!(adv_instance->flags & flags))
3745 cancel_adv_timeout(hdev);
3747 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3751 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3756 static int name_changed_sync(struct hci_dev *hdev, void *data)
3758 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3761 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3763 struct mgmt_pending_cmd *cmd = data;
3764 struct mgmt_cp_set_local_name *cp = cmd->param;
3765 u8 status = mgmt_status(err);
3767 bt_dev_dbg(hdev, "err %d", err);
3769 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3773 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3776 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3779 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3780 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3783 mgmt_pending_remove(cmd);
3786 static int set_name_sync(struct hci_dev *hdev, void *data)
3788 if (lmp_bredr_capable(hdev)) {
3789 hci_update_name_sync(hdev);
3790 hci_update_eir_sync(hdev);
3793 /* The name is stored in the scan response data and so
3794 * no need to update the advertising data here.
3796 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3797 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3802 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3805 struct mgmt_cp_set_local_name *cp = data;
3806 struct mgmt_pending_cmd *cmd;
3809 bt_dev_dbg(hdev, "sock %p", sk);
3813 /* If the old values are the same as the new ones just return a
3814 * direct command complete event.
3816 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3817 !memcmp(hdev->short_name, cp->short_name,
3818 sizeof(hdev->short_name))) {
3819 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3824 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3826 if (!hdev_is_powered(hdev)) {
3827 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3829 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3834 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3835 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3836 ext_info_changed(hdev, sk);
3841 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3845 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3849 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3850 MGMT_STATUS_FAILED);
3853 mgmt_pending_remove(cmd);
3858 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3861 hci_dev_unlock(hdev);
3865 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3867 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3870 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3873 struct mgmt_cp_set_appearance *cp = data;
3877 bt_dev_dbg(hdev, "sock %p", sk);
3879 if (!lmp_le_capable(hdev))
3880 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3881 MGMT_STATUS_NOT_SUPPORTED);
3883 appearance = le16_to_cpu(cp->appearance);
3887 if (hdev->appearance != appearance) {
3888 hdev->appearance = appearance;
3890 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3891 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3894 ext_info_changed(hdev, sk);
3897 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3900 hci_dev_unlock(hdev);
3905 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3906 void *data, u16 len)
3908 struct mgmt_rp_get_phy_configuration rp;
3910 bt_dev_dbg(hdev, "sock %p", sk);
3914 memset(&rp, 0, sizeof(rp));
3916 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3917 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3918 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3920 hci_dev_unlock(hdev);
3922 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3926 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3928 struct mgmt_ev_phy_configuration_changed ev;
3930 memset(&ev, 0, sizeof(ev));
3932 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3934 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3938 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3940 struct mgmt_pending_cmd *cmd = data;
3941 struct sk_buff *skb = cmd->skb;
3942 u8 status = mgmt_status(err);
3944 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3949 status = MGMT_STATUS_FAILED;
3950 else if (IS_ERR(skb))
3951 status = mgmt_status(PTR_ERR(skb));
3953 status = mgmt_status(skb->data[0]);
3956 bt_dev_dbg(hdev, "status %d", status);
3959 mgmt_cmd_status(cmd->sk, hdev->id,
3960 MGMT_OP_SET_PHY_CONFIGURATION, status);
3962 mgmt_cmd_complete(cmd->sk, hdev->id,
3963 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3966 mgmt_phy_configuration_changed(hdev, cmd->sk);
3969 if (skb && !IS_ERR(skb))
3972 mgmt_pending_remove(cmd);
3975 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3977 struct mgmt_pending_cmd *cmd = data;
3978 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3979 struct hci_cp_le_set_default_phy cp_phy;
3980 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3982 memset(&cp_phy, 0, sizeof(cp_phy));
3984 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3985 cp_phy.all_phys |= 0x01;
3987 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3988 cp_phy.all_phys |= 0x02;
3990 if (selected_phys & MGMT_PHY_LE_1M_TX)
3991 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3993 if (selected_phys & MGMT_PHY_LE_2M_TX)
3994 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3996 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3997 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3999 if (selected_phys & MGMT_PHY_LE_1M_RX)
4000 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4002 if (selected_phys & MGMT_PHY_LE_2M_RX)
4003 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4005 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4006 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4008 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4009 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4014 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4015 void *data, u16 len)
4017 struct mgmt_cp_set_phy_configuration *cp = data;
4018 struct mgmt_pending_cmd *cmd;
4019 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4020 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4021 bool changed = false;
4024 bt_dev_dbg(hdev, "sock %p", sk);
4026 configurable_phys = get_configurable_phys(hdev);
4027 supported_phys = get_supported_phys(hdev);
4028 selected_phys = __le32_to_cpu(cp->selected_phys);
4030 if (selected_phys & ~supported_phys)
4031 return mgmt_cmd_status(sk, hdev->id,
4032 MGMT_OP_SET_PHY_CONFIGURATION,
4033 MGMT_STATUS_INVALID_PARAMS);
4035 unconfigure_phys = supported_phys & ~configurable_phys;
4037 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4038 return mgmt_cmd_status(sk, hdev->id,
4039 MGMT_OP_SET_PHY_CONFIGURATION,
4040 MGMT_STATUS_INVALID_PARAMS);
4042 if (selected_phys == get_selected_phys(hdev))
4043 return mgmt_cmd_complete(sk, hdev->id,
4044 MGMT_OP_SET_PHY_CONFIGURATION,
4049 if (!hdev_is_powered(hdev)) {
4050 err = mgmt_cmd_status(sk, hdev->id,
4051 MGMT_OP_SET_PHY_CONFIGURATION,
4052 MGMT_STATUS_REJECTED);
4056 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4057 err = mgmt_cmd_status(sk, hdev->id,
4058 MGMT_OP_SET_PHY_CONFIGURATION,
4063 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4064 pkt_type |= (HCI_DH3 | HCI_DM3);
4066 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4068 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4069 pkt_type |= (HCI_DH5 | HCI_DM5);
4071 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4073 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4074 pkt_type &= ~HCI_2DH1;
4076 pkt_type |= HCI_2DH1;
4078 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4079 pkt_type &= ~HCI_2DH3;
4081 pkt_type |= HCI_2DH3;
4083 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4084 pkt_type &= ~HCI_2DH5;
4086 pkt_type |= HCI_2DH5;
4088 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4089 pkt_type &= ~HCI_3DH1;
4091 pkt_type |= HCI_3DH1;
4093 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4094 pkt_type &= ~HCI_3DH3;
4096 pkt_type |= HCI_3DH3;
4098 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4099 pkt_type &= ~HCI_3DH5;
4101 pkt_type |= HCI_3DH5;
4103 if (pkt_type != hdev->pkt_type) {
4104 hdev->pkt_type = pkt_type;
4108 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4109 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4111 mgmt_phy_configuration_changed(hdev, sk);
4113 err = mgmt_cmd_complete(sk, hdev->id,
4114 MGMT_OP_SET_PHY_CONFIGURATION,
4120 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4125 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4126 set_default_phy_complete);
4129 err = mgmt_cmd_status(sk, hdev->id,
4130 MGMT_OP_SET_PHY_CONFIGURATION,
4131 MGMT_STATUS_FAILED);
4134 mgmt_pending_remove(cmd);
4138 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the controller's list of
 * blocked keys with the list supplied by userspace.  The declared
 * key_count is validated both against the maximum count that can fit
 * in a u16-sized payload and against the actual wire length before any
 * state is modified.
 */
4143 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4146 int err = MGMT_STATUS_SUCCESS;
4147 struct mgmt_cp_set_blocked_keys *keys = data;
/* Upper bound on key_count such that the full command still fits in a
 * u16-length payload.
 */
4148 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4149 sizeof(struct mgmt_blocked_key_info));
4150 u16 key_count, expected_len;
4153 bt_dev_dbg(hdev, "sock %p", sk);
4155 key_count = __le16_to_cpu(keys->key_count);
4156 if (key_count > max_key_count) {
4157 bt_dev_err(hdev, "too big key_count value %u", key_count);
4158 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4159 MGMT_STATUS_INVALID_PARAMS);
/* struct_size() computes sizeof(*keys) plus the flexible array of
 * key_count entries with overflow checking; the wire length must match
 * exactly.
 */
4162 expected_len = struct_size(keys, keys, key_count);
4163 if (expected_len != len) {
4164 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4167 MGMT_STATUS_INVALID_PARAMS);
/* Drop the previous list, then rebuild it from the command payload. */
4172 hci_blocked_keys_clear(hdev);
4174 for (i = 0; i < key_count; ++i) {
4175 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
/* Allocation failure: report NO_RESOURCES (error path partially
 * elided in this view -- confirm against full file).
 */
4178 err = MGMT_STATUS_NO_RESOURCES;
4182 b->type = keys->keys[i].type;
4183 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
/* RCU insertion: readers may traverse hdev->blocked_keys locklessly. */
4184 list_add_rcu(&b->list, &hdev->blocked_keys);
4186 hci_dev_unlock(hdev);
4188 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband-speech
 * (mSBC over SCO/eSCO) flag.  Only available when the driver declares
 * HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, and the setting may not be
 * flipped while the controller is powered.
 */
4192 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4193 void *data, u16 len)
4195 struct mgmt_mode *cp = data;
4197 bool changed = false;
4199 bt_dev_dbg(hdev, "sock %p", sk);
4201 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4202 return mgmt_cmd_status(sk, hdev->id,
4203 MGMT_OP_SET_WIDEBAND_SPEECH,
4204 MGMT_STATUS_NOT_SUPPORTED)
4206 if (cp->val != 0x00 && cp->val != 0x01)
4207 return mgmt_cmd_status(sk, hdev->id,
4208 MGMT_OP_SET_WIDEBAND_SPEECH,
4209 MGMT_STATUS_INVALID_PARAMS);
/* Changing the value on a powered controller is rejected. */
4213 if (hdev_is_powered(hdev) &&
4214 !!cp->val != hci_dev_test_flag(hdev,
4215 HCI_WIDEBAND_SPEECH_ENABLED)) {
4216 err = mgmt_cmd_status(sk, hdev->id,
4217 MGMT_OP_SET_WIDEBAND_SPEECH,
4218 MGMT_STATUS_REJECTED);
/* test-and-set / test-and-clear report whether the flag actually
 * changed, so New Settings is only emitted on a real transition.
 */
4223 changed = !hci_dev_test_and_set_flag(hdev,
4224 HCI_WIDEBAND_SPEECH_ENABLED);
4226 changed = hci_dev_test_and_clear_flag(hdev,
4227 HCI_WIDEBAND_SPEECH_ENABLED);
4229 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4234 err = new_settings(hdev, sk);
4237 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: report security capabilities as
 * a sequence of EIR-style (type, length, value) entries: security
 * flags, max encryption key sizes and, when available, the LE TX power
 * range.
 */
4241 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4242 void *data, u16 data_len)
4245 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4248 u8 tx_power_range[2];
4250 bt_dev_dbg(hdev, "sock %p", sk);
4252 memset(&buf, 0, sizeof(buf));
4256 /* When the Read Simple Pairing Options command is supported, then
4257 * the remote public key validation is supported.
4259 * Alternatively, when Microsoft extensions are available, they can
4260 * indicate support for public key validation as well.
/* hdev->commands[41] bit 3 == HCI Read Simple Pairing Options
 * supported (per the HCI supported-commands bitmask).
 */
4262 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4263 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4265 flags |= 0x02; /* Remote public key validation (LE) */
4267 /* When the Read Encryption Key Size command is supported, then the
4268 * encryption key size is enforced.
4270 if (hdev->commands[20] & 0x10)
4271 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4273 flags |= 0x08; /* Encryption key size enforcement (LE) */
4275 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4278 /* When the Read Simple Pairing Options command is supported, then
4279 * also max encryption key size information is provided.
4281 if (hdev->commands[41] & 0x08)
4282 cap_len = eir_append_le16(rp->cap, cap_len,
4283 MGMT_CAP_MAX_ENC_KEY_SIZE,
4284 hdev->max_enc_key_size);
/* SMP (LE) max encryption key size is a protocol constant. */
4286 cap_len = eir_append_le16(rp->cap, cap_len,
4287 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4288 SMP_MAX_ENC_KEY_SIZE);
4290 /* Append the min/max LE tx power parameters if we were able to fetch
4291 * it from the controller
/* hdev->commands[38] bit 7 == LE Read Transmit Power supported. */
4293 if (hdev->commands[38] & 0x80) {
4294 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4295 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4296 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4300 rp->cap_len = cpu_to_le16(cap_len);
4302 hci_dev_unlock(hdev);
4304 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4305 rp, sizeof(*rp) + cap_len);
/* UUIDs identifying each experimental ("exp") feature exposed through
 * MGMT_OP_READ_EXP_FEATURES_INFO / MGMT_OP_SET_EXP_FEATURE.  The byte
 * arrays are stored in little-endian order, i.e. reversed relative to
 * the canonical string form quoted above each one.
 */
4308 #ifdef CONFIG_BT_FEATURE_DEBUG
4309 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4310 static const u8 debug_uuid[16] = {
4311 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4312 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4316 /* 330859bc-7506-492d-9370-9a6f0614037f */
4317 static const u8 quality_report_uuid[16] = {
4318 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4319 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4322 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4323 static const u8 offload_codecs_uuid[16] = {
4324 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4325 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4328 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4329 static const u8 le_simultaneous_roles_uuid[16] = {
4330 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4331 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4334 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4335 static const u8 rpa_resolution_uuid[16] = {
4336 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4337 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4340 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4341 static const u8 iso_socket_uuid[16] = {
4342 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4343 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4346 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4347 static const u8 mgmt_mesh_uuid[16] = {
4348 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4349 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: build the list of
 * experimental features applicable to this index (hdev may be NULL for
 * the non-controller index) together with each feature's current flag
 * bits, and enable EXP_FEATURE_CHANGED events on this socket.
 */
4352 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4353 void *data, u16 data_len)
4355 struct mgmt_rp_read_exp_features_info *rp;
4361 bt_dev_dbg(hdev, "sock %p", sk);
4363 /* Enough space for 7 features */
4364 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4365 rp = kzalloc(len, GFP_KERNEL);
4369 #ifdef CONFIG_BT_FEATURE_DEBUG
/* BIT(0) == feature currently enabled, for every entry below. */
4371 flags = bt_dbg_get() ? BIT(0) : 0;
4373 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4374 rp->features[idx].flags = cpu_to_le32(flags);
4379 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4380 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4385 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4386 rp->features[idx].flags = cpu_to_le32(flags);
4390 if (hdev && ll_privacy_capable(hdev)) {
/* BIT(1) for LL privacy signals "changing this alters supported
 * settings" (see the rpa_resolution set handler).
 */
4391 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4392 flags = BIT(0) | BIT(1);
4396 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4397 rp->features[idx].flags = cpu_to_le32(flags);
4401 if (hdev && (aosp_has_quality_report(hdev) ||
4402 hdev->set_quality_report)) {
4403 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4408 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4409 rp->features[idx].flags = cpu_to_le32(flags);
4413 if (hdev && hdev->get_data_path_id) {
4414 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4419 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4420 rp->features[idx].flags = cpu_to_le32(flags);
4424 if (IS_ENABLED(CONFIG_BT_LE)) {
4425 flags = iso_enabled() ? BIT(0) : 0;
4426 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4427 rp->features[idx].flags = cpu_to_le32(flags);
4431 if (hdev && lmp_le_capable(hdev)) {
4432 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4437 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4438 rp->features[idx].flags = cpu_to_le32(flags);
4442 rp->feature_count = cpu_to_le16(idx);
4444 /* After reading the experimental features information, enable
4445 * the events to update client on any future change.
4447 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4449 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4450 MGMT_OP_READ_EXP_FEATURES_INFO,
4451 0, rp, sizeof(*rp) + (20 * idx));
/* Emit EXP_FEATURE_CHANGED for the LL-privacy (RPA resolution)
 * feature.  Unlike the generic helper, BIT(1) is always set in the
 * event flags to signal that the supported settings changed, and the
 * device-privacy conn flag is updated as a side effect.
 */
4457 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4460 struct mgmt_ev_exp_feature_changed ev;
4462 memset(&ev, 0, sizeof(ev));
4463 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4464 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4466 // Do we need to be atomic with the conn_flags?
4467 if (enabled && privacy_mode_capable(hdev))
4468 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4470 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
/* Only sockets that opted into exp-feature events receive this;
 * "skip" is excluded (typically the requesting socket).
 */
4472 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4474 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Generic EXP_FEATURE_CHANGED notification: broadcast the given
 * feature UUID with BIT(0) reflecting its new enabled state to all
 * sockets that enabled HCI_MGMT_EXP_FEATURE_EVENTS, except "skip".
 */
4478 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4479 bool enabled, struct sock *skip)
4481 struct mgmt_ev_exp_feature_changed ev;
4483 memset(&ev, 0, sizeof(ev));
4484 memcpy(ev.uuid, uuid, 16);
4485 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4487 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4489 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Initializer for one struct mgmt_exp_feature entry, pairing a
 * feature UUID with its MGMT_OP_SET_EXP_FEATURE handler.
 */
4492 #define EXP_FEAT(_uuid, _set_func) \
4495 .set_func = _set_func, \
4498 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID disables the experimental features it
 * covers (kernel debug when built in, and LL privacy on an unpowered
 * controller), emitting EXP_FEATURE_CHANGED for each one that actually
 * changed.  Responds with the zero UUID and zero flags.
 */
4499 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4500 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4502 struct mgmt_rp_set_exp_feature rp;
4504 memset(rp.uuid, 0, 16);
4505 rp.flags = cpu_to_le32(0);
4507 #ifdef CONFIG_BT_FEATURE_DEBUG
4509 bool changed = bt_dbg_get();
4514 exp_feature_changed(NULL, ZERO_KEY, false, sk);
/* LL privacy can only be toggled while the controller is powered
 * off.
 */
4518 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4521 changed = hci_dev_test_and_clear_flag(hdev,
4522 HCI_ENABLE_LL_PRIVACY);
4524 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4528 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4530 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4531 MGMT_OP_SET_EXP_FEATURE, 0,
4535 #ifdef CONFIG_BT_FEATURE_DEBUG
/* SET_EXP_FEATURE handler for the kernel debug feature.  Must be sent
 * on the non-controller index; the single parameter octet is a boolean
 * on/off value for bt_dbg.
 */
4536 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4537 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4539 struct mgmt_rp_set_exp_feature rp;
4544 /* Command requires to use the non-controller index */
4546 return mgmt_cmd_status(sk, hdev->id,
4547 MGMT_OP_SET_EXP_FEATURE,
4548 MGMT_STATUS_INVALID_INDEX);
4550 /* Parameters are limited to a single octet */
4551 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4552 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4553 MGMT_OP_SET_EXP_FEATURE,
4554 MGMT_STATUS_INVALID_PARAMS);
4556 /* Only boolean on/off is supported */
4557 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4558 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4559 MGMT_OP_SET_EXP_FEATURE,
4560 MGMT_STATUS_INVALID_PARAMS);
4562 val = !!cp->param[0];
/* "changed" is true only when the new value differs from the current
 * bt_dbg state, gating the EXP_FEATURE_CHANGED broadcast below.
 */
4563 changed = val ? !bt_dbg_get() : bt_dbg_get();
4566 memcpy(rp.uuid, debug_uuid, 16);
4567 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4569 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4571 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4572 MGMT_OP_SET_EXP_FEATURE, 0,
4576 exp_feature_changed(hdev, debug_uuid, val, sk);
/* SET_EXP_FEATURE handler for the mesh experimental feature.  Requires
 * a controller index and a single boolean octet; toggles
 * HCI_MESH_EXPERIMENTAL and, when disabling, also clears HCI_MESH.
 */
4582 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4583 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4585 struct mgmt_rp_set_exp_feature rp;
4589 /* Command requires to use the controller index */
4591 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4592 MGMT_OP_SET_EXP_FEATURE,
4593 MGMT_STATUS_INVALID_INDEX);
4595 /* Parameters are limited to a single octet */
4596 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4597 return mgmt_cmd_status(sk, hdev->id,
4598 MGMT_OP_SET_EXP_FEATURE,
4599 MGMT_STATUS_INVALID_PARAMS);
4601 /* Only boolean on/off is supported */
4602 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4603 return mgmt_cmd_status(sk, hdev->id,
4604 MGMT_OP_SET_EXP_FEATURE,
4605 MGMT_STATUS_INVALID_PARAMS);
4607 val = !!cp->param[0];
4610 changed = !hci_dev_test_and_set_flag(hdev,
4611 HCI_MESH_EXPERIMENTAL);
/* Disabling the experimental flag also turns off active mesh mode. */
4613 hci_dev_clear_flag(hdev, HCI_MESH);
4614 changed = hci_dev_test_and_clear_flag(hdev,
4615 HCI_MESH_EXPERIMENTAL);
4618 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4619 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4621 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4623 err = mgmt_cmd_complete(sk, hdev->id,
4624 MGMT_OP_SET_EXP_FEATURE, 0,
4628 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
/* SET_EXP_FEATURE handler for LL privacy / RPA resolution.  Requires a
 * controller index, rejects changes while powered, and reports BIT(1)
 * in the response flags because toggling this feature also changes the
 * supported settings.
 */
4633 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4634 struct mgmt_cp_set_exp_feature *cp,
4637 struct mgmt_rp_set_exp_feature rp;
4642 /* Command requires to use the controller index */
4644 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4645 MGMT_OP_SET_EXP_FEATURE,
4646 MGMT_STATUS_INVALID_INDEX);
4648 /* Changes can only be made when controller is powered down */
4649 if (hdev_is_powered(hdev))
4650 return mgmt_cmd_status(sk, hdev->id,
4651 MGMT_OP_SET_EXP_FEATURE,
4652 MGMT_STATUS_REJECTED);
4654 /* Parameters are limited to a single octet */
4655 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4656 return mgmt_cmd_status(sk, hdev->id,
4657 MGMT_OP_SET_EXP_FEATURE,
4658 MGMT_STATUS_INVALID_PARAMS);
4660 /* Only boolean on/off is supported */
4661 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4662 return mgmt_cmd_status(sk, hdev->id,
4663 MGMT_OP_SET_EXP_FEATURE,
4664 MGMT_STATUS_INVALID_PARAMS);
4666 val = !!cp->param[0];
4669 changed = !hci_dev_test_and_set_flag(hdev,
4670 HCI_ENABLE_LL_PRIVACY);
/* Advertising is incompatible with LL privacy here, so clear it. */
4671 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4673 /* Enable LL privacy + supported settings changed */
4674 flags = BIT(0) | BIT(1);
4676 changed = hci_dev_test_and_clear_flag(hdev,
4677 HCI_ENABLE_LL_PRIVACY);
4679 /* Disable LL privacy + supported settings changed */
4683 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4684 rp.flags = cpu_to_le32(flags);
4686 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4688 err = mgmt_cmd_complete(sk, hdev->id,
4689 MGMT_OP_SET_EXP_FEATURE, 0,
4693 exp_ll_privacy_feature_changed(val, hdev, sk);
/* SET_EXP_FEATURE handler for the quality-report feature.  Uses either
 * the driver's set_quality_report hook or the AOSP vendor extension to
 * actually enable/disable reporting, under the request-sync lock, and
 * mirrors the result in HCI_QUALITY_REPORT.
 */
4698 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4699 struct mgmt_cp_set_exp_feature *cp,
4702 struct mgmt_rp_set_exp_feature rp;
4706 /* Command requires to use a valid controller index */
4708 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4709 MGMT_OP_SET_EXP_FEATURE,
4710 MGMT_STATUS_INVALID_INDEX);
4712 /* Parameters are limited to a single octet */
4713 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4714 return mgmt_cmd_status(sk, hdev->id,
4715 MGMT_OP_SET_EXP_FEATURE,
4716 MGMT_STATUS_INVALID_PARAMS);
4718 /* Only boolean on/off is supported */
4719 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4720 return mgmt_cmd_status(sk, hdev->id,
4721 MGMT_OP_SET_EXP_FEATURE,
4722 MGMT_STATUS_INVALID_PARAMS);
/* req_sync lock serializes against other synchronous HCI requests
 * while the vendor/AOSP hook runs.
 */
4724 hci_req_sync_lock(hdev);
4726 val = !!cp->param[0];
4727 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4729 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4730 err = mgmt_cmd_status(sk, hdev->id,
4731 MGMT_OP_SET_EXP_FEATURE,
4732 MGMT_STATUS_NOT_SUPPORTED);
4733 goto unlock_quality_report;
/* Prefer the driver hook over the AOSP vendor command. */
4737 if (hdev->set_quality_report)
4738 err = hdev->set_quality_report(hdev, val);
4740 err = aosp_set_quality_report(hdev, val);
4743 err = mgmt_cmd_status(sk, hdev->id,
4744 MGMT_OP_SET_EXP_FEATURE,
4745 MGMT_STATUS_FAILED);
4746 goto unlock_quality_report;
4750 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4752 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4755 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4757 memcpy(rp.uuid, quality_report_uuid, 16);
4758 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4759 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4761 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4765 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4767 unlock_quality_report:
4768 hci_req_sync_unlock(hdev);
/* SET_EXP_FEATURE handler for offloaded codecs.  Only valid when the
 * driver provides get_data_path_id; the boolean parameter toggles
 * HCI_OFFLOAD_CODECS_ENABLED.
 */
4772 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4773 struct mgmt_cp_set_exp_feature *cp,
4778 struct mgmt_rp_set_exp_feature rp;
4780 /* Command requires to use a valid controller index */
4782 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4783 MGMT_OP_SET_EXP_FEATURE,
4784 MGMT_STATUS_INVALID_INDEX);
4786 /* Parameters are limited to a single octet */
4787 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4788 return mgmt_cmd_status(sk, hdev->id,
4789 MGMT_OP_SET_EXP_FEATURE,
4790 MGMT_STATUS_INVALID_PARAMS);
4792 /* Only boolean on/off is supported */
4793 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4794 return mgmt_cmd_status(sk, hdev->id,
4795 MGMT_OP_SET_EXP_FEATURE,
4796 MGMT_STATUS_INVALID_PARAMS);
4798 val = !!cp->param[0];
4799 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
/* get_data_path_id is the driver capability gate for codec offload. */
4801 if (!hdev->get_data_path_id) {
4802 return mgmt_cmd_status(sk, hdev->id,
4803 MGMT_OP_SET_EXP_FEATURE,
4804 MGMT_STATUS_NOT_SUPPORTED);
4809 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4811 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4814 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4817 memcpy(rp.uuid, offload_codecs_uuid, 16);
4818 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4819 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4820 err = mgmt_cmd_complete(sk, hdev->id,
4821 MGMT_OP_SET_EXP_FEATURE, 0,
4825 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
/* SET_EXP_FEATURE handler for simultaneous LE central+peripheral
 * roles.  Requires controller support (hci_dev_le_state_simultaneous)
 * and toggles HCI_LE_SIMULTANEOUS_ROLES.
 */
4830 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4831 struct mgmt_cp_set_exp_feature *cp,
4836 struct mgmt_rp_set_exp_feature rp;
4838 /* Command requires to use a valid controller index */
4840 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4841 MGMT_OP_SET_EXP_FEATURE,
4842 MGMT_STATUS_INVALID_INDEX);
4844 /* Parameters are limited to a single octet */
4845 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4846 return mgmt_cmd_status(sk, hdev->id,
4847 MGMT_OP_SET_EXP_FEATURE,
4848 MGMT_STATUS_INVALID_PARAMS);
4850 /* Only boolean on/off is supported */
4851 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4852 return mgmt_cmd_status(sk, hdev->id,
4853 MGMT_OP_SET_EXP_FEATURE,
4854 MGMT_STATUS_INVALID_PARAMS);
4856 val = !!cp->param[0];
4857 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4859 if (!hci_dev_le_state_simultaneous(hdev)) {
4860 return mgmt_cmd_status(sk, hdev->id,
4861 MGMT_OP_SET_EXP_FEATURE,
4862 MGMT_STATUS_NOT_SUPPORTED);
4867 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4869 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4872 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4875 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4876 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4877 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4878 err = mgmt_cmd_complete(sk, hdev->id,
4879 MGMT_OP_SET_EXP_FEATURE, 0,
4883 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
/* SET_EXP_FEATURE handler for the ISO socket feature.  This one is
 * global rather than per-controller, so it must be sent on the
 * non-controller index; the response is likewise sent on
 * MGMT_INDEX_NONE.
 */
4889 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4890 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4892 struct mgmt_rp_set_exp_feature rp;
4893 bool val, changed = false;
4896 /* Command requires to use the non-controller index */
4898 return mgmt_cmd_status(sk, hdev->id,
4899 MGMT_OP_SET_EXP_FEATURE,
4900 MGMT_STATUS_INVALID_INDEX);
4902 /* Parameters are limited to a single octet */
4903 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4904 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4905 MGMT_OP_SET_EXP_FEATURE,
4906 MGMT_STATUS_INVALID_PARAMS);
4908 /* Only boolean on/off is supported */
4909 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4910 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4911 MGMT_OP_SET_EXP_FEATURE,
4912 MGMT_STATUS_INVALID_PARAMS);
/* NOTE(review): ISO register/unregister calls fall in lines not
 * visible here -- confirm against the full file.
 */
4914 val = cp->param[0] ? true : false;
4923 memcpy(rp.uuid, iso_socket_uuid, 16);
4924 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4926 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4928 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4929 MGMT_OP_SET_EXP_FEATURE, 0,
4933 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
/* Dispatch table mapping experimental-feature UUIDs to their
 * SET_EXP_FEATURE handlers; terminated by a NULL entry so
 * set_exp_feature() can iterate until .uuid is NULL.
 */
4939 static const struct mgmt_exp_feature {
4941 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4942 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4943 } exp_features[] = {
4944 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4945 #ifdef CONFIG_BT_FEATURE_DEBUG
4946 EXP_FEAT(debug_uuid, set_debug_func),
4948 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4949 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4950 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4951 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4952 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4954 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4957 /* end with a null feature */
4958 EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE entry point: look the requested UUID up in
 * exp_features[] and delegate to the matching handler, or report
 * NOT_SUPPORTED for an unknown UUID.
 */
4961 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4962 void *data, u16 data_len)
4964 struct mgmt_cp_set_exp_feature *cp = data;
4967 bt_dev_dbg(hdev, "sock %p", sk);
4969 for (i = 0; exp_features[i].uuid; i++) {
4970 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4971 return exp_features[i].set_func(sk, hdev, cp, data_len);
/* hdev may be NULL when the command targets the non-controller
 * index.
 */
4974 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4975 MGMT_OP_SET_EXP_FEATURE,
4976 MGMT_STATUS_NOT_SUPPORTED);
/* Compute the device-flag mask supported for a specific connection
 * parameters entry, starting from hdev->conn_flags and masking out
 * REMOTE_WAKEUP when it cannot be honoured for RPA-using devices.
 */
4979 static u32 get_params_flags(struct hci_dev *hdev,
4980 struct hci_conn_params *params)
4982 u32 flags = hdev->conn_flags;
4984 /* Devices using RPAs can only be programmed in the acceptlist if
4985 * LL Privacy has been enable otherwise they cannot mark
4986 * HCI_CONN_FLAG_REMOTE_WAKEUP.
/* An IRK on record means the device uses resolvable private
 * addresses.
 */
4988 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
4989 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
4990 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
/* MGMT_OP_GET_DEVICE_FLAGS handler: report supported and current
 * device flags for a BR/EDR accept-list entry or an LE conn-params
 * entry.  Status stays INVALID_PARAMS unless a matching entry is
 * found.
 */
4995 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4998 struct mgmt_cp_get_device_flags *cp = data;
4999 struct mgmt_rp_get_device_flags rp;
5000 struct bdaddr_list_with_flags *br_params;
5001 struct hci_conn_params *params;
5002 u32 supported_flags;
5003 u32 current_flags = 0;
5004 u8 status = MGMT_STATUS_INVALID_PARAMS;
5006 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5007 &cp->addr.bdaddr, cp->addr.type);
5011 supported_flags = hdev->conn_flags;
5013 memset(&rp, 0, sizeof(rp));
5015 if (cp->addr.type == BDADDR_BREDR) {
5016 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5022 current_flags = br_params->flags;
/* LE: flags live on the connection-parameters entry instead. */
5024 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5025 le_addr_type(cp->addr.type));
/* Per-device supported flags may be narrower than hdev's. */
5029 supported_flags = get_params_flags(hdev, params);
5030 current_flags = params->flags;
5033 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5034 rp.addr.type = cp->addr.type;
5035 rp.supported_flags = cpu_to_le32(supported_flags);
5036 rp.current_flags = cpu_to_le32(current_flags);
5038 status = MGMT_STATUS_SUCCESS;
5041 hci_dev_unlock(hdev);
5043 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast MGMT_EV_DEVICE_FLAGS_CHANGED with the given address and
 * flag values, skipping the socket that triggered the change.
 */
5047 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5048 bdaddr_t *bdaddr, u8 bdaddr_type,
5049 u32 supported_flags, u32 current_flags)
5051 struct mgmt_ev_device_flags_changed ev;
5053 bacpy(&ev.addr.bdaddr, bdaddr);
5054 ev.addr.type = bdaddr_type;
5055 ev.supported_flags = cpu_to_le32(supported_flags);
5056 ev.current_flags = cpu_to_le32(current_flags);
5058 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: update the flags stored on a
 * BR/EDR accept-list entry or an LE conn-params entry, after checking
 * the requested bits against the supported mask (both globally and,
 * for LE, per-device).  Emits DEVICE_FLAGS_CHANGED on success.
 */
5061 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5064 struct mgmt_cp_set_device_flags *cp = data;
5065 struct bdaddr_list_with_flags *br_params;
5066 struct hci_conn_params *params;
5067 u8 status = MGMT_STATUS_INVALID_PARAMS;
5068 u32 supported_flags;
5069 u32 current_flags = __le32_to_cpu(cp->current_flags);
5071 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5072 &cp->addr.bdaddr, cp->addr.type, current_flags);
5074 // We should take hci_dev_lock() early, I think.. conn_flags can change
5075 supported_flags = hdev->conn_flags;
/* Reject any requested bit outside the supported mask. */
5077 if ((supported_flags | current_flags) != supported_flags) {
5078 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5079 current_flags, supported_flags);
5085 if (cp->addr.type == BDADDR_BREDR) {
5086 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5091 br_params->flags = current_flags;
5092 status = MGMT_STATUS_SUCCESS;
5094 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5095 &cp->addr.bdaddr, cp->addr.type);
5101 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5102 le_addr_type(cp->addr.type));
5104 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5105 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
/* Re-check against the per-device mask, which can be narrower. */
5109 supported_flags = get_params_flags(hdev, params);
5111 if ((supported_flags | current_flags) != supported_flags) {
5112 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5113 current_flags, supported_flags);
/* WRITE_ONCE pairs with lockless readers of params->flags. */
5117 WRITE_ONCE(params->flags, current_flags);
5118 status = MGMT_STATUS_SUCCESS;
5120 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5123 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5124 hci_update_passive_scan(hdev);
5127 hci_dev_unlock(hdev);
5130 if (status == MGMT_STATUS_SUCCESS)
5131 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5132 supported_flags, current_flags);
5134 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5135 &cp->addr, sizeof(cp->addr));
/* Broadcast MGMT_EV_ADV_MONITOR_ADDED for a newly registered monitor
 * handle, skipping the requesting socket.
 */
5138 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5141 struct mgmt_ev_adv_monitor_added ev;
5143 ev.monitor_handle = cpu_to_le16(handle);
5145 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for a removed monitor handle.
 * If a REMOVE_ADV_MONITOR command is pending for a specific handle,
 * its originating socket is skipped (sk_skip assignment happens in
 * lines not visible in this view -- confirm against the full file).
 */
5148 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5150 struct mgmt_ev_adv_monitor_removed ev;
5151 struct mgmt_pending_cmd *cmd;
5152 struct sock *sk_skip = NULL;
5153 struct mgmt_cp_remove_adv_monitor *cp;
5155 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5159 if (cp->monitor_handle)
5163 ev.monitor_handle = cpu_to_le16(handle);
5165 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features, limits, and the handles of all currently
 * registered advertisement monitors.
 */
5168 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5169 void *data, u16 len)
5171 struct adv_monitor *monitor = NULL;
5172 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5175 __u32 supported = 0;
5177 __u16 num_handles = 0;
5178 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5180 BT_DBG("request for %s", hdev->name);
5184 if (msft_monitor_supported(hdev))
5185 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Snapshot all registered monitor handles under the dev lock. */
5187 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5188 handles[num_handles++] = monitor->handle;
5190 hci_dev_unlock(hdev);
5192 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5193 rp = kmalloc(rp_size, GFP_KERNEL);
5197 /* All supported features are currently enabled */
5198 enabled = supported;
5200 rp->supported_features = cpu_to_le32(supported);
5201 rp->enabled_features = cpu_to_le32(enabled);
5202 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5203 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5204 rp->num_handles = cpu_to_le16(num_handles);
5206 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5208 err = mgmt_cmd_complete(sk, hdev->id,
5209 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5210 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for the queued add-monitor work: on success,
 * announce the new monitor, bump the monitor count, mark it
 * registered, and refresh passive scanning; always reply to the
 * pending command and drop it.
 */
5217 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5218 void *data, int status)
5220 struct mgmt_rp_add_adv_patterns_monitor rp;
5221 struct mgmt_pending_cmd *cmd = data;
5222 struct adv_monitor *monitor = cmd->user_data;
5226 rp.monitor_handle = cpu_to_le16(monitor->handle);
5229 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5230 hdev->adv_monitors_cnt++;
5231 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5232 monitor->state = ADV_MONITOR_STATE_REGISTERED;
/* New monitor may change what the passive scan should match. */
5233 hci_update_passive_scan(hdev);
5236 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5237 mgmt_status(status), &rp, sizeof(rp));
5238 mgmt_pending_remove(cmd);
5240 hci_dev_unlock(hdev);
5241 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5242 rp.monitor_handle, status);
/* hci_cmd_sync work function: register the monitor carried in the
 * pending command's user_data with the controller.
 */
5245 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5247 struct mgmt_pending_cmd *cmd = data;
5248 struct adv_monitor *monitor = cmd->user_data;
5250 return hci_add_adv_monitor(hdev, monitor);
/* Common tail for both add-monitor commands: reject while conflicting
 * monitor/LE commands are pending, create the pending command, and
 * queue the registration work.  On any failure the monitor "m" is
 * freed before returning an error status to the caller's socket.
 */
5253 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5254 struct adv_monitor *m, u8 status,
5255 void *data, u16 len, u16 op)
5257 struct mgmt_pending_cmd *cmd;
5265 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5266 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5267 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5268 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5269 status = MGMT_STATUS_BUSY;
5273 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5275 status = MGMT_STATUS_NO_RESOURCES;
5280 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5281 mgmt_add_adv_patterns_monitor_complete);
/* Distinguish allocation failure (-ENOMEM) from other errors. */
5284 status = MGMT_STATUS_NO_RESOURCES;
5286 status = MGMT_STATUS_FAILED;
5291 hci_dev_unlock(hdev);
/* Error path: the monitor was never handed off, so free it here. */
5296 hci_free_adv_monitor(hdev, m);
5297 hci_dev_unlock(hdev);
5298 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Fill in a monitor's RSSI thresholds from the command payload, or --
 * when rssi is NULL -- apply the permissive defaults described below
 * so the MSFT backend behaves as if no RSSI constraints were given.
 */
5301 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5302 struct mgmt_adv_rssi_thresholds *rssi)
5305 m->rssi.low_threshold = rssi->low_threshold;
5306 m->rssi.low_threshold_timeout =
5307 __le16_to_cpu(rssi->low_threshold_timeout);
5308 m->rssi.high_threshold = rssi->high_threshold;
5309 m->rssi.high_threshold_timeout =
5310 __le16_to_cpu(rssi->high_threshold_timeout);
5311 m->rssi.sampling_period = rssi->sampling_period;
5313 /* Default values. These numbers are the least constricting
5314 * parameters for MSFT API to work, so it behaves as if there
5315 * are no rssi parameter to consider. May need to be changed
5316 * if other API are to be supported.
5318 m->rssi.low_threshold = -127;
5319 m->rssi.low_threshold_timeout = 60;
5320 m->rssi.high_threshold = -127;
5321 m->rssi.high_threshold_timeout = 0;
5322 m->rssi.sampling_period = 0;
/* Copy pattern_count advertisement patterns from the command payload
 * into the monitor's pattern list, validating each offset/length
 * against HCI_MAX_EXT_AD_LENGTH.  Returns an MGMT status code.
 */
5326 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5327 struct mgmt_adv_pattern *patterns)
5329 u8 offset = 0, length = 0;
5330 struct adv_pattern *p = NULL;
5333 for (i = 0; i < pattern_count; i++) {
5334 offset = patterns[i].offset;
5335 length = patterns[i].length;
/* offset + length must stay within the extended AD payload. */
5336 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5337 length > HCI_MAX_EXT_AD_LENGTH ||
5338 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5339 return MGMT_STATUS_INVALID_PARAMS;
5341 p = kmalloc(sizeof(*p), GFP_KERNEL);
5343 return MGMT_STATUS_NO_RESOURCES;
5345 p->ad_type = patterns[i].ad_type;
5346 p->offset = patterns[i].offset;
5347 p->length = patterns[i].length;
5348 memcpy(p->value, patterns[i].value, p->length);
5350 INIT_LIST_HEAD(&p->list);
5351 list_add(&p->list, &m->patterns);
/* Partially-added patterns are freed with the monitor by the
 * caller's error path.
 */
5354 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds):
 * validate the variable-length pattern payload, build an adv_monitor
 * with default RSSI parameters, and hand off to
 * __add_adv_patterns_monitor().
 */
5357 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5358 void *data, u16 len)
5360 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5361 struct adv_monitor *m = NULL;
5362 u8 status = MGMT_STATUS_SUCCESS;
5363 size_t expected_size = sizeof(*cp);
5365 BT_DBG("request for %s", hdev->name);
/* At least one pattern must follow the fixed header. */
5367 if (len <= sizeof(*cp)) {
5368 status = MGMT_STATUS_INVALID_PARAMS;
5372 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5373 if (len != expected_size) {
5374 status = MGMT_STATUS_INVALID_PARAMS;
5378 m = kzalloc(sizeof(*m), GFP_KERNEL);
5380 status = MGMT_STATUS_NO_RESOURCES;
5384 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi selects the permissive defaults. */
5386 parse_adv_monitor_rssi(m, NULL);
5387 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5390 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5391 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same as
 * add_adv_patterns_monitor() but the command carries explicit RSSI
 * thresholds which are parsed into the monitor.
 */
5394 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5395 void *data, u16 len)
5397 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5398 struct adv_monitor *m = NULL;
5399 u8 status = MGMT_STATUS_SUCCESS;
5400 size_t expected_size = sizeof(*cp);
5402 BT_DBG("request for %s", hdev->name);
5404 if (len <= sizeof(*cp)) {
5405 status = MGMT_STATUS_INVALID_PARAMS;
5409 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5410 if (len != expected_size) {
5411 status = MGMT_STATUS_INVALID_PARAMS;
5415 m = kzalloc(sizeof(*m), GFP_KERNEL);
5417 status = MGMT_STATUS_NO_RESOURCES;
5421 INIT_LIST_HEAD(&m->patterns);
/* Caller-supplied thresholds instead of the defaults. */
5423 parse_adv_monitor_rssi(m, &cp->rssi);
5424 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5427 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5428 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for the queued remove-monitor work: refresh
 * passive scanning, reply to the pending command with the handle that
 * was requested, and drop the pending entry.
 */
5431 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5432 void *data, int status)
5434 struct mgmt_rp_remove_adv_monitor rp;
5435 struct mgmt_pending_cmd *cmd = data;
5436 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
/* Echo the (still little-endian) handle from the request. */
5440 rp.monitor_handle = cp->monitor_handle;
5443 hci_update_passive_scan(hdev);
5445 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5446 mgmt_status(status), &rp, sizeof(rp));
5447 mgmt_pending_remove(cmd);
5449 hci_dev_unlock(hdev);
5450 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5451 rp.monitor_handle, status);
/* hci_cmd_sync work function for Remove Advertisement Monitor: handle 0
 * (the condition line is elided in this extract) removes all monitors,
 * otherwise only the single monitor identified by the handle.
 */
5454 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5456 struct mgmt_pending_cmd *cmd = data;
5457 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5458 u16 handle = __le16_to_cpu(cp->monitor_handle);
5461 return hci_remove_all_adv_monitor(hdev);
5463 return hci_remove_single_adv_monitor(hdev, handle);
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR: reject if a conflicting command
 * is pending, otherwise queue mgmt_remove_adv_monitor_sync() on the
 * hci_sync machinery; the reply is sent from the completion callback.
 * NOTE(review): error labels and some cleanup lines are elided in this
 * extract; comments describe only the visible statements.
 */
5466 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5467 void *data, u16 len)
5469 struct mgmt_pending_cmd *cmd;
/* Serialize against LE toggling and other monitor add/remove commands. */
5474 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5475 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5476 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5477 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5478 status = MGMT_STATUS_BUSY;
5482 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5484 status = MGMT_STATUS_NO_RESOURCES;
5488 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5489 mgmt_remove_adv_monitor_complete);
/* Queuing failed: drop the pending cmd and map err to a mgmt status. */
5492 mgmt_pending_remove(cmd);
5495 status = MGMT_STATUS_NO_RESOURCES;
5497 status = MGMT_STATUS_FAILED;
5502 hci_dev_unlock(hdev);
5507 hci_dev_unlock(hdev);
5508 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* Completion callback for Read Local OOB Data: translate the HCI reply
 * skb (legacy or extended/secure-connections variant) into a
 * mgmt_rp_read_local_oob_data reply. For non-SC controllers only the
 * P-192 hash/rand are present, so the reply is shrunk to exclude the
 * P-256 fields. Frees the skb and the pending command on all paths.
 * NOTE(review): error/cleanup labels are elided in this extract.
 */
5512 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5514 struct mgmt_rp_read_local_oob_data mgmt_rp;
5515 size_t rp_size = sizeof(mgmt_rp);
5516 struct mgmt_pending_cmd *cmd = data;
5517 struct sk_buff *skb = cmd->skb;
5518 u8 status = mgmt_status(err);
/* Derive a mgmt status: no skb -> failed, ERR_PTR skb -> mapped errno,
 * otherwise the first byte of the HCI reply is the HCI status code. */
5522 status = MGMT_STATUS_FAILED;
5523 else if (IS_ERR(skb))
5524 status = mgmt_status(PTR_ERR(skb));
5526 status = mgmt_status(skb->data[0]);
5529 bt_dev_dbg(hdev, "status %d", status);
5532 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5536 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5538 if (!bredr_sc_enabled(hdev)) {
/* Legacy reply: only P-192 hash/rand available. */
5539 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5541 if (skb->len < sizeof(*rp)) {
5542 mgmt_cmd_status(cmd->sk, hdev->id,
5543 MGMT_OP_READ_LOCAL_OOB_DATA,
5544 MGMT_STATUS_FAILED);
5548 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5549 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Trim the reply so userspace does not see zeroed P-256 fields. */
5551 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5553 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5555 if (skb->len < sizeof(*rp)) {
5556 mgmt_cmd_status(cmd->sk, hdev->id,
5557 MGMT_OP_READ_LOCAL_OOB_DATA,
5558 MGMT_STATUS_FAILED);
5562 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5563 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5565 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5566 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5569 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5570 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
/* Only free a real skb, never an ERR_PTR. */
5573 if (skb && !IS_ERR(skb))
5576 mgmt_pending_free(cmd);
/* hci_cmd_sync work function: issue the extended OOB read when BR/EDR
 * secure connections is enabled, otherwise the legacy read. The reply skb
 * is stashed in cmd->skb for the completion callback; an ERR_PTR skb is
 * propagated as the work function's error.
 */
5579 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5581 struct mgmt_pending_cmd *cmd = data;
5583 if (bredr_sc_enabled(hdev))
5584 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5586 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5588 if (IS_ERR(cmd->skb))
5589 return PTR_ERR(cmd->skb);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: requires a powered, SSP-capable
 * controller; queues read_local_oob_data_sync() and replies from its
 * completion callback. Uses mgmt_pending_new() (not _add), so the command
 * is not findable via pending_find() and is freed explicitly on error.
 * NOTE(review): error labels/braces are elided in this extract.
 */
5594 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5595 void *data, u16 data_len)
5597 struct mgmt_pending_cmd *cmd;
5600 bt_dev_dbg(hdev, "sock %p", sk);
5604 if (!hdev_is_powered(hdev)) {
5605 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5606 MGMT_STATUS_NOT_POWERED);
5610 if (!lmp_ssp_capable(hdev)) {
5611 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5612 MGMT_STATUS_NOT_SUPPORTED);
5616 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5620 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5621 read_local_oob_data_complete);
/* Queuing failed: report failure and free the orphaned pending cmd. */
5624 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5625 MGMT_STATUS_FAILED);
5628 mgmt_pending_free(cmd);
5632 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA. Two command sizes are accepted:
 * the short form carries only P-192 hash/rand (BR/EDR only), the extended
 * form carries both P-192 and P-256 values. For LE addresses the P-192
 * values must be all-zero (legacy SMP OOB not implemented); all-zero
 * hash or rand for a curve disables OOB for that curve.
 * NOTE(review): several structural lines (braces, gotos, the NULL
 * assignments to rand192/hash192/rand256/hash256) are elided in this
 * extract; comments describe only the visible statements.
 */
5636 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5637 void *data, u16 len)
5639 struct mgmt_addr_info *addr = data;
5642 bt_dev_dbg(hdev, "sock %p", sk);
5644 if (!bdaddr_type_is_valid(addr->type))
5645 return mgmt_cmd_complete(sk, hdev->id,
5646 MGMT_OP_ADD_REMOTE_OOB_DATA,
5647 MGMT_STATUS_INVALID_PARAMS,
5648 addr, sizeof(*addr));
/* Short form: P-192 data only, restricted to BR/EDR addresses. */
5652 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5653 struct mgmt_cp_add_remote_oob_data *cp = data;
5656 if (cp->addr.type != BDADDR_BREDR) {
5657 err = mgmt_cmd_complete(sk, hdev->id,
5658 MGMT_OP_ADD_REMOTE_OOB_DATA,
5659 MGMT_STATUS_INVALID_PARAMS,
5660 &cp->addr, sizeof(cp->addr));
5664 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5665 cp->addr.type, cp->hash,
5666 cp->rand, NULL, NULL);
5668 status = MGMT_STATUS_FAILED;
5670 status = MGMT_STATUS_SUCCESS;
5672 err = mgmt_cmd_complete(sk, hdev->id,
5673 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5674 &cp->addr, sizeof(cp->addr));
/* Extended form: both P-192 and P-256 data. */
5675 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5676 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5677 u8 *rand192, *hash192, *rand256, *hash256;
5680 if (bdaddr_type_is_le(cp->addr.type)) {
5681 /* Enforce zero-valued 192-bit parameters as
5682 * long as legacy SMP OOB isn't implemented.
5684 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5685 memcmp(cp->hash192, ZERO_KEY, 16)) {
5686 err = mgmt_cmd_complete(sk, hdev->id,
5687 MGMT_OP_ADD_REMOTE_OOB_DATA,
5688 MGMT_STATUS_INVALID_PARAMS,
5689 addr, sizeof(*addr));
5696 /* In case one of the P-192 values is set to zero,
5697 * then just disable OOB data for P-192.
5699 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5700 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5704 rand192 = cp->rand192;
5705 hash192 = cp->hash192;
5709 /* In case one of the P-256 values is set to zero, then just
5710 * disable OOB data for P-256.
5712 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5713 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5717 rand256 = cp->rand256;
5718 hash256 = cp->hash256;
5721 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5722 cp->addr.type, hash192, rand192,
5725 status = MGMT_STATUS_FAILED;
5727 status = MGMT_STATUS_SUCCESS;
5729 err = mgmt_cmd_complete(sk, hdev->id,
5730 MGMT_OP_ADD_REMOTE_OOB_DATA,
5731 status, &cp->addr, sizeof(cp->addr));
/* Any other length is malformed. */
5733 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5735 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5736 MGMT_STATUS_INVALID_PARAMS);
5740 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: BR/EDR addresses only.
 * BDADDR_ANY clears all stored remote OOB data; otherwise remove the
 * entry for the given address (missing entry -> INVALID_PARAMS).
 */
5744 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5745 void *data, u16 len)
5747 struct mgmt_cp_remove_remote_oob_data *cp = data;
5751 bt_dev_dbg(hdev, "sock %p", sk);
5753 if (cp->addr.type != BDADDR_BREDR)
5754 return mgmt_cmd_complete(sk, hdev->id,
5755 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5756 MGMT_STATUS_INVALID_PARAMS,
5757 &cp->addr, sizeof(cp->addr));
/* Wildcard address clears the entire OOB data store. */
5761 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5762 hci_remote_oob_data_clear(hdev);
5763 status = MGMT_STATUS_SUCCESS;
5767 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5769 status = MGMT_STATUS_INVALID_PARAMS;
5771 status = MGMT_STATUS_SUCCESS;
5774 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5775 status, &cp->addr, sizeof(cp->addr));
5777 hci_dev_unlock(hdev);
/* Notify completion of any of the three start-discovery command variants:
 * find whichever one is pending (plain, service, or limited discovery)
 * and complete it with the translated HCI status.
 */
5781 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5783 struct mgmt_pending_cmd *cmd;
5785 bt_dev_dbg(hdev, "status %u", status);
5789 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5791 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5794 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5797 cmd->cmd_complete(cmd, mgmt_status(status));
5798 mgmt_pending_remove(cmd);
5801 hci_dev_unlock(hdev);
/* Validate a requested discovery type against controller capabilities.
 * LE and interleaved discovery require LE support; interleaved (and the
 * fall-through to BREDR) additionally needs BR/EDR support. On rejection
 * *mgmt_status is set to the reason; unknown types get INVALID_PARAMS.
 * NOTE(review): the switch keyword and return statements are elided in
 * this extract.
 */
5804 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5805 uint8_t *mgmt_status)
5808 case DISCOV_TYPE_LE:
5809 *mgmt_status = mgmt_le_support(hdev);
5813 case DISCOV_TYPE_INTERLEAVED:
5814 *mgmt_status = mgmt_le_support(hdev);
/* fallthrough to also require BR/EDR for interleaved discovery */
5818 case DISCOV_TYPE_BREDR:
5819 *mgmt_status = mgmt_bredr_support(hdev);
5824 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* hci_cmd_sync completion for start-discovery: ignore stale callbacks
 * (cmd no longer pending for any of the three variants), reply to the
 * caller, and move discovery state to FINDING on success or STOPPED on
 * error.
 */
5831 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5833 struct mgmt_pending_cmd *cmd = data;
/* Bail out if this cmd was already cancelled/completed elsewhere. */
5835 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5836 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5837 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5840 bt_dev_dbg(hdev, "err %d", err);
5842 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5844 mgmt_pending_remove(cmd);
5846 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
/* Trivial hci_cmd_sync adapter: start discovery on the sync context. */
5850 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5852 return hci_start_discovery_sync(hdev);
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY ('op' selects which). Validates power
 * state, idle discovery state, discovery type and pause state, resets the
 * discovery filter, records the requested type/limited flag, then queues
 * start_discovery_sync() and sets state to STARTING.
 * NOTE(review): braces, goto statements and labels are elided in this
 * extract; comments describe only the visible statements.
 */
5855 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5856 u16 op, void *data, u16 len)
5858 struct mgmt_cp_start_discovery *cp = data;
5859 struct mgmt_pending_cmd *cmd;
5863 bt_dev_dbg(hdev, "sock %p", sk);
5867 if (!hdev_is_powered(hdev)) {
5868 err = mgmt_cmd_complete(sk, hdev->id, op,
5869 MGMT_STATUS_NOT_POWERED,
5870 &cp->type, sizeof(cp->type));
/* Only one discovery at a time, and not while periodic inquiry runs. */
5874 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5875 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5876 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5877 &cp->type, sizeof(cp->type));
5881 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5882 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5883 &cp->type, sizeof(cp->type));
5887 /* Can't start discovery when it is paused */
5888 if (hdev->discovery_paused) {
5889 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5890 &cp->type, sizeof(cp->type));
5894 /* Clear the discovery filter first to free any previously
5895 * allocated memory for the UUID list.
5897 hci_discovery_filter_clear(hdev);
5899 hdev->discovery.type = cp->type;
5900 hdev->discovery.report_invalid_rssi = false;
5901 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5902 hdev->discovery.limited = true;
5904 hdev->discovery.limited = false;
5906 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5912 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5913 start_discovery_complete);
5915 mgmt_pending_remove(cmd);
5919 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5922 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY entry point: delegates to the shared helper. */
5926 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5927 void *data, u16 len)
5929 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY entry point: delegates to the shared
 * helper, which sets hdev->discovery.limited for this opcode. */
5933 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5934 void *data, u16 len)
5936 return start_discovery_internal(sk, hdev,
5937 MGMT_OP_START_LIMITED_DISCOVERY,
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but
 * with result filtering by RSSI and an optional UUID list. Validates the
 * variable-length UUID payload (uuid_count bounded so that the total
 * length fits in u16; len must match exactly), copies the UUIDs into
 * hdev->discovery, then queues start_discovery_sync().
 * NOTE(review): braces, goto statements and labels are elided in this
 * extract; comments describe only the visible statements.
 */
5941 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5942 void *data, u16 len)
5944 struct mgmt_cp_start_service_discovery *cp = data;
5945 struct mgmt_pending_cmd *cmd;
/* Upper bound keeps sizeof(*cp) + uuid_count * 16 within u16 range. */
5946 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5947 u16 uuid_count, expected_len;
5951 bt_dev_dbg(hdev, "sock %p", sk);
5955 if (!hdev_is_powered(hdev)) {
5956 err = mgmt_cmd_complete(sk, hdev->id,
5957 MGMT_OP_START_SERVICE_DISCOVERY,
5958 MGMT_STATUS_NOT_POWERED,
5959 &cp->type, sizeof(cp->type));
5963 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5964 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5965 err = mgmt_cmd_complete(sk, hdev->id,
5966 MGMT_OP_START_SERVICE_DISCOVERY,
5967 MGMT_STATUS_BUSY, &cp->type,
5972 if (hdev->discovery_paused) {
5973 err = mgmt_cmd_complete(sk, hdev->id,
5974 MGMT_OP_START_SERVICE_DISCOVERY,
5975 MGMT_STATUS_BUSY, &cp->type,
5980 uuid_count = __le16_to_cpu(cp->uuid_count);
5981 if (uuid_count > max_uuid_count) {
5982 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5984 err = mgmt_cmd_complete(sk, hdev->id,
5985 MGMT_OP_START_SERVICE_DISCOVERY,
5986 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Request length must match the declared UUID count exactly. */
5991 expected_len = sizeof(*cp) + uuid_count * 16;
5992 if (expected_len != len) {
5993 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5995 err = mgmt_cmd_complete(sk, hdev->id,
5996 MGMT_OP_START_SERVICE_DISCOVERY,
5997 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6002 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6003 err = mgmt_cmd_complete(sk, hdev->id,
6004 MGMT_OP_START_SERVICE_DISCOVERY,
6005 status, &cp->type, sizeof(cp->type));
6009 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6016 /* Clear the discovery filter first to free any previously
6017 * allocated memory for the UUID list.
6019 hci_discovery_filter_clear(hdev);
6021 hdev->discovery.result_filtering = true;
6022 hdev->discovery.type = cp->type;
6023 hdev->discovery.rssi = cp->rssi;
6024 hdev->discovery.uuid_count = uuid_count;
6026 if (uuid_count > 0) {
/* Copy the UUID list; freed later by hci_discovery_filter_clear(). */
6027 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6029 if (!hdev->discovery.uuids) {
6030 err = mgmt_cmd_complete(sk, hdev->id,
6031 MGMT_OP_START_SERVICE_DISCOVERY,
6033 &cp->type, sizeof(cp->type));
6034 mgmt_pending_remove(cmd);
6039 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6040 start_discovery_complete);
6042 mgmt_pending_remove(cmd);
6046 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6049 hci_dev_unlock(hdev);
/* Notify completion of a pending MGMT_OP_STOP_DISCOVERY command with the
 * translated HCI status and remove it from the pending list.
 */
6053 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6055 struct mgmt_pending_cmd *cmd;
6057 bt_dev_dbg(hdev, "status %u", status);
6061 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6063 cmd->cmd_complete(cmd, mgmt_status(status));
6064 mgmt_pending_remove(cmd);
6067 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for stop-discovery: ignore stale callbacks,
 * reply to the caller, and mark discovery stopped.
 */
6070 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6072 struct mgmt_pending_cmd *cmd = data;
/* Bail out if this cmd was already cancelled/completed elsewhere. */
6074 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6077 bt_dev_dbg(hdev, "err %d", err);
6079 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6081 mgmt_pending_remove(cmd);
6084 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Trivial hci_cmd_sync adapter: stop discovery on the sync context. */
6087 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6089 return hci_stop_discovery_sync(hdev);
/* Handler for MGMT_OP_STOP_DISCOVERY: reject when no discovery is active
 * or when the type does not match the running discovery; otherwise queue
 * stop_discovery_sync() and move state to STOPPING.
 */
6092 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6095 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6096 struct mgmt_pending_cmd *cmd;
6099 bt_dev_dbg(hdev, "sock %p", sk);
6103 if (!hci_discovery_active(hdev)) {
6104 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6105 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6106 sizeof(mgmt_cp->type));
/* The requested type must match the discovery that was started. */
6110 if (hdev->discovery.type != mgmt_cp->type) {
6111 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6112 MGMT_STATUS_INVALID_PARAMS,
6113 &mgmt_cp->type, sizeof(mgmt_cp->type));
6117 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6123 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6124 stop_discovery_complete);
6126 mgmt_pending_remove(cmd);
6130 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6133 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME: while discovery is active, let
 * userspace confirm whether a device's name is already known. Known ->
 * mark the inquiry cache entry NAME_KNOWN; unknown -> NAME_NEEDED and
 * schedule name resolution via hci_inquiry_cache_update_resolve().
 */
6137 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6140 struct mgmt_cp_confirm_name *cp = data;
6141 struct inquiry_entry *e;
6144 bt_dev_dbg(hdev, "sock %p", sk);
6148 if (!hci_discovery_active(hdev)) {
6149 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6150 MGMT_STATUS_FAILED, &cp->addr,
/* Look up the device among cache entries with unresolved names. */
6155 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6157 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6158 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6163 if (cp->name_known) {
6164 e->name_state = NAME_KNOWN;
6167 e->name_state = NAME_NEEDED;
6168 hci_inquiry_cache_update_resolve(hdev, e);
6171 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6172 &cp->addr, sizeof(cp->addr));
6175 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_BLOCK_DEVICE: add the address to the reject list
 * and emit MGMT_EV_DEVICE_BLOCKED to other mgmt sockets on success.
 */
6179 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6182 struct mgmt_cp_block_device *cp = data;
6186 bt_dev_dbg(hdev, "sock %p", sk);
6188 if (!bdaddr_type_is_valid(cp->addr.type))
6189 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6190 MGMT_STATUS_INVALID_PARAMS,
6191 &cp->addr, sizeof(cp->addr));
6195 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6198 status = MGMT_STATUS_FAILED;
/* Broadcast the event to everyone except the requesting socket (sk). */
6202 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6204 status = MGMT_STATUS_SUCCESS;
6207 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6208 &cp->addr, sizeof(cp->addr));
6210 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_UNBLOCK_DEVICE: remove the address from the reject
 * list (absent entry -> INVALID_PARAMS) and emit MGMT_EV_DEVICE_UNBLOCKED
 * on success.
 */
6215 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6218 struct mgmt_cp_unblock_device *cp = data;
6222 bt_dev_dbg(hdev, "sock %p", sk);
6224 if (!bdaddr_type_is_valid(cp->addr.type))
6225 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6226 MGMT_STATUS_INVALID_PARAMS,
6227 &cp->addr, sizeof(cp->addr));
6231 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6234 status = MGMT_STATUS_INVALID_PARAMS;
/* Broadcast the event to everyone except the requesting socket (sk). */
6238 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6240 status = MGMT_STATUS_SUCCESS;
6243 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6244 &cp->addr, sizeof(cp->addr));
6246 hci_dev_unlock(hdev);
/* hci_cmd_sync adapter: regenerate EIR data after a Device ID change. */
6251 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6253 return hci_update_eir_sync(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID: store the DI source/vendor/product/
 * version values on the hdev and queue an EIR update so the new Device ID
 * record is advertised. Source must be 0x0000-0x0002.
 */
6256 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6259 struct mgmt_cp_set_device_id *cp = data;
6263 bt_dev_dbg(hdev, "sock %p", sk);
6265 source = __le16_to_cpu(cp->source);
/* 0x0000 = disabled, 0x0001 = Bluetooth SIG, 0x0002 = USB IF. */
6267 if (source > 0x0002)
6268 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6269 MGMT_STATUS_INVALID_PARAMS);
6273 hdev->devid_source = source;
6274 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6275 hdev->devid_product = __le16_to_cpu(cp->product);
6276 hdev->devid_version = __le16_to_cpu(cp->version);
6278 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Fire-and-forget EIR refresh; no completion callback needed. */
6281 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6283 hci_dev_unlock(hdev);
/* Log the outcome of re-enabling an advertising instance (error vs debug). */
6288 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6291 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6293 bt_dev_dbg(hdev, "status %d", err);
/* Completion callback for Set Advertising: on error, fail all pending
 * SET_ADVERTISING commands; on success, sync the HCI_ADVERTISING flag to
 * the controller's HCI_LE_ADV state, answer the pending commands, emit
 * New Settings, and — if Set Advertising was just disabled while instance
 * advertising existed — reschedule multi-instance advertising.
 * NOTE(review): braces/labels and some declarations are elided in this
 * extract; comments describe only the visible statements.
 */
6296 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6298 struct cmd_lookup match = { NULL, hdev };
6300 struct adv_info *adv_instance;
6301 u8 status = mgmt_status(err);
/* Error path: fail every pending SET_ADVERTISING with the status. */
6304 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6305 cmd_status_rsp, &status);
6309 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6310 hci_dev_set_flag(hdev, HCI_ADVERTISING)
6312 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6314 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6317 new_settings(hdev, match.sk);
6322 /* If "Set Advertising" was just disabled and instance advertising was
6323 * set up earlier, then re-enable multi-instance advertising.
6325 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6326 list_empty(&hdev->adv_instances))
6329 instance = hdev->cur_adv_instance;
/* No current instance selected: fall back to the first configured one. */
6331 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6332 struct adv_info, list);
6336 instance = adv_instance->instance;
6339 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6341 enable_advertising_instance(hdev, err);
/* hci_cmd_sync work function for Set Advertising: apply the connectable
 * sub-mode flag (val == 0x02), cancel any instance timeout, and either
 * (re)start advertising on instance 0 (extended or legacy path) or
 * disable advertising, depending on cp->val.
 * NOTE(review): the enable/disable branch structure (if/else and return)
 * is partially elided in this extract.
 */
6344 static int set_adv_sync(struct hci_dev *hdev, void *data)
6346 struct mgmt_pending_cmd *cmd = data;
6347 struct mgmt_mode *cp = cmd->param;
/* 0x02 means "advertising, connectable"; 0x01 plain advertising. */
6350 if (cp->val == 0x02)
6351 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6353 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6355 cancel_adv_timeout(hdev);
6358 /* Switch to instance "0" for the Set Advertising setting.
6359 * We cannot use update_[adv|scan_rsp]_data() here as the
6360 * HCI_ADVERTISING flag is not yet set.
6362 hdev->cur_adv_instance = 0x00;
6364 if (ext_adv_capable(hdev)) {
6365 hci_start_ext_adv_sync(hdev, 0x00);
/* Legacy advertising: refresh data and scan response, then enable. */
6367 hci_update_adv_data_sync(hdev, 0x00);
6368 hci_update_scan_rsp_data_sync(hdev, 0x00);
6369 hci_enable_advertising_sync(hdev);
6372 hci_disable_advertising_sync(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING (val 0x00 off, 0x01 on, 0x02 on +
 * connectable). When no HCI traffic is needed (powered off, no change,
 * mesh mode, active LE connections or active scanning) only the flags are
 * toggled and the settings response/New Settings event are sent directly;
 * otherwise set_adv_sync() is queued.
 * NOTE(review): braces, gotos and labels are elided in this extract;
 * comments describe only the visible statements.
 */
6378 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6381 struct mgmt_mode *cp = data;
6382 struct mgmt_pending_cmd *cmd;
6386 bt_dev_dbg(hdev, "sock %p", sk);
6388 status = mgmt_le_support(hdev);
6390 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6393 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6394 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6395 MGMT_STATUS_INVALID_PARAMS);
/* Advertising is paused (e.g. during suspend); reject for now. */
6397 if (hdev->advertising_paused)
6398 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6405 /* The following conditions are ones which mean that we should
6406 * not do any HCI communication but directly send a mgmt
6407 * response to user space (after toggling the flag if
6410 if (!hdev_is_powered(hdev) ||
6411 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6412 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6413 hci_dev_test_flag(hdev, HCI_MESH) ||
6414 hci_conn_num(hdev, LE_LINK) > 0 ||
6415 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6416 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6420 hdev->cur_adv_instance = 0x00;
6421 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6422 if (cp->val == 0x02)
6423 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6425 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6427 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6428 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6431 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Only emit New Settings if a flag actually changed. */
6436 err = new_settings(hdev, sk);
/* Serialize against concurrent SET_ADVERTISING / SET_LE commands. */
6441 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6442 pending_find(MGMT_OP_SET_LE, hdev)) {
6443 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6448 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6452 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6453 set_advertising_complete);
6456 mgmt_pending_remove(cmd);
6459 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: only allowed on LE-capable,
 * powered-off controllers. A non-ANY address must not be BDADDR_NONE and
 * must have its two most significant bits set (Core Spec requirement for
 * static random addresses). Stores the address and reports New Settings.
 */
6463 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6464 void *data, u16 len)
6466 struct mgmt_cp_set_static_address *cp = data;
6469 bt_dev_dbg(hdev, "sock %p", sk);
6471 if (!lmp_le_capable(hdev))
6472 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6473 MGMT_STATUS_NOT_SUPPORTED);
/* Changing the identity address while powered is not allowed. */
6475 if (hdev_is_powered(hdev))
6476 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6477 MGMT_STATUS_REJECTED);
6479 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6480 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6481 return mgmt_cmd_status(sk, hdev->id,
6482 MGMT_OP_SET_STATIC_ADDRESS,
6483 MGMT_STATUS_INVALID_PARAMS);
6485 /* Two most significant bits shall be set */
6486 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6487 return mgmt_cmd_status(sk, hdev->id,
6488 MGMT_OP_SET_STATIC_ADDRESS,
6489 MGMT_STATUS_INVALID_PARAMS);
6494 bacpy(&hdev->static_addr, &cp->bdaddr);
6496 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6500 err = new_settings(hdev, sk);
6503 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: validate and store the LE scan
 * interval and window (each 0x0004-0x4000, window <= interval, per the
 * HCI LE Set Scan Parameters value range) and restart background scanning
 * so the new parameters take effect if it is currently running.
 */
6507 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6508 void *data, u16 len)
6510 struct mgmt_cp_set_scan_params *cp = data;
6511 __u16 interval, window;
6514 bt_dev_dbg(hdev, "sock %p", sk);
6516 if (!lmp_le_capable(hdev))
6517 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6518 MGMT_STATUS_NOT_SUPPORTED);
6520 interval = __le16_to_cpu(cp->interval);
6522 if (interval < 0x0004 || interval > 0x4000)
6523 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6524 MGMT_STATUS_INVALID_PARAMS);
6526 window = __le16_to_cpu(cp->window);
6528 if (window < 0x0004 || window > 0x4000)
6529 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6530 MGMT_STATUS_INVALID_PARAMS);
/* The scan window must fit inside the scan interval. */
6532 if (window > interval)
6533 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6534 MGMT_STATUS_INVALID_PARAMS);
6538 hdev->le_scan_interval = interval;
6539 hdev->le_scan_window = window;
6541 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6544 /* If background scan is running, restart it so new parameters are
6547 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6548 hdev->discovery.state == DISCOVERY_STOPPED)
6549 hci_update_passive_scan(hdev);
6551 hci_dev_unlock(hdev);
/* Completion callback for Set Fast Connectable: on error send a failure
 * status; on success sync the HCI_FAST_CONNECTABLE flag to the requested
 * value, send the settings response and emit New Settings. Frees the cmd.
 */
6556 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6558 struct mgmt_pending_cmd *cmd = data;
6560 bt_dev_dbg(hdev, "err %d", err);
6563 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6566 struct mgmt_mode *cp = cmd->param;
6569 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6571 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6573 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6574 new_settings(hdev, cmd->sk);
6577 mgmt_pending_free(cmd);
/* hci_cmd_sync adapter: write the fast-connectable page-scan setting
 * requested in the pending command's mgmt_mode parameter. */
6580 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6582 struct mgmt_pending_cmd *cmd = data;
6583 struct mgmt_mode *cp = cmd->param;
6585 return hci_write_fast_connectable_sync(hdev, cp->val);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: requires BR/EDR enabled and
 * controller version >= 1.2. No-op if the flag already matches; if
 * powered off, just toggle the flag and notify; otherwise queue
 * write_fast_connectable_sync() to update page-scan parameters.
 * NOTE(review): braces, gotos and labels are elided in this extract.
 */
6588 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6589 void *data, u16 len)
6591 struct mgmt_mode *cp = data;
6592 struct mgmt_pending_cmd *cmd;
6595 bt_dev_dbg(hdev, "sock %p", sk);
6597 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6598 hdev->hci_ver < BLUETOOTH_VER_1_2)
6599 return mgmt_cmd_status(sk, hdev->id,
6600 MGMT_OP_SET_FAST_CONNECTABLE,
6601 MGMT_STATUS_NOT_SUPPORTED);
6603 if (cp->val != 0x00 && cp->val != 0x01)
6604 return mgmt_cmd_status(sk, hdev->id,
6605 MGMT_OP_SET_FAST_CONNECTABLE,
6606 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: just acknowledge. */
6610 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6611 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
/* Powered off: flip the flag now; it is applied at power on. */
6615 if (!hdev_is_powered(hdev)) {
6616 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6617 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6618 new_settings(hdev, sk);
6622 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6627 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6628 fast_connectable_complete);
6631 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6632 MGMT_STATUS_FAILED);
6635 mgmt_pending_free(cmd);
6639 hci_dev_unlock(hdev);
/* Completion callback for Set BR/EDR: on failure, roll back the
 * HCI_BREDR_ENABLED flag that set_bredr() optimistically set before
 * queuing, and report the error; on success, send the settings response
 * and New Settings. Frees the pending command.
 */
6644 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6646 struct mgmt_pending_cmd *cmd = data;
6648 bt_dev_dbg(hdev, "err %d", err);
6651 u8 mgmt_err = mgmt_status(err);
6653 /* We need to restore the flag if related HCI commands
6656 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6658 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6660 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6661 new_settings(hdev, cmd->sk);
6664 mgmt_pending_free(cmd);
/* hci_cmd_sync work for enabling BR/EDR: clear fast connectable, update
 * page/inquiry scan, then refresh advertising data (the flags field
 * changes when BR/EDR support is toggled).
 * NOTE(review): the conditional guards between these steps are elided in
 * this extract.
 */
6667 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6671 status = hci_write_fast_connectable_sync(hdev, false);
6674 status = hci_update_scan_sync(hdev);
6676 /* Since only the advertising data flags will change, there
6677 * is no need to update the scan response data.
6680 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
/* Handler for MGMT_OP_SET_BREDR: toggle BR/EDR support on a dual-mode
 * controller. Disabling while powered on is rejected, as is re-enabling
 * when the controller is configured LE-only with a static address or with
 * secure connections enabled (both would create an invalid identity
 * configuration). The HCI_BREDR_ENABLED flag is set optimistically before
 * queuing set_bredr_sync() so advertising data is generated correctly;
 * set_bredr_complete() rolls it back on failure.
 * NOTE(review): braces, gotos and labels are elided in this extract;
 * comments describe only the visible statements.
 */
6685 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6687 struct mgmt_mode *cp = data;
6688 struct mgmt_pending_cmd *cmd;
6691 bt_dev_dbg(hdev, "sock %p", sk);
6693 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6694 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6695 MGMT_STATUS_NOT_SUPPORTED);
6697 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6698 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6699 MGMT_STATUS_REJECTED);
6701 if (cp->val != 0x00 && cp->val != 0x01)
6702 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6703 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: just acknowledge. */
6707 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6708 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6712 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears all BR/EDR-only settings with it. */
6714 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6715 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6716 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6717 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6720 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6722 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6726 err = new_settings(hdev, sk);
6730 /* Reject disabling when powered on */
6732 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6733 MGMT_STATUS_REJECTED);
6736 /* When configuring a dual-mode controller to operate
6737 * with LE only and using a static address, then switching
6738 * BR/EDR back on is not allowed.
6740 * Dual-mode controllers shall operate with the public
6741 * address as its identity address for BR/EDR and LE. So
6742 * reject the attempt to create an invalid configuration.
6744 * The same restrictions applies when secure connections
6745 * has been enabled. For BR/EDR this is a controller feature
6746 * while for LE it is a host stack feature. This means that
6747 * switching BR/EDR back on when secure connections has been
6748 * enabled is not a supported transaction.
6750 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6751 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6752 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6754 MGMT_STATUS_REJECTED);
6759 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6763 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6764 set_bredr_complete);
6767 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6768 MGMT_STATUS_FAILED);
6770 mgmt_pending_free(cmd);
6775 /* We need to flip the bit already here so that
6776 * hci_req_update_adv_data generates the correct flags.
6778 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6781 hci_dev_unlock(hdev);
/* Completion callback for Set Secure Connections: on error send a status
 * reply; on success set HCI_SC_ENABLED / HCI_SC_ONLY flags to match the
 * requested value (0x00 off, 0x01 SC, 0x02 SC-only), then send the
 * settings response and New Settings. Frees the pending command.
 * NOTE(review): the switch on cp->val is partially elided in this
 * extract; the three flag-setting pairs below correspond to its cases.
 */
6785 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6787 struct mgmt_pending_cmd *cmd = data;
6788 struct mgmt_mode *cp;
6790 bt_dev_dbg(hdev, "err %d", err);
6793 u8 mgmt_err = mgmt_status(err);
6795 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6803 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6804 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6807 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6808 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6811 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6812 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6816 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6817 new_settings(hdev, cmd->sk);
6820 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Set Secure Connections: pre-set HCI_SC_ENABLED so
 * the write is not skipped as a no-op, then write SC support to the
 * controller.
 */
6823 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6825 struct mgmt_pending_cmd *cmd = data;
6826 struct mgmt_mode *cp = cmd->param;
6829 /* Force write of val */
6830 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6832 return hci_write_sc_support_sync(hdev, val);
/* Handler for MGMT_OP_SET_SECURE_CONN (val 0x00 off, 0x01 SC, 0x02
 * SC-only). When the controller is powered off, not SC-capable, or BR/EDR
 * is disabled, only the host flags are toggled and the response is sent
 * directly; otherwise set_secure_conn_sync() is queued to write the
 * controller setting.
 * NOTE(review): braces, gotos and labels are elided in this extract;
 * comments describe only the visible statements.
 */
6835 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6836 void *data, u16 len)
6838 struct mgmt_mode *cp = data;
6839 struct mgmt_pending_cmd *cmd;
6843 bt_dev_dbg(hdev, "sock %p", sk);
/* SC is acceptable if supported by the controller or usable over LE. */
6845 if (!lmp_sc_capable(hdev) &&
6846 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6847 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6848 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR SC requires SSP to be enabled first. */
6850 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6851 lmp_sc_capable(hdev) &&
6852 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6853 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6854 MGMT_STATUS_REJECTED);
6856 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6857 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6858 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no controller write possible or needed. */
6862 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6863 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6867 changed = !hci_dev_test_and_set_flag(hdev,
6869 if (cp->val == 0x02)
6870 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6872 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6874 changed = hci_dev_test_and_clear_flag(hdev,
6876 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6879 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6884 err = new_settings(hdev, sk);
/* Already in the requested state: just acknowledge. */
6891 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6892 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6893 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6897 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6901 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6902 set_secure_conn_complete);
6905 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6906 MGMT_STATUS_FAILED);
6908 mgmt_pending_free(cmd);
6912 hci_dev_unlock(hdev);
6916 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6917 void *data, u16 len)
6919 struct mgmt_mode *cp = data;
6920 bool changed, use_changed;
6923 bt_dev_dbg(hdev, "sock %p", sk);
6925 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6926 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6927 MGMT_STATUS_INVALID_PARAMS);
6932 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6934 changed = hci_dev_test_and_clear_flag(hdev,
6935 HCI_KEEP_DEBUG_KEYS);
6937 if (cp->val == 0x02)
6938 use_changed = !hci_dev_test_and_set_flag(hdev,
6939 HCI_USE_DEBUG_KEYS);
6941 use_changed = hci_dev_test_and_clear_flag(hdev,
6942 HCI_USE_DEBUG_KEYS);
6944 if (hdev_is_powered(hdev) && use_changed &&
6945 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6946 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6947 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6948 sizeof(mode), &mode);
6951 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6956 err = new_settings(hdev, sk);
6959 hci_dev_unlock(hdev);
6963 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6966 struct mgmt_cp_set_privacy *cp = cp_data;
6970 bt_dev_dbg(hdev, "sock %p", sk);
6972 if (!lmp_le_capable(hdev))
6973 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6974 MGMT_STATUS_NOT_SUPPORTED);
6976 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6977 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6978 MGMT_STATUS_INVALID_PARAMS);
6980 if (hdev_is_powered(hdev))
6981 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6982 MGMT_STATUS_REJECTED);
6986 /* If user space supports this command it is also expected to
6987 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6989 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6992 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6993 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6994 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6995 hci_adv_instances_set_rpa_expired(hdev, true);
6996 if (cp->privacy == 0x02)
6997 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6999 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7001 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7002 memset(hdev->irk, 0, sizeof(hdev->irk));
7003 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7004 hci_adv_instances_set_rpa_expired(hdev, false);
7005 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7008 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7013 err = new_settings(hdev, sk);
7016 hci_dev_unlock(hdev);
7020 static bool irk_is_valid(struct mgmt_irk_info *irk)
7022 switch (irk->addr.type) {
7023 case BDADDR_LE_PUBLIC:
7026 case BDADDR_LE_RANDOM:
7027 /* Two most significant bits shall be set */
7028 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7036 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7039 struct mgmt_cp_load_irks *cp = cp_data;
7040 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7041 sizeof(struct mgmt_irk_info));
7042 u16 irk_count, expected_len;
7045 bt_dev_dbg(hdev, "sock %p", sk);
7047 if (!lmp_le_capable(hdev))
7048 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7049 MGMT_STATUS_NOT_SUPPORTED);
7051 irk_count = __le16_to_cpu(cp->irk_count);
7052 if (irk_count > max_irk_count) {
7053 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7055 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7056 MGMT_STATUS_INVALID_PARAMS);
7059 expected_len = struct_size(cp, irks, irk_count);
7060 if (expected_len != len) {
7061 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7063 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7064 MGMT_STATUS_INVALID_PARAMS);
7067 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7069 for (i = 0; i < irk_count; i++) {
7070 struct mgmt_irk_info *key = &cp->irks[i];
7072 if (!irk_is_valid(key))
7073 return mgmt_cmd_status(sk, hdev->id,
7075 MGMT_STATUS_INVALID_PARAMS);
7080 hci_smp_irks_clear(hdev);
7082 for (i = 0; i < irk_count; i++) {
7083 struct mgmt_irk_info *irk = &cp->irks[i];
7084 u8 addr_type = le_addr_type(irk->addr.type);
7086 if (hci_is_blocked_key(hdev,
7087 HCI_BLOCKED_KEY_TYPE_IRK,
7089 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7094 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7095 if (irk->addr.type == BDADDR_BREDR)
7096 addr_type = BDADDR_BREDR;
7098 hci_add_irk(hdev, &irk->addr.bdaddr,
7099 addr_type, irk->val,
7103 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7105 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7107 hci_dev_unlock(hdev);
7112 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7114 if (key->initiator != 0x00 && key->initiator != 0x01)
7117 switch (key->addr.type) {
7118 case BDADDR_LE_PUBLIC:
7121 case BDADDR_LE_RANDOM:
7122 /* Two most significant bits shall be set */
7123 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7131 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7132 void *cp_data, u16 len)
7134 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7135 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7136 sizeof(struct mgmt_ltk_info));
7137 u16 key_count, expected_len;
7140 bt_dev_dbg(hdev, "sock %p", sk);
7142 if (!lmp_le_capable(hdev))
7143 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7144 MGMT_STATUS_NOT_SUPPORTED);
7146 key_count = __le16_to_cpu(cp->key_count);
7147 if (key_count > max_key_count) {
7148 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7150 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7151 MGMT_STATUS_INVALID_PARAMS);
7154 expected_len = struct_size(cp, keys, key_count);
7155 if (expected_len != len) {
7156 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7158 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7159 MGMT_STATUS_INVALID_PARAMS);
7162 bt_dev_dbg(hdev, "key_count %u", key_count);
7164 for (i = 0; i < key_count; i++) {
7165 struct mgmt_ltk_info *key = &cp->keys[i];
7167 if (!ltk_is_valid(key))
7168 return mgmt_cmd_status(sk, hdev->id,
7169 MGMT_OP_LOAD_LONG_TERM_KEYS,
7170 MGMT_STATUS_INVALID_PARAMS);
7175 hci_smp_ltks_clear(hdev);
7177 for (i = 0; i < key_count; i++) {
7178 struct mgmt_ltk_info *key = &cp->keys[i];
7179 u8 type, authenticated;
7180 u8 addr_type = le_addr_type(key->addr.type);
7182 if (hci_is_blocked_key(hdev,
7183 HCI_BLOCKED_KEY_TYPE_LTK,
7185 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7190 switch (key->type) {
7191 case MGMT_LTK_UNAUTHENTICATED:
7192 authenticated = 0x00;
7193 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7195 case MGMT_LTK_AUTHENTICATED:
7196 authenticated = 0x01;
7197 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7199 case MGMT_LTK_P256_UNAUTH:
7200 authenticated = 0x00;
7201 type = SMP_LTK_P256;
7203 case MGMT_LTK_P256_AUTH:
7204 authenticated = 0x01;
7205 type = SMP_LTK_P256;
7207 case MGMT_LTK_P256_DEBUG:
7208 authenticated = 0x00;
7209 type = SMP_LTK_P256_DEBUG;
7215 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7216 if (key->addr.type == BDADDR_BREDR)
7217 addr_type = BDADDR_BREDR;
7219 hci_add_ltk(hdev, &key->addr.bdaddr,
7220 addr_type, type, authenticated,
7221 key->val, key->enc_size, key->ediv, key->rand);
7224 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7227 hci_dev_unlock(hdev);
7232 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7234 struct mgmt_pending_cmd *cmd = data;
7235 struct hci_conn *conn = cmd->user_data;
7236 struct mgmt_cp_get_conn_info *cp = cmd->param;
7237 struct mgmt_rp_get_conn_info rp;
7240 bt_dev_dbg(hdev, "err %d", err);
7242 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7244 status = mgmt_status(err);
7245 if (status == MGMT_STATUS_SUCCESS) {
7246 rp.rssi = conn->rssi;
7247 rp.tx_power = conn->tx_power;
7248 rp.max_tx_power = conn->max_tx_power;
7250 rp.rssi = HCI_RSSI_INVALID;
7251 rp.tx_power = HCI_TX_POWER_INVALID;
7252 rp.max_tx_power = HCI_TX_POWER_INVALID;
7255 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7258 mgmt_pending_free(cmd);
7261 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7263 struct mgmt_pending_cmd *cmd = data;
7264 struct mgmt_cp_get_conn_info *cp = cmd->param;
7265 struct hci_conn *conn;
7269 /* Make sure we are still connected */
7270 if (cp->addr.type == BDADDR_BREDR)
7271 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7274 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7276 if (!conn || conn->state != BT_CONNECTED)
7277 return MGMT_STATUS_NOT_CONNECTED;
7279 cmd->user_data = conn;
7280 handle = cpu_to_le16(conn->handle);
7282 /* Refresh RSSI each time */
7283 err = hci_read_rssi_sync(hdev, handle);
7285 /* For LE links TX power does not change thus we don't need to
7286 * query for it once value is known.
7288 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7289 conn->tx_power == HCI_TX_POWER_INVALID))
7290 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7292 /* Max TX power needs to be read only once per connection */
7293 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7294 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7299 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7302 struct mgmt_cp_get_conn_info *cp = data;
7303 struct mgmt_rp_get_conn_info rp;
7304 struct hci_conn *conn;
7305 unsigned long conn_info_age;
7308 bt_dev_dbg(hdev, "sock %p", sk);
7310 memset(&rp, 0, sizeof(rp));
7311 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7312 rp.addr.type = cp->addr.type;
7314 if (!bdaddr_type_is_valid(cp->addr.type))
7315 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7316 MGMT_STATUS_INVALID_PARAMS,
7321 if (!hdev_is_powered(hdev)) {
7322 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7323 MGMT_STATUS_NOT_POWERED, &rp,
7328 if (cp->addr.type == BDADDR_BREDR)
7329 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7332 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7334 if (!conn || conn->state != BT_CONNECTED) {
7335 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7336 MGMT_STATUS_NOT_CONNECTED, &rp,
7341 /* To avoid client trying to guess when to poll again for information we
7342 * calculate conn info age as random value between min/max set in hdev.
7344 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7345 hdev->conn_info_max_age - 1);
7347 /* Query controller to refresh cached values if they are too old or were
7350 if (time_after(jiffies, conn->conn_info_timestamp +
7351 msecs_to_jiffies(conn_info_age)) ||
7352 !conn->conn_info_timestamp) {
7353 struct mgmt_pending_cmd *cmd;
7355 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7360 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7361 cmd, get_conn_info_complete);
7365 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7366 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7369 mgmt_pending_free(cmd);
7374 conn->conn_info_timestamp = jiffies;
7376 /* Cache is valid, just reply with values cached in hci_conn */
7377 rp.rssi = conn->rssi;
7378 rp.tx_power = conn->tx_power;
7379 rp.max_tx_power = conn->max_tx_power;
7381 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7382 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7386 hci_dev_unlock(hdev);
7390 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7392 struct mgmt_pending_cmd *cmd = data;
7393 struct mgmt_cp_get_clock_info *cp = cmd->param;
7394 struct mgmt_rp_get_clock_info rp;
7395 struct hci_conn *conn = cmd->user_data;
7396 u8 status = mgmt_status(err);
7398 bt_dev_dbg(hdev, "err %d", err);
7400 memset(&rp, 0, sizeof(rp));
7401 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7402 rp.addr.type = cp->addr.type;
7407 rp.local_clock = cpu_to_le32(hdev->clock);
7410 rp.piconet_clock = cpu_to_le32(conn->clock);
7411 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7415 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7418 mgmt_pending_free(cmd);
7421 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7423 struct mgmt_pending_cmd *cmd = data;
7424 struct mgmt_cp_get_clock_info *cp = cmd->param;
7425 struct hci_cp_read_clock hci_cp;
7426 struct hci_conn *conn;
7428 memset(&hci_cp, 0, sizeof(hci_cp));
7429 hci_read_clock_sync(hdev, &hci_cp);
7431 /* Make sure connection still exists */
7432 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7433 if (!conn || conn->state != BT_CONNECTED)
7434 return MGMT_STATUS_NOT_CONNECTED;
7436 cmd->user_data = conn;
7437 hci_cp.handle = cpu_to_le16(conn->handle);
7438 hci_cp.which = 0x01; /* Piconet clock */
7440 return hci_read_clock_sync(hdev, &hci_cp);
7443 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7446 struct mgmt_cp_get_clock_info *cp = data;
7447 struct mgmt_rp_get_clock_info rp;
7448 struct mgmt_pending_cmd *cmd;
7449 struct hci_conn *conn;
7452 bt_dev_dbg(hdev, "sock %p", sk);
7454 memset(&rp, 0, sizeof(rp));
7455 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7456 rp.addr.type = cp->addr.type;
7458 if (cp->addr.type != BDADDR_BREDR)
7459 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7460 MGMT_STATUS_INVALID_PARAMS,
7465 if (!hdev_is_powered(hdev)) {
7466 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7467 MGMT_STATUS_NOT_POWERED, &rp,
7472 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7473 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7475 if (!conn || conn->state != BT_CONNECTED) {
7476 err = mgmt_cmd_complete(sk, hdev->id,
7477 MGMT_OP_GET_CLOCK_INFO,
7478 MGMT_STATUS_NOT_CONNECTED,
7486 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7490 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7491 get_clock_info_complete);
7494 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7495 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7498 mgmt_pending_free(cmd);
7503 hci_dev_unlock(hdev);
7507 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7509 struct hci_conn *conn;
7511 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7515 if (conn->dst_type != type)
7518 if (conn->state != BT_CONNECTED)
7524 /* This function requires the caller holds hdev->lock */
7525 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7526 u8 addr_type, u8 auto_connect)
7528 struct hci_conn_params *params;
7530 params = hci_conn_params_add(hdev, addr, addr_type);
7534 if (params->auto_connect == auto_connect)
7537 hci_pend_le_list_del_init(params);
7539 switch (auto_connect) {
7540 case HCI_AUTO_CONN_DISABLED:
7541 case HCI_AUTO_CONN_LINK_LOSS:
7542 /* If auto connect is being disabled when we're trying to
7543 * connect to device, keep connecting.
7545 if (params->explicit_connect)
7546 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7548 case HCI_AUTO_CONN_REPORT:
7549 if (params->explicit_connect)
7550 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7552 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7554 case HCI_AUTO_CONN_DIRECT:
7555 case HCI_AUTO_CONN_ALWAYS:
7556 if (!is_connected(hdev, addr, addr_type))
7557 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7561 params->auto_connect = auto_connect;
7563 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7564 addr, addr_type, auto_connect);
7569 static void device_added(struct sock *sk, struct hci_dev *hdev,
7570 bdaddr_t *bdaddr, u8 type, u8 action)
7572 struct mgmt_ev_device_added ev;
7574 bacpy(&ev.addr.bdaddr, bdaddr);
7575 ev.addr.type = type;
7578 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7581 static int add_device_sync(struct hci_dev *hdev, void *data)
7583 return hci_update_passive_scan_sync(hdev);
7586 static int add_device(struct sock *sk, struct hci_dev *hdev,
7587 void *data, u16 len)
7589 struct mgmt_cp_add_device *cp = data;
7590 u8 auto_conn, addr_type;
7591 struct hci_conn_params *params;
7593 u32 current_flags = 0;
7594 u32 supported_flags;
7596 bt_dev_dbg(hdev, "sock %p", sk);
7598 if (!bdaddr_type_is_valid(cp->addr.type) ||
7599 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7600 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7601 MGMT_STATUS_INVALID_PARAMS,
7602 &cp->addr, sizeof(cp->addr));
7604 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7605 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7606 MGMT_STATUS_INVALID_PARAMS,
7607 &cp->addr, sizeof(cp->addr));
7611 if (cp->addr.type == BDADDR_BREDR) {
7612 /* Only incoming connections action is supported for now */
7613 if (cp->action != 0x01) {
7614 err = mgmt_cmd_complete(sk, hdev->id,
7616 MGMT_STATUS_INVALID_PARAMS,
7617 &cp->addr, sizeof(cp->addr));
7621 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7627 hci_update_scan(hdev);
7632 addr_type = le_addr_type(cp->addr.type);
7634 if (cp->action == 0x02)
7635 auto_conn = HCI_AUTO_CONN_ALWAYS;
7636 else if (cp->action == 0x01)
7637 auto_conn = HCI_AUTO_CONN_DIRECT;
7639 auto_conn = HCI_AUTO_CONN_REPORT;
7641 /* Kernel internally uses conn_params with resolvable private
7642 * address, but Add Device allows only identity addresses.
7643 * Make sure it is enforced before calling
7644 * hci_conn_params_lookup.
7646 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7647 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7648 MGMT_STATUS_INVALID_PARAMS,
7649 &cp->addr, sizeof(cp->addr));
7653 /* If the connection parameters don't exist for this device,
7654 * they will be created and configured with defaults.
7656 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7658 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7659 MGMT_STATUS_FAILED, &cp->addr,
7663 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7666 current_flags = params->flags;
7669 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7674 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7675 supported_flags = hdev->conn_flags;
7676 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7677 supported_flags, current_flags);
7679 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7680 MGMT_STATUS_SUCCESS, &cp->addr,
7684 hci_dev_unlock(hdev);
7688 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7689 bdaddr_t *bdaddr, u8 type)
7691 struct mgmt_ev_device_removed ev;
7693 bacpy(&ev.addr.bdaddr, bdaddr);
7694 ev.addr.type = type;
7696 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7699 static int remove_device_sync(struct hci_dev *hdev, void *data)
7701 return hci_update_passive_scan_sync(hdev);
7704 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7705 void *data, u16 len)
7707 struct mgmt_cp_remove_device *cp = data;
7710 bt_dev_dbg(hdev, "sock %p", sk);
7714 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7715 struct hci_conn_params *params;
7718 if (!bdaddr_type_is_valid(cp->addr.type)) {
7719 err = mgmt_cmd_complete(sk, hdev->id,
7720 MGMT_OP_REMOVE_DEVICE,
7721 MGMT_STATUS_INVALID_PARAMS,
7722 &cp->addr, sizeof(cp->addr));
7726 if (cp->addr.type == BDADDR_BREDR) {
7727 err = hci_bdaddr_list_del(&hdev->accept_list,
7731 err = mgmt_cmd_complete(sk, hdev->id,
7732 MGMT_OP_REMOVE_DEVICE,
7733 MGMT_STATUS_INVALID_PARAMS,
7739 hci_update_scan(hdev);
7741 device_removed(sk, hdev, &cp->addr.bdaddr,
7746 addr_type = le_addr_type(cp->addr.type);
7748 /* Kernel internally uses conn_params with resolvable private
7749 * address, but Remove Device allows only identity addresses.
7750 * Make sure it is enforced before calling
7751 * hci_conn_params_lookup.
7753 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7754 err = mgmt_cmd_complete(sk, hdev->id,
7755 MGMT_OP_REMOVE_DEVICE,
7756 MGMT_STATUS_INVALID_PARAMS,
7757 &cp->addr, sizeof(cp->addr));
7761 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7764 err = mgmt_cmd_complete(sk, hdev->id,
7765 MGMT_OP_REMOVE_DEVICE,
7766 MGMT_STATUS_INVALID_PARAMS,
7767 &cp->addr, sizeof(cp->addr));
7771 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7772 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7773 err = mgmt_cmd_complete(sk, hdev->id,
7774 MGMT_OP_REMOVE_DEVICE,
7775 MGMT_STATUS_INVALID_PARAMS,
7776 &cp->addr, sizeof(cp->addr));
7780 hci_conn_params_free(params);
7782 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7784 struct hci_conn_params *p, *tmp;
7785 struct bdaddr_list *b, *btmp;
7787 if (cp->addr.type) {
7788 err = mgmt_cmd_complete(sk, hdev->id,
7789 MGMT_OP_REMOVE_DEVICE,
7790 MGMT_STATUS_INVALID_PARAMS,
7791 &cp->addr, sizeof(cp->addr));
7795 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7796 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7801 hci_update_scan(hdev);
7803 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7804 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7806 device_removed(sk, hdev, &p->addr, p->addr_type);
7807 if (p->explicit_connect) {
7808 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7811 hci_conn_params_free(p);
7814 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7817 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7820 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7821 MGMT_STATUS_SUCCESS, &cp->addr,
7824 hci_dev_unlock(hdev);
7828 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7831 struct mgmt_cp_load_conn_param *cp = data;
7832 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7833 sizeof(struct mgmt_conn_param));
7834 u16 param_count, expected_len;
7837 if (!lmp_le_capable(hdev))
7838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7839 MGMT_STATUS_NOT_SUPPORTED);
7841 param_count = __le16_to_cpu(cp->param_count);
7842 if (param_count > max_param_count) {
7843 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7846 MGMT_STATUS_INVALID_PARAMS);
7849 expected_len = struct_size(cp, params, param_count);
7850 if (expected_len != len) {
7851 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7853 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7854 MGMT_STATUS_INVALID_PARAMS);
7857 bt_dev_dbg(hdev, "param_count %u", param_count);
7861 hci_conn_params_clear_disabled(hdev);
7863 for (i = 0; i < param_count; i++) {
7864 struct mgmt_conn_param *param = &cp->params[i];
7865 struct hci_conn_params *hci_param;
7866 u16 min, max, latency, timeout;
7869 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7872 if (param->addr.type == BDADDR_LE_PUBLIC) {
7873 addr_type = ADDR_LE_DEV_PUBLIC;
7874 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7875 addr_type = ADDR_LE_DEV_RANDOM;
7877 bt_dev_err(hdev, "ignoring invalid connection parameters");
7881 min = le16_to_cpu(param->min_interval);
7882 max = le16_to_cpu(param->max_interval);
7883 latency = le16_to_cpu(param->latency);
7884 timeout = le16_to_cpu(param->timeout);
7886 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7887 min, max, latency, timeout);
7889 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7890 bt_dev_err(hdev, "ignoring invalid connection parameters");
7894 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
7897 bt_dev_err(hdev, "failed to add connection parameters");
7901 hci_param->conn_min_interval = min;
7902 hci_param->conn_max_interval = max;
7903 hci_param->conn_latency = latency;
7904 hci_param->supervision_timeout = timeout;
7907 hci_dev_unlock(hdev);
7909 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7913 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7914 void *data, u16 len)
7916 struct mgmt_cp_set_external_config *cp = data;
7920 bt_dev_dbg(hdev, "sock %p", sk);
7922 if (hdev_is_powered(hdev))
7923 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7924 MGMT_STATUS_REJECTED);
7926 if (cp->config != 0x00 && cp->config != 0x01)
7927 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7928 MGMT_STATUS_INVALID_PARAMS);
7930 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7931 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7932 MGMT_STATUS_NOT_SUPPORTED);
7937 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7939 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7941 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7948 err = new_options(hdev, sk);
7950 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7951 mgmt_index_removed(hdev);
7953 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7954 hci_dev_set_flag(hdev, HCI_CONFIG);
7955 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7957 queue_work(hdev->req_workqueue, &hdev->power_on);
7959 set_bit(HCI_RAW, &hdev->flags);
7960 mgmt_index_added(hdev);
7965 hci_dev_unlock(hdev);
7969 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7970 void *data, u16 len)
7972 struct mgmt_cp_set_public_address *cp = data;
7976 bt_dev_dbg(hdev, "sock %p", sk);
7978 if (hdev_is_powered(hdev))
7979 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7980 MGMT_STATUS_REJECTED);
7982 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7983 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7984 MGMT_STATUS_INVALID_PARAMS);
7986 if (!hdev->set_bdaddr)
7987 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7988 MGMT_STATUS_NOT_SUPPORTED);
7992 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7993 bacpy(&hdev->public_addr, &cp->bdaddr);
7995 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8002 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8003 err = new_options(hdev, sk);
8005 if (is_configured(hdev)) {
8006 mgmt_index_removed(hdev);
8008 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8010 hci_dev_set_flag(hdev, HCI_CONFIG);
8011 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8013 queue_work(hdev->req_workqueue, &hdev->power_on);
8017 hci_dev_unlock(hdev);
8021 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8024 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8025 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8026 u8 *h192, *r192, *h256, *r256;
8027 struct mgmt_pending_cmd *cmd = data;
8028 struct sk_buff *skb = cmd->skb;
8029 u8 status = mgmt_status(err);
8032 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8037 status = MGMT_STATUS_FAILED;
8038 else if (IS_ERR(skb))
8039 status = mgmt_status(PTR_ERR(skb));
8041 status = mgmt_status(skb->data[0]);
8044 bt_dev_dbg(hdev, "status %u", status);
8046 mgmt_cp = cmd->param;
8049 status = mgmt_status(status);
8056 } else if (!bredr_sc_enabled(hdev)) {
8057 struct hci_rp_read_local_oob_data *rp;
8059 if (skb->len != sizeof(*rp)) {
8060 status = MGMT_STATUS_FAILED;
8063 status = MGMT_STATUS_SUCCESS;
8064 rp = (void *)skb->data;
8066 eir_len = 5 + 18 + 18;
8073 struct hci_rp_read_local_oob_ext_data *rp;
8075 if (skb->len != sizeof(*rp)) {
8076 status = MGMT_STATUS_FAILED;
8079 status = MGMT_STATUS_SUCCESS;
8080 rp = (void *)skb->data;
8082 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8083 eir_len = 5 + 18 + 18;
8087 eir_len = 5 + 18 + 18 + 18 + 18;
8097 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8104 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8105 hdev->dev_class, 3);
8108 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8109 EIR_SSP_HASH_C192, h192, 16);
8110 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8111 EIR_SSP_RAND_R192, r192, 16);
8115 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8116 EIR_SSP_HASH_C256, h256, 16);
8117 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8118 EIR_SSP_RAND_R256, r256, 16);
8122 mgmt_rp->type = mgmt_cp->type;
8123 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8125 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8126 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8127 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8128 if (err < 0 || status)
8131 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8133 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8134 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8135 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8137 if (skb && !IS_ERR(skb))
8141 mgmt_pending_remove(cmd);
8144 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8145 struct mgmt_cp_read_local_oob_ext_data *cp)
8147 struct mgmt_pending_cmd *cmd;
8150 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8155 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8156 read_local_oob_ext_data_complete);
8159 mgmt_pending_remove(cmd);
8166 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8167 void *data, u16 data_len)
8169 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8170 struct mgmt_rp_read_local_oob_ext_data *rp;
8173 u8 status, flags, role, addr[7], hash[16], rand[16];
8176 bt_dev_dbg(hdev, "sock %p", sk);
8178 if (hdev_is_powered(hdev)) {
8180 case BIT(BDADDR_BREDR):
8181 status = mgmt_bredr_support(hdev);
8187 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8188 status = mgmt_le_support(hdev);
8192 eir_len = 9 + 3 + 18 + 18 + 3;
8195 status = MGMT_STATUS_INVALID_PARAMS;
8200 status = MGMT_STATUS_NOT_POWERED;
8204 rp_len = sizeof(*rp) + eir_len;
8205 rp = kmalloc(rp_len, GFP_ATOMIC);
8209 if (!status && !lmp_ssp_capable(hdev)) {
8210 status = MGMT_STATUS_NOT_SUPPORTED;
8221 case BIT(BDADDR_BREDR):
8222 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8223 err = read_local_ssp_oob_req(hdev, sk, cp);
8224 hci_dev_unlock(hdev);
8228 status = MGMT_STATUS_FAILED;
8231 eir_len = eir_append_data(rp->eir, eir_len,
8233 hdev->dev_class, 3);
8236 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8237 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8238 smp_generate_oob(hdev, hash, rand) < 0) {
8239 hci_dev_unlock(hdev);
8240 status = MGMT_STATUS_FAILED;
8244 /* This should return the active RPA, but since the RPA
8245 * is only programmed on demand, it is really hard to fill
8246 * this in at the moment. For now disallow retrieving
8247 * local out-of-band data when privacy is in use.
8249 * Returning the identity address will not help here since
8250 * pairing happens before the identity resolving key is
8251 * known and thus the connection establishment happens
8252 * based on the RPA and not the identity address.
8254 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8255 hci_dev_unlock(hdev);
8256 status = MGMT_STATUS_REJECTED;
8260 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8261 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8262 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8263 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8264 memcpy(addr, &hdev->static_addr, 6);
8267 memcpy(addr, &hdev->bdaddr, 6);
8271 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8272 addr, sizeof(addr));
8274 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8279 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8280 &role, sizeof(role));
8282 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8283 eir_len = eir_append_data(rp->eir, eir_len,
8285 hash, sizeof(hash));
8287 eir_len = eir_append_data(rp->eir, eir_len,
8289 rand, sizeof(rand));
8292 flags = mgmt_get_adv_discov_flags(hdev);
8294 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8295 flags |= LE_AD_NO_BREDR;
8297 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8298 &flags, sizeof(flags));
8302 hci_dev_unlock(hdev);
8304 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8306 status = MGMT_STATUS_SUCCESS;
8309 rp->type = cp->type;
8310 rp->eir_len = cpu_to_le16(eir_len);
8312 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8313 status, rp, sizeof(*rp) + eir_len);
8314 if (err < 0 || status)
8317 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8318 rp, sizeof(*rp) + eir_len,
8319 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT advertising flags this controller supports.
 * The baseline MGMT_ADV_FLAG_* / MGMT_ADV_PARAM_* bits are always set;
 * TX-power and secondary-PHY bits are added based on controller
 * capabilities (ext_adv_capable / le_2m_capable / le_coded_capable).
 */
8327 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8331 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8332 flags |= MGMT_ADV_FLAG_DISCOV;
8333 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8334 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8335 flags |= MGMT_ADV_FLAG_APPEARANCE;
8336 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8337 flags |= MGMT_ADV_PARAM_DURATION;
8338 flags |= MGMT_ADV_PARAM_TIMEOUT;
8339 flags |= MGMT_ADV_PARAM_INTERVALS;
8340 flags |= MGMT_ADV_PARAM_TX_POWER;
8341 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8343 /* In extended adv TX_POWER returned from Set Adv Param
8344 * will be always valid.
/* Legacy advertising only reports TX power when the controller gave a
 * valid adv_tx_power value; extended advertising always reports it. */
8346 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8347 flags |= MGMT_ADV_FLAG_TX_POWER;
/* Secondary-PHY selection is only meaningful with extended advertising. */
8349 if (ext_adv_capable(hdev)) {
8350 flags |= MGMT_ADV_FLAG_SEC_1M;
8351 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8352 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8354 if (le_2m_capable(hdev))
8355 flags |= MGMT_ADV_FLAG_SEC_2M;
8357 if (le_coded_capable(hdev))
8358 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, data-length limits, instance limits and the list of currently
 * registered advertising instances. Rejected when the controller has
 * no LE support. The reply is variable-length (one byte per instance),
 * hence the kmalloc of sizeof(*rp) + adv_instance_cnt.
 */
8364 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8365 void *data, u16 data_len)
8367 struct mgmt_rp_read_adv_features *rp;
8370 struct adv_info *adv_instance;
8371 u32 supported_flags;
8374 bt_dev_dbg(hdev, "sock %p", sk);
8376 if (!lmp_le_capable(hdev))
8377 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8378 MGMT_STATUS_REJECTED);
/* Reply carries one instance identifier byte per registered instance. */
8382 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8383 rp = kmalloc(rp_len, GFP_ATOMIC);
8385 hci_dev_unlock(hdev);
8389 supported_flags = get_supported_adv_flags(hdev);
8391 rp->supported_flags = cpu_to_le32(supported_flags);
8392 rp->max_adv_data_len = max_adv_len(hdev);
8393 rp->max_scan_rsp_len = max_adv_len(hdev);
8394 rp->max_instances = hdev->le_num_of_adv_sets;
8395 rp->num_instances = hdev->adv_instance_cnt;
8397 instance = rp->instance;
8398 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8399 /* Only instances 1-le_num_of_adv_sets are externally visible */
8400 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8401 *instance = adv_instance->instance;
/* Instance not externally visible: drop it from the reported count. */
8404 rp->num_instances--;
8409 hci_dev_unlock(hdev);
8411 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8412 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Return the number of EIR bytes the local name occupies when appended
 * to advertising data (length + type + name, per the scratch buffer).
 */
8419 static u8 calculate_name_len(struct hci_dev *hdev)
8421 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8423 return eir_append_local_name(hdev, buf, 0);
/* Compute how many bytes of caller-supplied TLV data fit in an
 * advertisement, starting from the controller maximum and subtracting
 * space consumed by fields the kernel appends itself (flags, TX power,
 * local name, appearance) according to adv_flags.
 */
8426 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8429 u8 max_len = max_adv_len(hdev);
8432 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8433 MGMT_ADV_FLAG_LIMITED_DISCOV |
8434 MGMT_ADV_FLAG_MANAGED_FLAGS))
8437 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8440 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8441 max_len -= calculate_name_len(hdev)
8443 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the EIR Flags field for this instance,
 * i.e. userspace must not supply its own EIR_FLAGS entry.
 */
8450 static bool flags_managed(u32 adv_flags)
8452 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8453 MGMT_ADV_FLAG_LIMITED_DISCOV |
8454 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel appends the TX power EIR field itself. */
8457 static bool tx_power_managed(u32 adv_flags)
8459 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel appends the local name EIR field itself. */
8462 static bool name_managed(u32 adv_flags)
8464 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel appends the appearance EIR field itself. */
8467 static bool appearance_managed(u32 adv_flags)
8469 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate userspace-supplied advertising/scan-response TLV data:
 * it must fit within tlv_data_max_len(), be well-formed length/type
 * encoded, and must not contain fields the kernel manages itself
 * (flags, TX power, names, appearance) for the given adv_flags.
 * Returns false on any violation.
 */
8472 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8473 u8 len, bool is_adv_data)
8478 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8483 /* Make sure that the data is correctly formatted. */
8484 for (i = 0; i < len; i += (cur_len + 1)) {
/* EIR_FLAGS is only legal in adv data, and only when not managed. */
8490 if (data[i + 1] == EIR_FLAGS &&
8491 (!is_adv_data || flags_managed(adv_flags)))
8494 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8497 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8500 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8503 if (data[i + 1] == EIR_APPEARANCE &&
8504 appearance_managed(adv_flags))
8507 /* If the current field length would exceed the total data
8508 * length, then it's invalid.
8510 if (i + cur_len >= len)
/* Check that the requested advertising flags are a subset of what the
 * controller supports and that at most one secondary-PHY (SEC_*) bit
 * is set. The (phy_flags ^ (phy_flags & -phy_flags)) expression is
 * non-zero exactly when more than one bit of the SEC mask is set.
 */
8517 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8519 u32 supported_flags, phy_flags;
8521 /* The current implementation only supports a subset of the specified
8522 * flags. Also need to check mutual exclusiveness of sec flags.
8524 supported_flags = get_supported_adv_flags(hdev);
8525 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8526 if (adv_flags & ~supported_flags ||
8527 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* True while a MGMT_OP_SET_LE command is pending; advertising changes
 * are rejected with "busy" during that window.
 */
8533 static bool adv_busy(struct hci_dev *hdev)
8535 return pending_find(MGMT_OP_SET_LE, hdev);
/* Common completion for Add Advertising-style commands. On success it
 * clears the pending marker on the instances; on failure the affected
 * instance is torn down (timeout cancelled if it was the current one,
 * instance removed, Advertising Removed event emitted).
 */
8538 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8541 struct adv_info *adv, *n;
8543 bt_dev_dbg(hdev, "err %d", err);
8547 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8554 adv->pending = false;
8558 instance = adv->instance;
8560 if (hdev->cur_adv_instance == instance)
8561 cancel_adv_timeout(hdev);
8563 hci_remove_adv_instance(hdev, instance);
8564 mgmt_advertising_removed(sk, hdev, instance);
8567 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_ADD_ADVERTISING: send the
 * command status/complete to the requesting socket, run the shared
 * add_adv_complete() cleanup and free the pending command.
 */
8570 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8572 struct mgmt_pending_cmd *cmd = data;
8573 struct mgmt_cp_add_advertising *cp = cmd->param;
8574 struct mgmt_rp_add_advertising rp;
8576 memset(&rp, 0, sizeof(rp));
8578 rp.instance = cp->instance;
8581 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8584 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8585 mgmt_status(err), &rp, sizeof(rp));
8587 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8589 mgmt_pending_free(cmd);
/* hci_cmd_sync work callback: schedule the requested advertising
 * instance (forcing a (re)start) on the cmd_sync thread.
 */
8592 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8594 struct mgmt_pending_cmd *cmd = data;
8595 struct mgmt_cp_add_advertising *cp = cmd->param;
8597 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* Handler for MGMT_OP_ADD_ADVERTISING: validate the request (LE
 * support, instance range 1..le_num_of_adv_sets, exact payload length,
 * supported flags, well-formed TLV adv/scan-rsp data), register or
 * replace the advertising instance, and queue the HCI work to start
 * advertising it when appropriate. If no HCI traffic is needed (not
 * powered, HCI_ADVERTISING set, or nothing to schedule) the command
 * completes immediately with success.
 */
8600 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8601 void *data, u16 data_len)
8603 struct mgmt_cp_add_advertising *cp = data;
8604 struct mgmt_rp_add_advertising rp;
8607 u16 timeout, duration;
8608 unsigned int prev_instance_cnt;
8609 u8 schedule_instance = 0;
8610 struct adv_info *adv, *next_instance;
8612 struct mgmt_pending_cmd *cmd;
8614 bt_dev_dbg(hdev, "sock %p", sk);
8616 status = mgmt_le_support(hdev);
8618 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8621 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8622 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8623 MGMT_STATUS_INVALID_PARAMS);
/* Total length must exactly match header + adv data + scan rsp data. */
8625 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8626 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8627 MGMT_STATUS_INVALID_PARAMS);
8629 flags = __le32_to_cpu(cp->flags);
8630 timeout = __le16_to_cpu(cp->timeout);
8631 duration = __le16_to_cpu(cp->duration);
8633 if (!requested_adv_flags_are_valid(hdev, flags))
8634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8635 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running timer, which requires a powered adapter. */
8639 if (timeout && !hdev_is_powered(hdev)) {
8640 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8641 MGMT_STATUS_REJECTED);
8645 if (adv_busy(hdev)) {
8646 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8651 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8652 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8653 cp->scan_rsp_len, false)) {
8654 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8655 MGMT_STATUS_INVALID_PARAMS);
8659 prev_instance_cnt = hdev->adv_instance_cnt;
8661 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8662 cp->adv_data_len, cp->data,
8664 cp->data + cp->adv_data_len,
8666 HCI_ADV_TX_POWER_NO_PREFERENCE,
8667 hdev->le_adv_min_interval,
8668 hdev->le_adv_max_interval, 0);
8670 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8671 MGMT_STATUS_FAILED);
8675 /* Only trigger an advertising added event if a new instance was
8678 if (hdev->adv_instance_cnt > prev_instance_cnt)
8679 mgmt_advertising_added(sk, hdev, cp->instance);
8681 if (hdev->cur_adv_instance == cp->instance) {
8682 /* If the currently advertised instance is being changed then
8683 * cancel the current advertising and schedule the next
8684 * instance. If there is only one instance then the overridden
8685 * advertising data will be visible right away.
8687 cancel_adv_timeout(hdev);
8689 next_instance = hci_get_next_instance(hdev, cp->instance);
8691 schedule_instance = next_instance->instance;
8692 } else if (!hdev->adv_instance_timeout) {
8693 /* Immediately advertise the new instance if no other
8694 * instance is currently being advertised.
8696 schedule_instance = cp->instance;
8699 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8700 * there is no instance to be advertised then we have no HCI
8701 * communication to make. Simply return.
8703 if (!hdev_is_powered(hdev) ||
8704 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8705 !schedule_instance) {
8706 rp.instance = cp->instance;
8707 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8708 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8712 /* We're good to go, update advertising data, parameters, and start
8715 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
/* Point the pending command's param at the instance actually queued. */
8722 cp->instance = schedule_instance;
8724 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8725 add_advertising_complete);
8727 mgmt_pending_free(cmd);
8730 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_ADD_EXT_ADV_PARAMS. On success
 * the reply reports the selected TX power and the remaining room for
 * adv/scan-rsp data given the chosen flags; on failure the instance
 * is removed again (with an Advertising Removed event if it had been
 * announced) and a command status is returned instead.
 */
8735 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8738 struct mgmt_pending_cmd *cmd = data;
8739 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8740 struct mgmt_rp_add_ext_adv_params rp;
8741 struct adv_info *adv;
8744 BT_DBG("%s", hdev->name);
8748 adv = hci_find_adv_instance(hdev, cp->instance);
8752 rp.instance = cp->instance;
8753 rp.tx_power = adv->tx_power;
8755 /* While we're at it, inform userspace of the available space for this
8756 * advertisement, given the flags that will be used.
8758 flags = __le32_to_cpu(cp->flags);
8759 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8760 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8763 /* If this advertisement was previously advertising and we
8764 * failed to update it, we signal that it has been removed and
8765 * delete its structure
8768 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8770 hci_remove_adv_instance(hdev, cp->instance);
8772 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8775 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8776 mgmt_status(err), &rp, sizeof(rp));
8780 mgmt_pending_free(cmd);
8782 hci_dev_unlock(hdev);
/* hci_cmd_sync work callback: program the extended advertising
 * parameters for the instance on the cmd_sync thread.
 */
8785 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8787 struct mgmt_pending_cmd *cmd = data;
8788 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8790 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: first half of the two-call
 * extended Add Advertising flow. Validates the request, fills in
 * defaults for parameters the caller did not flag as present, creates
 * an instance with no adv/scan-rsp data yet, and — when the controller
 * has extended advertising — queues the HCI parameter setup. On legacy
 * controllers it completes immediately; parameters are applied later
 * when the data call schedules the instance. Unlike the legacy call,
 * this interface requires the adapter to be powered.
 */
8793 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8794 void *data, u16 data_len)
8796 struct mgmt_cp_add_ext_adv_params *cp = data;
8797 struct mgmt_rp_add_ext_adv_params rp;
8798 struct mgmt_pending_cmd *cmd = NULL;
8799 struct adv_info *adv;
8800 u32 flags, min_interval, max_interval;
8801 u16 timeout, duration;
8806 BT_DBG("%s", hdev->name);
8808 status = mgmt_le_support(hdev);
8810 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8813 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8814 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8815 MGMT_STATUS_INVALID_PARAMS);
8817 /* The purpose of breaking add_advertising into two separate MGMT calls
8818 * for params and data is to allow more parameters to be added to this
8819 * structure in the future. For this reason, we verify that we have the
8820 * bare minimum structure we know of when the interface was defined. Any
8821 * extra parameters we don't know about will be ignored in this request.
8823 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8824 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8825 MGMT_STATUS_INVALID_PARAMS);
8827 flags = __le32_to_cpu(cp->flags);
8829 if (!requested_adv_flags_are_valid(hdev, flags))
8830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8831 MGMT_STATUS_INVALID_PARAMS);
8835 /* In new interface, we require that we are powered to register */
8836 if (!hdev_is_powered(hdev)) {
8837 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8838 MGMT_STATUS_REJECTED);
8842 if (adv_busy(hdev)) {
8843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8848 /* Parse defined parameters from request, use defaults otherwise */
8849 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8850 __le16_to_cpu(cp->timeout) : 0;
8852 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8853 __le16_to_cpu(cp->duration) :
8854 hdev->def_multi_adv_rotation_duration;
8856 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8857 __le32_to_cpu(cp->min_interval) :
8858 hdev->le_adv_min_interval;
8860 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8861 __le32_to_cpu(cp->max_interval) :
8862 hdev->le_adv_max_interval;
8864 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8866 HCI_ADV_TX_POWER_NO_PREFERENCE;
8868 /* Create advertising instance with no advertising or response data */
8869 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8870 timeout, duration, tx_power, min_interval,
8874 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8875 MGMT_STATUS_FAILED);
8879 /* Submit request for advertising params if ext adv available */
8880 if (ext_adv_capable(hdev)) {
8881 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
/* On allocation failure the just-created instance is rolled back. */
8885 hci_remove_adv_instance(hdev, cp->instance);
8889 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8890 add_ext_adv_params_complete);
8892 mgmt_pending_free(cmd);
8894 rp.instance = cp->instance;
8895 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8896 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8897 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8898 err = mgmt_cmd_complete(sk, hdev->id,
8899 MGMT_OP_ADD_EXT_ADV_PARAMS,
8900 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8904 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_ADD_EXT_ADV_DATA: run the shared
 * add_adv_complete() cleanup, then report command status/complete to
 * the requester and free the pending command.
 */
8909 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8911 struct mgmt_pending_cmd *cmd = data;
8912 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8913 struct mgmt_rp_add_advertising rp;
8915 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8917 memset(&rp, 0, sizeof(rp));
8919 rp.instance = cp->instance;
8922 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8925 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8926 mgmt_status(err), &rp, sizeof(rp));
8928 mgmt_pending_free(cmd);
/* hci_cmd_sync work callback: push adv data and scan response data to
 * the controller and enable the set (extended advertising path), or
 * fall back to scheduling the instance on legacy controllers.
 */
8931 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8933 struct mgmt_pending_cmd *cmd = data;
8934 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8937 if (ext_adv_capable(hdev)) {
8938 err = hci_update_adv_data_sync(hdev, cp->instance);
8942 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8946 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8949 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: second half of the extended
 * Add Advertising flow. The instance must already exist (created by
 * Add Ext Adv Params); its flags govern TLV validation. On validation
 * or setup failure the new instance is removed again
 * (clear_new_instance path). Mirrors add_advertising()'s scheduling
 * logic for software rotation.
 */
8952 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8955 struct mgmt_cp_add_ext_adv_data *cp = data;
8956 struct mgmt_rp_add_ext_adv_data rp;
8957 u8 schedule_instance = 0;
8958 struct adv_info *next_instance;
8959 struct adv_info *adv_instance;
8961 struct mgmt_pending_cmd *cmd;
8963 BT_DBG("%s", hdev->name);
8967 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8969 if (!adv_instance) {
8970 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8971 MGMT_STATUS_INVALID_PARAMS);
8975 /* In new interface, we require that we are powered to register */
8976 if (!hdev_is_powered(hdev)) {
8977 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8978 MGMT_STATUS_REJECTED);
8979 goto clear_new_instance;
8982 if (adv_busy(hdev)) {
8983 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8985 goto clear_new_instance;
8988 /* Validate new data */
8989 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8990 cp->adv_data_len, true) ||
8991 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8992 cp->adv_data_len, cp->scan_rsp_len, false)) {
8993 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8994 MGMT_STATUS_INVALID_PARAMS);
8995 goto clear_new_instance;
8998 /* Set the data in the advertising instance */
8999 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9000 cp->data, cp->scan_rsp_len,
9001 cp->data + cp->adv_data_len);
9003 /* If using software rotation, determine next instance to use */
9004 if (hdev->cur_adv_instance == cp->instance) {
9005 /* If the currently advertised instance is being changed
9006 * then cancel the current advertising and schedule the
9007 * next instance. If there is only one instance then the
9008 * overridden advertising data will be visible right
9011 cancel_adv_timeout(hdev);
9013 next_instance = hci_get_next_instance(hdev, cp->instance);
9015 schedule_instance = next_instance->instance;
9016 } else if (!hdev->adv_instance_timeout) {
9017 /* Immediately advertise the new instance if no other
9018 * instance is currently being advertised.
9020 schedule_instance = cp->instance;
9023 /* If the HCI_ADVERTISING flag is set or there is no instance to
9024 * be advertised then we have no HCI communication to make.
9027 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9028 if (adv_instance->pending) {
9029 mgmt_advertising_added(sk, hdev, cp->instance);
9030 adv_instance->pending = false;
9032 rp.instance = cp->instance;
9033 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9034 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9038 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9042 goto clear_new_instance;
9045 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9046 add_ext_adv_data_complete);
9048 mgmt_pending_free(cmd);
9049 goto clear_new_instance;
9052 /* We were successful in updating data, so trigger advertising_added
9053 * event if this is an instance that wasn't previously advertising. If
9054 * a failure occurs in the requests we initiated, we will remove the
9055 * instance again in add_advertising_complete
9057 if (adv_instance->pending)
9058 mgmt_advertising_added(sk, hdev, cp->instance);
/* clear_new_instance: error path — tear the instance down again. */
9063 hci_remove_adv_instance(hdev, cp->instance);
9066 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_REMOVE_ADVERTISING: report the
 * outcome to the requester and free the pending command.
 */
9071 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9074 struct mgmt_pending_cmd *cmd = data;
9075 struct mgmt_cp_remove_advertising *cp = cmd->param;
9076 struct mgmt_rp_remove_advertising rp;
9078 bt_dev_dbg(hdev, "err %d", err);
9080 memset(&rp, 0, sizeof(rp));
9081 rp.instance = cp->instance;
9084 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9087 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9088 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9090 mgmt_pending_free(cmd);
/* hci_cmd_sync work callback: remove the instance (instance 0 removes
 * all — per the MGMT API; TODO confirm against hci_remove_advertising_sync)
 * and disable advertising entirely if no instances remain.
 */
9093 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9095 struct mgmt_pending_cmd *cmd = data;
9096 struct mgmt_cp_remove_advertising *cp = cmd->param;
9099 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9103 if (list_empty(&hdev->adv_instances))
9104 err = hci_disable_advertising_sync(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING: validate that the named
 * instance exists (a zero instance is accepted without lookup), that
 * no Set LE command is in flight, and that there is at least one
 * registered instance; then queue the removal on the cmd_sync thread.
 */
9109 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9110 void *data, u16 data_len)
9112 struct mgmt_cp_remove_advertising *cp = data;
9113 struct mgmt_pending_cmd *cmd;
9116 bt_dev_dbg(hdev, "sock %p", sk);
9120 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9121 err = mgmt_cmd_status(sk, hdev->id,
9122 MGMT_OP_REMOVE_ADVERTISING,
9123 MGMT_STATUS_INVALID_PARAMS);
9127 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9128 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9133 if (list_empty(&hdev->adv_instances)) {
9134 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9135 MGMT_STATUS_INVALID_PARAMS);
9139 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9146 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9147 remove_advertising_complete);
9149 mgmt_pending_free(cmd);
9152 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: given a prospective set of
 * advertising flags, report how many bytes of adv data and scan
 * response data would remain available for userspace TLVs. Purely
 * informational — no state is changed.
 */
9157 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9158 void *data, u16 data_len)
9160 struct mgmt_cp_get_adv_size_info *cp = data;
9161 struct mgmt_rp_get_adv_size_info rp;
9162 u32 flags, supported_flags;
9164 bt_dev_dbg(hdev, "sock %p", sk);
9166 if (!lmp_le_capable(hdev))
9167 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9168 MGMT_STATUS_REJECTED);
9170 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9171 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9172 MGMT_STATUS_INVALID_PARAMS);
9174 flags = __le32_to_cpu(cp->flags);
9176 /* The current implementation only supports a subset of the specified
9179 supported_flags = get_supported_adv_flags(hdev);
9180 if (flags & ~supported_flags)
9181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9182 MGMT_STATUS_INVALID_PARAMS);
9184 rp.instance = cp->instance;
9185 rp.flags = cp->flags;
9186 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9187 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9189 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9190 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table mapping MGMT opcodes (array index) to their handler,
 * minimum parameter size, and flags (HCI_MGMT_UNTRUSTED: callable from
 * untrusted sockets; HCI_MGMT_UNCONFIGURED: allowed on unconfigured
 * controllers; HCI_MGMT_HDEV_OPTIONAL: controller index optional).
 * Entry order must match the MGMT_OP_* opcode numbering exactly.
 */
9193 static const struct hci_mgmt_handler mgmt_handlers[] = {
9194 { NULL }, /* 0x0000 (no command) */
9195 { read_version, MGMT_READ_VERSION_SIZE,
9197 HCI_MGMT_UNTRUSTED },
9198 { read_commands, MGMT_READ_COMMANDS_SIZE,
9200 HCI_MGMT_UNTRUSTED },
9201 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9203 HCI_MGMT_UNTRUSTED },
9204 { read_controller_info, MGMT_READ_INFO_SIZE,
9205 HCI_MGMT_UNTRUSTED },
9206 { set_powered, MGMT_SETTING_SIZE },
9207 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9208 { set_connectable, MGMT_SETTING_SIZE },
9209 { set_fast_connectable, MGMT_SETTING_SIZE },
9210 { set_bondable, MGMT_SETTING_SIZE },
9211 { set_link_security, MGMT_SETTING_SIZE },
9212 { set_ssp, MGMT_SETTING_SIZE },
9213 { set_hs, MGMT_SETTING_SIZE },
9214 { set_le, MGMT_SETTING_SIZE },
9215 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9216 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9217 { add_uuid, MGMT_ADD_UUID_SIZE },
9218 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9219 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9221 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9223 { disconnect, MGMT_DISCONNECT_SIZE },
9224 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9225 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9226 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9227 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9228 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9229 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9230 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9231 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9232 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9233 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9234 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9235 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9236 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9238 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9239 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9240 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9241 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9242 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9243 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9244 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9245 { set_advertising, MGMT_SETTING_SIZE },
9246 { set_bredr, MGMT_SETTING_SIZE },
9247 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9248 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9249 { set_secure_conn, MGMT_SETTING_SIZE },
9250 { set_debug_keys, MGMT_SETTING_SIZE },
9251 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9252 { load_irks, MGMT_LOAD_IRKS_SIZE,
9254 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9255 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9256 { add_device, MGMT_ADD_DEVICE_SIZE },
9257 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9258 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9260 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9262 HCI_MGMT_UNTRUSTED },
9263 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9264 HCI_MGMT_UNCONFIGURED |
9265 HCI_MGMT_UNTRUSTED },
9266 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9267 HCI_MGMT_UNCONFIGURED },
9268 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9269 HCI_MGMT_UNCONFIGURED },
9270 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9272 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9273 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9275 HCI_MGMT_UNTRUSTED },
9276 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9277 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9279 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9280 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9281 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9282 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9283 HCI_MGMT_UNTRUSTED },
9284 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9285 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9286 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9287 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9289 { set_wideband_speech, MGMT_SETTING_SIZE },
9290 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9291 HCI_MGMT_UNTRUSTED },
9292 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9293 HCI_MGMT_UNTRUSTED |
9294 HCI_MGMT_HDEV_OPTIONAL },
9295 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9297 HCI_MGMT_HDEV_OPTIONAL },
9298 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9299 HCI_MGMT_UNTRUSTED },
9300 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9302 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9303 HCI_MGMT_UNTRUSTED },
9304 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9306 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9307 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9308 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9309 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9311 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9312 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9314 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9316 { add_adv_patterns_monitor_rssi,
9317 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9319 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9321 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9322 { mesh_send, MGMT_MESH_SEND_SIZE,
9324 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
/* Broadcast that a controller index appeared: raw (HCI_QUIRK_RAW_DEVICE)
 * devices are never announced; unconfigured controllers get
 * Unconf Index Added, configured ones Index Added, and an Ext Index
 * Added event is always emitted for extended-index listeners.
 */
9327 void mgmt_index_added(struct hci_dev *hdev)
9329 struct mgmt_ev_ext_index ev;
9331 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9334 switch (hdev->dev_type) {
9336 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9337 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9338 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9341 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9342 HCI_MGMT_INDEX_EVENTS);
9355 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9356 HCI_MGMT_EXT_INDEX_EVENTS);
/* Broadcast that a controller index disappeared (mirror of
 * mgmt_index_added). All pending MGMT commands for the device are
 * completed with MGMT_STATUS_INVALID_INDEX, the matching *Index
 * Removed events are emitted, and — when the controller was under
 * mgmt control — remaining delayed work (discov_off, service_cache,
 * rpa_expired) is cancelled synchronously.
 */
9359 void mgmt_index_removed(struct hci_dev *hdev)
9361 struct mgmt_ev_ext_index ev;
9362 u8 status = MGMT_STATUS_INVALID_INDEX;
9364 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9367 switch (hdev->dev_type) {
9369 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9371 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9372 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9373 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9376 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9377 HCI_MGMT_INDEX_EVENTS);
9390 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9391 HCI_MGMT_EXT_INDEX_EVENTS);
9393 /* Cancel any remaining timed work */
9394 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9396 cancel_delayed_work_sync(&hdev->discov_off);
9397 cancel_delayed_work_sync(&hdev->service_cache);
9398 cancel_delayed_work_sync(&hdev->rpa_expired);
/* Finish a power-on: restart stored LE actions and passive scanning,
 * complete any pending Set Powered commands, and emit New Settings to
 * listeners if the settings changed.
 */
9401 void mgmt_power_on(struct hci_dev *hdev, int err)
9403 struct cmd_lookup match = { NULL, hdev };
9405 bt_dev_dbg(hdev, "err %d", err);
9410 restart_le_actions(hdev);
9411 hci_update_passive_scan(hdev);
9414 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9416 new_settings(hdev, match.sk);
9421 hci_dev_unlock(hdev);
/* Finish a power-off: complete pending Set Powered commands, fail all
 * remaining pending commands with INVALID_INDEX (if powering off due
 * to unregistration) or NOT_POWERED, announce a zeroed class of device
 * if one was set, and emit New Settings.
 */
9424 void __mgmt_power_off(struct hci_dev *hdev)
9426 struct cmd_lookup match = { NULL, hdev };
9427 u8 status, zero_cod[] = { 0, 0, 0 };
9429 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9431 /* If the power off is because of hdev unregistration let
9432 * use the appropriate INVALID_INDEX status. Otherwise use
9433 * NOT_POWERED. We cover both scenarios here since later in
9434 * mgmt_index_removed() any hci_conn callbacks will have already
9435 * been triggered, potentially causing misleading DISCONNECTED
9438 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9439 status = MGMT_STATUS_INVALID_INDEX;
9441 status = MGMT_STATUS_NOT_POWERED;
9443 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9445 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9446 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9447 zero_cod, sizeof(zero_cod),
9448 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9449 ext_info_changed(hdev, NULL);
9452 new_settings(hdev, match.sk);
/* Fail a pending Set Powered command: report RFKILLED when the power-on
 * was blocked by rfkill (-ERFKILL), FAILED otherwise, then drop the
 * pending entry. No-op if no Set Powered is pending.
 */
9458 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9460 struct mgmt_pending_cmd *cmd;
9463 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9467 if (err == -ERFKILL)
9468 status = MGMT_STATUS_RFKILLED;
9470 status = MGMT_STATUS_FAILED;
9472 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9474 mgmt_pending_remove(cmd);
/* Emit a New Link Key event to userspace so it can persist the BR/EDR
 * link key; store_hint tells userspace whether the key should survive
 * across restarts.
 */
9477 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9480 struct mgmt_ev_new_link_key ev;
9482 memset(&ev, 0, sizeof(ev));
9484 ev.store_hint = persistent;
9485 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9486 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9487 ev.key.type = key->type;
9488 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9489 ev.key.pin_len = key->pin_len;
9491 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated flag to the
 * corresponding MGMT_LTK_* wire value; unknown types fall back to
 * MGMT_LTK_UNAUTHENTICATED.
 */
9494 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9496 switch (ltk->type) {
9498 case SMP_LTK_RESPONDER:
9499 if (ltk->authenticated)
9500 return MGMT_LTK_AUTHENTICATED;
9501 return MGMT_LTK_UNAUTHENTICATED;
9503 if (ltk->authenticated)
9504 return MGMT_LTK_P256_AUTH;
9505 return MGMT_LTK_P256_UNAUTH;
9506 case SMP_LTK_P256_DEBUG:
9507 return MGMT_LTK_P256_DEBUG;
9510 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. Keys bound to non-identity random
 * addresses (top two bits of a random address != 11b, i.e. not static)
 * get store_hint 0 since the address will change; only the significant
 * enc_size bytes of the key value are copied, the rest is zeroed.
 */
9513 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9515 struct mgmt_ev_new_long_term_key ev;
9517 memset(&ev, 0, sizeof(ev));
9519 /* Devices using resolvable or non-resolvable random addresses
9520 * without providing an identity resolving key don't require
9521 * to store long term keys. Their addresses will change the
9524 * Only when a remote device provides an identity address
9525 * make sure the long term key is stored. If the remote
9526 * identity is known, the long term keys are internally
9527 * mapped to the identity address. So allow static random
9528 * and public addresses here.
9530 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9531 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9532 ev.store_hint = 0x00;
9534 ev.store_hint = persistent;
9536 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9537 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9538 ev.key.type = mgmt_ltk_type(key);
9539 ev.key.enc_size = key->enc_size;
9540 ev.key.ediv = key->ediv;
9541 ev.key.rand = key->rand;
/* SMP_LTK is the key generated as initiator. */
9543 if (key->type == SMP_LTK)
9544 ev.key.initiator = 1;
9546 /* Make sure we copy only the significant bytes based on the
9547 * encryption key size, and set the rest of the value to zeroes.
9549 memcpy(ev.key.val, key->val, key->enc_size);
9550 memset(ev.key.val + key->enc_size, 0,
9551 sizeof(ev.key.val) - key->enc_size);
9553 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9556 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9558 struct mgmt_ev_new_irk ev;
9560 memset(&ev, 0, sizeof(ev));
9562 ev.store_hint = persistent;
9564 bacpy(&ev.rpa, &irk->rpa);
9565 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9566 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
9567 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9569 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9572 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9575 struct mgmt_ev_new_csrk ev;
9577 memset(&ev, 0, sizeof(ev));
9579 /* Devices using resolvable or non-resolvable random addresses
9580 * without providing an identity resolving key don't require
9581 * to store signature resolving keys. Their addresses will change
9582 * the next time around.
9584 * Only when a remote device provides an identity address
9585 * make sure the signature resolving key is stored. So allow
9586 * static random and public addresses here.
9588 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9589 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9590 ev.store_hint = 0x00;
9592 ev.store_hint = persistent;
9594 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9595 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9596 ev.key.type = csrk->type;
9597 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9599 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9602 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9603 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9604 u16 max_interval, u16 latency, u16 timeout)
9606 struct mgmt_ev_new_conn_param ev;
9608 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9611 memset(&ev, 0, sizeof(ev));
9612 bacpy(&ev.addr.bdaddr, bdaddr);
9613 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9614 ev.store_hint = store_hint;
9615 ev.min_interval = cpu_to_le16(min_interval);
9616 ev.max_interval = cpu_to_le16(max_interval);
9617 ev.latency = cpu_to_le16(latency);
9618 ev.timeout = cpu_to_le16(timeout);
9620 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9623 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9624 u8 *name, u8 name_len)
9626 struct sk_buff *skb;
9627 struct mgmt_ev_device_connected *ev;
9631 if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9634 /* allocate buff for LE or BR/EDR adv */
9635 if (conn->le_adv_data_len > 0)
9636 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9637 sizeof(*ev) + conn->le_adv_data_len);
9639 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9640 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9641 eir_precalc_len(sizeof(conn->dev_class)));
9643 ev = skb_put(skb, sizeof(*ev));
9644 bacpy(&ev->addr.bdaddr, &conn->dst);
9645 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9648 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9650 ev->flags = __cpu_to_le32(flags);
9652 /* We must ensure that the EIR Data fields are ordered and
9653 * unique. Keep it simple for now and avoid the problem by not
9654 * adding any BR/EDR data to the LE adv.
9656 if (conn->le_adv_data_len > 0) {
9657 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9658 eir_len = conn->le_adv_data_len;
9661 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9663 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9664 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9665 conn->dev_class, sizeof(conn->dev_class));
9668 ev->eir_len = cpu_to_le16(eir_len);
9670 mgmt_event_skb(skb, NULL);
9673 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9675 struct sock **sk = data;
9677 cmd->cmd_complete(cmd, 0);
9682 mgmt_pending_remove(cmd);
9685 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9687 struct hci_dev *hdev = data;
9688 struct mgmt_cp_unpair_device *cp = cmd->param;
9690 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9692 cmd->cmd_complete(cmd, 0);
9693 mgmt_pending_remove(cmd);
9696 bool mgmt_powering_down(struct hci_dev *hdev)
9698 struct mgmt_pending_cmd *cmd;
9699 struct mgmt_mode *cp;
9701 if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9704 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9715 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9716 u8 link_type, u8 addr_type, u8 reason,
9717 bool mgmt_connected)
9719 struct mgmt_ev_device_disconnected ev;
9720 struct sock *sk = NULL;
9722 if (!mgmt_connected)
9725 if (link_type != ACL_LINK && link_type != LE_LINK)
9728 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9730 bacpy(&ev.addr.bdaddr, bdaddr);
9731 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9734 /* Report disconnects due to suspend */
9735 if (hdev->suspended)
9736 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9738 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9743 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9747 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9748 u8 link_type, u8 addr_type, u8 status)
9750 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9751 struct mgmt_cp_disconnect *cp;
9752 struct mgmt_pending_cmd *cmd;
9754 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9757 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9763 if (bacmp(bdaddr, &cp->addr.bdaddr))
9766 if (cp->addr.type != bdaddr_type)
9769 cmd->cmd_complete(cmd, mgmt_status(status));
9770 mgmt_pending_remove(cmd);
9773 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9774 u8 addr_type, u8 status)
9776 struct mgmt_ev_connect_failed ev;
9778 bacpy(&ev.addr.bdaddr, bdaddr);
9779 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9780 ev.status = mgmt_status(status);
9782 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9785 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9787 struct mgmt_ev_pin_code_request ev;
9789 bacpy(&ev.addr.bdaddr, bdaddr);
9790 ev.addr.type = BDADDR_BREDR;
9793 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9796 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9799 struct mgmt_pending_cmd *cmd;
9801 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9805 cmd->cmd_complete(cmd, mgmt_status(status));
9806 mgmt_pending_remove(cmd);
9809 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9812 struct mgmt_pending_cmd *cmd;
9814 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9818 cmd->cmd_complete(cmd, mgmt_status(status));
9819 mgmt_pending_remove(cmd);
9822 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9823 u8 link_type, u8 addr_type, u32 value,
9826 struct mgmt_ev_user_confirm_request ev;
9828 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9830 bacpy(&ev.addr.bdaddr, bdaddr);
9831 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9832 ev.confirm_hint = confirm_hint;
9833 ev.value = cpu_to_le32(value);
9835 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9839 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9840 u8 link_type, u8 addr_type)
9842 struct mgmt_ev_user_passkey_request ev;
9844 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9846 bacpy(&ev.addr.bdaddr, bdaddr);
9847 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9849 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9853 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9854 u8 link_type, u8 addr_type, u8 status,
9857 struct mgmt_pending_cmd *cmd;
9859 cmd = pending_find(opcode, hdev);
9863 cmd->cmd_complete(cmd, mgmt_status(status));
9864 mgmt_pending_remove(cmd);
9869 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9870 u8 link_type, u8 addr_type, u8 status)
9872 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9873 status, MGMT_OP_USER_CONFIRM_REPLY);
9876 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9877 u8 link_type, u8 addr_type, u8 status)
9879 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9881 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9884 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9885 u8 link_type, u8 addr_type, u8 status)
9887 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9888 status, MGMT_OP_USER_PASSKEY_REPLY);
9891 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9892 u8 link_type, u8 addr_type, u8 status)
9894 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9896 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9899 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9900 u8 link_type, u8 addr_type, u32 passkey,
9903 struct mgmt_ev_passkey_notify ev;
9905 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9907 bacpy(&ev.addr.bdaddr, bdaddr);
9908 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9909 ev.passkey = __cpu_to_le32(passkey);
9910 ev.entered = entered;
9912 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9915 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9917 struct mgmt_ev_auth_failed ev;
9918 struct mgmt_pending_cmd *cmd;
9919 u8 status = mgmt_status(hci_status);
9921 bacpy(&ev.addr.bdaddr, &conn->dst);
9922 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9925 cmd = find_pairing(conn);
9927 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9928 cmd ? cmd->sk : NULL);
9931 cmd->cmd_complete(cmd, status);
9932 mgmt_pending_remove(cmd);
9936 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9938 struct cmd_lookup match = { NULL, hdev };
9942 u8 mgmt_err = mgmt_status(status);
9943 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9944 cmd_status_rsp, &mgmt_err);
9948 if (test_bit(HCI_AUTH, &hdev->flags))
9949 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9951 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9953 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9957 new_settings(hdev, match.sk);
9963 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9965 struct cmd_lookup *match = data;
9967 if (match->sk == NULL) {
9968 match->sk = cmd->sk;
9969 sock_hold(match->sk);
9973 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9976 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9978 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9979 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9980 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9983 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9984 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9985 ext_info_changed(hdev, NULL);
9992 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9994 struct mgmt_cp_set_local_name ev;
9995 struct mgmt_pending_cmd *cmd;
10000 memset(&ev, 0, sizeof(ev));
10001 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10002 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10004 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10006 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10008 /* If this is a HCI command related to powering on the
10009 * HCI dev don't send any mgmt signals.
10011 if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
10014 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10018 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10019 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10020 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10023 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10027 for (i = 0; i < uuid_count; i++) {
10028 if (!memcmp(uuid, uuids[i], 16))
10035 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10039 while (parsed < eir_len) {
10040 u8 field_len = eir[0];
10044 if (field_len == 0)
10047 if (eir_len - parsed < field_len + 1)
10051 case EIR_UUID16_ALL:
10052 case EIR_UUID16_SOME:
10053 for (i = 0; i + 3 <= field_len; i += 2) {
10054 memcpy(uuid, bluetooth_base_uuid, 16);
10055 uuid[13] = eir[i + 3];
10056 uuid[12] = eir[i + 2];
10057 if (has_uuid(uuid, uuid_count, uuids))
10061 case EIR_UUID32_ALL:
10062 case EIR_UUID32_SOME:
10063 for (i = 0; i + 5 <= field_len; i += 4) {
10064 memcpy(uuid, bluetooth_base_uuid, 16);
10065 uuid[15] = eir[i + 5];
10066 uuid[14] = eir[i + 4];
10067 uuid[13] = eir[i + 3];
10068 uuid[12] = eir[i + 2];
10069 if (has_uuid(uuid, uuid_count, uuids))
10073 case EIR_UUID128_ALL:
10074 case EIR_UUID128_SOME:
10075 for (i = 0; i + 17 <= field_len; i += 16) {
10076 memcpy(uuid, eir + i + 2, 16);
10077 if (has_uuid(uuid, uuid_count, uuids))
10083 parsed += field_len + 1;
10084 eir += field_len + 1;
10090 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10091 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10093 /* If a RSSI threshold has been specified, and
10094 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10095 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10096 * is set, let it through for further processing, as we might need to
10097 * restart the scan.
10099 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10100 * the results are also dropped.
10102 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10103 (rssi == HCI_RSSI_INVALID ||
10104 (rssi < hdev->discovery.rssi &&
10105 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10108 if (hdev->discovery.uuid_count != 0) {
10109 /* If a list of UUIDs is provided in filter, results with no
10110 * matching UUID should be dropped.
10112 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10113 hdev->discovery.uuids) &&
10114 !eir_has_uuids(scan_rsp, scan_rsp_len,
10115 hdev->discovery.uuid_count,
10116 hdev->discovery.uuids))
10120 /* If duplicate filtering does not report RSSI changes, then restart
10121 * scanning to ensure updated result with updated RSSI values.
10123 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10124 /* Validate RSSI value against the RSSI threshold once more. */
10125 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10126 rssi < hdev->discovery.rssi)
10133 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10134 bdaddr_t *bdaddr, u8 addr_type)
10136 struct mgmt_ev_adv_monitor_device_lost ev;
10138 ev.monitor_handle = cpu_to_le16(handle);
10139 bacpy(&ev.addr.bdaddr, bdaddr);
10140 ev.addr.type = addr_type;
10142 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10146 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10147 struct sk_buff *skb,
10148 struct sock *skip_sk,
10151 struct sk_buff *advmon_skb;
10152 size_t advmon_skb_len;
10153 __le16 *monitor_handle;
10158 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10159 sizeof(struct mgmt_ev_device_found)) + skb->len;
10160 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10165 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10166 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10167 * store monitor_handle of the matched monitor.
10169 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10170 *monitor_handle = cpu_to_le16(handle);
10171 skb_put_data(advmon_skb, skb->data, skb->len);
10173 mgmt_event_skb(advmon_skb, skip_sk);
10176 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10177 bdaddr_t *bdaddr, bool report_device,
10178 struct sk_buff *skb,
10179 struct sock *skip_sk)
10181 struct monitored_device *dev, *tmp;
10182 bool matched = false;
10183 bool notified = false;
10185 /* We have received the Advertisement Report because:
10186 * 1. the kernel has initiated active discovery
10187 * 2. if not, we have pend_le_reports > 0 in which case we are doing
10189 * 3. if none of the above is true, we have one or more active
10190 * Advertisement Monitor
10192 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10193 * and report ONLY one advertisement per device for the matched Monitor
10194 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10196 * For case 3, since we are not active scanning and all advertisements
10197 * received are due to a matched Advertisement Monitor, report all
10198 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10200 if (report_device && !hdev->advmon_pend_notify) {
10201 mgmt_event_skb(skb, skip_sk);
10205 hdev->advmon_pend_notify = false;
10207 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10208 if (!bacmp(&dev->bdaddr, bdaddr)) {
10211 if (!dev->notified) {
10212 mgmt_send_adv_monitor_device_found(hdev, skb,
10216 dev->notified = true;
10220 if (!dev->notified)
10221 hdev->advmon_pend_notify = true;
10224 if (!report_device &&
10225 ((matched && !notified) || !msft_monitor_supported(hdev))) {
10226 /* Handle 0 indicates that we are not active scanning and this
10227 * is a subsequent advertisement report for an already matched
10228 * Advertisement Monitor or the controller offloading support
10229 * is not available.
10231 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10235 mgmt_event_skb(skb, skip_sk);
10240 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10241 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10242 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10245 struct sk_buff *skb;
10246 struct mgmt_ev_mesh_device_found *ev;
10249 if (!hdev->mesh_ad_types[0])
10252 /* Scan for requested AD types */
10254 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10255 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10256 if (!hdev->mesh_ad_types[j])
10259 if (hdev->mesh_ad_types[j] == eir[i + 1])
10265 if (scan_rsp_len > 0) {
10266 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10267 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10268 if (!hdev->mesh_ad_types[j])
10271 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10280 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10281 sizeof(*ev) + eir_len + scan_rsp_len);
10285 ev = skb_put(skb, sizeof(*ev));
10287 bacpy(&ev->addr.bdaddr, bdaddr);
10288 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10290 ev->flags = cpu_to_le32(flags);
10291 ev->instant = cpu_to_le64(instant);
10294 /* Copy EIR or advertising data into event */
10295 skb_put_data(skb, eir, eir_len);
10297 if (scan_rsp_len > 0)
10298 /* Append scan response data to event */
10299 skb_put_data(skb, scan_rsp, scan_rsp_len);
10301 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10303 mgmt_event_skb(skb, NULL);
10306 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10307 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10308 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10311 struct sk_buff *skb;
10312 struct mgmt_ev_device_found *ev;
10313 bool report_device = hci_discovery_active(hdev);
10315 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10316 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10317 eir, eir_len, scan_rsp, scan_rsp_len,
10320 /* Don't send events for a non-kernel initiated discovery. With
10321 * LE one exception is if we have pend_le_reports > 0 in which
10322 * case we're doing passive scanning and want these events.
10324 if (!hci_discovery_active(hdev)) {
10325 if (link_type == ACL_LINK)
10327 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10328 report_device = true;
10329 else if (!hci_is_adv_monitoring(hdev))
10333 if (hdev->discovery.result_filtering) {
10334 /* We are using service discovery */
10335 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10340 if (hdev->discovery.limited) {
10341 /* Check for limited discoverable bit */
10343 if (!(dev_class[1] & 0x20))
10346 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10347 if (!flags || !(flags[0] & LE_AD_LIMITED))
10352 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10353 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10354 sizeof(*ev) + eir_len + scan_rsp_len + 5);
10358 ev = skb_put(skb, sizeof(*ev));
10360 /* In case of device discovery with BR/EDR devices (pre 1.2), the
10361 * RSSI value was reported as 0 when not available. This behavior
10362 * is kept when using device discovery. This is required for full
10363 * backwards compatibility with the API.
10365 * However when using service discovery, the value 127 will be
10366 * returned when the RSSI is not available.
10368 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10369 link_type == ACL_LINK)
10372 bacpy(&ev->addr.bdaddr, bdaddr);
10373 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10375 ev->flags = cpu_to_le32(flags);
10378 /* Copy EIR or advertising data into event */
10379 skb_put_data(skb, eir, eir_len);
10381 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10384 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10386 skb_put_data(skb, eir_cod, sizeof(eir_cod));
10389 if (scan_rsp_len > 0)
10390 /* Append scan response data to event */
10391 skb_put_data(skb, scan_rsp, scan_rsp_len);
10393 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10395 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10398 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10399 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10401 struct sk_buff *skb;
10402 struct mgmt_ev_device_found *ev;
10406 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10407 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10409 ev = skb_put(skb, sizeof(*ev));
10410 bacpy(&ev->addr.bdaddr, bdaddr);
10411 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10415 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10417 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10419 ev->eir_len = cpu_to_le16(eir_len);
10420 ev->flags = cpu_to_le32(flags);
10422 mgmt_event_skb(skb, NULL);
10425 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10427 struct mgmt_ev_discovering ev;
10429 bt_dev_dbg(hdev, "discovering %u", discovering);
10431 memset(&ev, 0, sizeof(ev));
10432 ev.type = hdev->discovery.type;
10433 ev.discovering = discovering;
10435 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10438 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10440 struct mgmt_ev_controller_suspend ev;
10442 ev.suspend_state = state;
10443 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10446 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10449 struct mgmt_ev_controller_resume ev;
10451 ev.wake_reason = reason;
10453 bacpy(&ev.addr.bdaddr, bdaddr);
10454 ev.addr.type = addr_type;
10456 memset(&ev.addr, 0, sizeof(ev.addr));
10459 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10462 static struct hci_mgmt_chan chan = {
10463 .channel = HCI_CHANNEL_CONTROL,
10464 .handler_count = ARRAY_SIZE(mgmt_handlers),
10465 .handlers = mgmt_handlers,
10466 .hdev_init = mgmt_init_hdev,
10469 int mgmt_init(void)
10471 return hci_mgmt_chan_register(&chan);
10474 void mgmt_exit(void)
10476 hci_mgmt_chan_unregister(&chan);
10479 void mgmt_cleanup(struct sock *sk)
10481 struct mgmt_mesh_tx *mesh_tx;
10482 struct hci_dev *hdev;
10484 read_lock(&hci_dev_list_lock);
10486 list_for_each_entry(hdev, &hci_dev_list, list) {
10488 mesh_tx = mgmt_mesh_next(hdev, sk);
10491 mesh_send_complete(hdev, mesh_tx, true);
10495 read_unlock(&hci_dev_list_lock);