2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 22
/* Opcodes a trusted management control socket is allowed to issue.
 * NOTE(review): this copy has line numbers baked in and appears to have
 * lost several entries present upstream (e.g. the READ_VERSION /
 * READ_COMMANDS / READ_INFO / SET_POWERED group at the head, and
 * MGMT_OP_MESH_SEND before MESH_SEND_CANCEL) — diff against the
 * original mgmt.c before relying on this list.
 */
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
55 MGMT_OP_SET_LINK_SECURITY,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
113 MGMT_OP_GET_PHY_CONFIGURATION,
114 MGMT_OP_SET_PHY_CONFIGURATION,
115 MGMT_OP_SET_BLOCKED_KEYS,
116 MGMT_OP_SET_WIDEBAND_SPEECH,
117 MGMT_OP_READ_CONTROLLER_CAP,
118 MGMT_OP_READ_EXP_FEATURES_INFO,
119 MGMT_OP_SET_EXP_FEATURE,
120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 MGMT_OP_GET_DEVICE_FLAGS,
125 MGMT_OP_SET_DEVICE_FLAGS,
126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 MGMT_OP_REMOVE_ADV_MONITOR,
129 MGMT_OP_ADD_EXT_ADV_PARAMS,
130 MGMT_OP_ADD_EXT_ADV_DATA,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 MGMT_OP_SET_MESH_RECEIVER,
133 MGMT_OP_MESH_READ_FEATURES,
135 MGMT_OP_MESH_SEND_CANCEL,
/* Events broadcast to trusted management sockets.
 * NOTE(review): gaps in the embedded numbering (140, 153, 155, 160-161)
 * suggest entries such as MGMT_EV_INDEX_ADDED and the AUTH/DISCOVERING
 * group were dropped from this copy — verify against upstream.
 */
138 static const u16 mgmt_events[] = {
139 MGMT_EV_CONTROLLER_ERROR,
141 MGMT_EV_INDEX_REMOVED,
142 MGMT_EV_NEW_SETTINGS,
143 MGMT_EV_CLASS_OF_DEV_CHANGED,
144 MGMT_EV_LOCAL_NAME_CHANGED,
145 MGMT_EV_NEW_LINK_KEY,
146 MGMT_EV_NEW_LONG_TERM_KEY,
147 MGMT_EV_DEVICE_CONNECTED,
148 MGMT_EV_DEVICE_DISCONNECTED,
149 MGMT_EV_CONNECT_FAILED,
150 MGMT_EV_PIN_CODE_REQUEST,
151 MGMT_EV_USER_CONFIRM_REQUEST,
152 MGMT_EV_USER_PASSKEY_REQUEST,
154 MGMT_EV_DEVICE_FOUND,
156 MGMT_EV_DEVICE_BLOCKED,
157 MGMT_EV_DEVICE_UNBLOCKED,
158 MGMT_EV_DEVICE_UNPAIRED,
159 MGMT_EV_PASSKEY_NOTIFY,
162 MGMT_EV_DEVICE_ADDED,
163 MGMT_EV_DEVICE_REMOVED,
164 MGMT_EV_NEW_CONN_PARAM,
165 MGMT_EV_UNCONF_INDEX_ADDED,
166 MGMT_EV_UNCONF_INDEX_REMOVED,
167 MGMT_EV_NEW_CONFIG_OPTIONS,
168 MGMT_EV_EXT_INDEX_ADDED,
169 MGMT_EV_EXT_INDEX_REMOVED,
170 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 MGMT_EV_ADVERTISING_ADDED,
172 MGMT_EV_ADVERTISING_REMOVED,
173 MGMT_EV_EXT_INFO_CHANGED,
174 MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 MGMT_EV_EXP_FEATURE_CHANGED,
176 MGMT_EV_DEVICE_FLAGS_CHANGED,
177 MGMT_EV_ADV_MONITOR_ADDED,
178 MGMT_EV_ADV_MONITOR_REMOVED,
179 MGMT_EV_CONTROLLER_SUSPEND,
180 MGMT_EV_CONTROLLER_RESUME,
181 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
/* Read-only opcodes permitted on untrusted (non-privileged) sockets.
 * NOTE(review): embedded line 187 is missing — upstream lists
 * MGMT_OP_READ_EXT_INFO adjacent here; confirm nothing was dropped.
 */
185 static const u16 mgmt_untrusted_commands[] = {
186 MGMT_OP_READ_INDEX_LIST,
188 MGMT_OP_READ_UNCONF_INDEX_LIST,
189 MGMT_OP_READ_CONFIG_INFO,
190 MGMT_OP_READ_EXT_INDEX_LIST,
191 MGMT_OP_READ_EXT_INFO,
192 MGMT_OP_READ_CONTROLLER_CAP,
193 MGMT_OP_READ_EXP_FEATURES_INFO,
194 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Events delivered to untrusted sockets (index/config/name/info
 * changes only — nothing carrying security-sensitive data).
 * NOTE(review): embedded line 199 is missing; upstream has
 * MGMT_EV_INDEX_ADDED first — confirm against the original.
 */
198 static const u16 mgmt_untrusted_events[] = {
200 MGMT_EV_INDEX_REMOVED,
201 MGMT_EV_NEW_SETTINGS,
202 MGMT_EV_CLASS_OF_DEV_CHANGED,
203 MGMT_EV_LOCAL_NAME_CHANGED,
204 MGMT_EV_UNCONF_INDEX_ADDED,
205 MGMT_EV_UNCONF_INDEX_REMOVED,
206 MGMT_EV_NEW_CONFIG_OPTIONS,
207 MGMT_EV_EXT_INDEX_ADDED,
208 MGMT_EV_EXT_INDEX_REMOVED,
209 MGMT_EV_EXT_INFO_CHANGED,
210 MGMT_EV_EXP_FEATURE_CHANGED,
213 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
218 /* HCI to MGMT error code conversion table */
/* Indexed by HCI status code (Bluetooth Core spec, Vol 1 Part F);
 * mgmt_status() uses the raw HCI status as the array index.
 * NOTE(review): embedded line 220 is missing — the index-0 entry
 * (MGMT_STATUS_SUCCESS for HCI "Success") appears to have been dropped
 * from this copy, which would shift every mapping by one. Restore it
 * from upstream before use.
 */
219 static const u8 mgmt_status_table[] = {
221 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
222 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
223 MGMT_STATUS_FAILED, /* Hardware Failure */
224 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
225 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
226 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
227 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
228 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
229 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
230 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
231 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
232 MGMT_STATUS_BUSY, /* Command Disallowed */
233 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
234 MGMT_STATUS_REJECTED, /* Rejected Security */
235 MGMT_STATUS_REJECTED, /* Rejected Personal */
236 MGMT_STATUS_TIMEOUT, /* Host Timeout */
237 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
238 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
239 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
240 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
241 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
242 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
243 MGMT_STATUS_BUSY, /* Repeated Attempts */
244 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
245 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
246 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
247 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
248 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
249 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
250 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
251 MGMT_STATUS_FAILED, /* Unspecified Error */
252 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
253 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
254 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
255 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
256 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
257 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
258 MGMT_STATUS_FAILED, /* Unit Link Key Used */
259 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
260 MGMT_STATUS_TIMEOUT, /* Instant Passed */
261 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
262 MGMT_STATUS_FAILED, /* Transaction Collision */
263 MGMT_STATUS_FAILED, /* Reserved for future use */
264 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
265 MGMT_STATUS_REJECTED, /* QoS Rejected */
266 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
267 MGMT_STATUS_REJECTED, /* Insufficient Security */
268 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
269 MGMT_STATUS_FAILED, /* Reserved for future use */
270 MGMT_STATUS_BUSY, /* Role Switch Pending */
271 MGMT_STATUS_FAILED, /* Reserved for future use */
272 MGMT_STATUS_FAILED, /* Slot Violation */
273 MGMT_STATUS_FAILED, /* Role Switch Failed */
274 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
275 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
276 MGMT_STATUS_BUSY, /* Host Busy Pairing */
277 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
278 MGMT_STATUS_BUSY, /* Controller Busy */
279 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
280 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
281 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
282 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
283 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
286 static u8 mgmt_errno_status(int err)
290 return MGMT_STATUS_SUCCESS;
292 return MGMT_STATUS_REJECTED;
294 return MGMT_STATUS_INVALID_PARAMS;
296 return MGMT_STATUS_NOT_SUPPORTED;
298 return MGMT_STATUS_BUSY;
300 return MGMT_STATUS_AUTH_FAILED;
302 return MGMT_STATUS_NO_RESOURCES;
304 return MGMT_STATUS_ALREADY_CONNECTED;
306 return MGMT_STATUS_DISCONNECTED;
309 return MGMT_STATUS_FAILED;
312 static u8 mgmt_status(int err)
315 return mgmt_errno_status(err);
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
320 return MGMT_STATUS_FAILED;
323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
326 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331 u16 len, int flag, struct sock *skip_sk)
333 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338 struct sock *skip_sk)
340 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 HCI_SOCK_TRUSTED, skip_sk);
344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
346 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
350 static u8 le_addr_type(u8 mgmt_addr_type)
352 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 return ADDR_LE_DEV_PUBLIC;
355 return ADDR_LE_DEV_RANDOM;
358 void mgmt_fill_version_info(void *ver)
360 struct mgmt_rp_read_version *rp = ver;
362 rp->version = MGMT_VERSION;
363 rp->revision = cpu_to_le16(MGMT_REVISION);
366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
369 struct mgmt_rp_read_version rp;
371 bt_dev_dbg(hdev, "sock %p", sk);
373 mgmt_fill_version_info(&rp);
375 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
382 struct mgmt_rp_read_commands *rp;
383 u16 num_commands, num_events;
387 bt_dev_dbg(hdev, "sock %p", sk);
389 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 num_commands = ARRAY_SIZE(mgmt_commands);
391 num_events = ARRAY_SIZE(mgmt_events);
393 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 num_events = ARRAY_SIZE(mgmt_untrusted_events);
397 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
399 rp = kmalloc(rp_size, GFP_KERNEL);
403 rp->num_commands = cpu_to_le16(num_commands);
404 rp->num_events = cpu_to_le16(num_events);
406 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 __le16 *opcode = rp->opcodes;
409 for (i = 0; i < num_commands; i++, opcode++)
410 put_unaligned_le16(mgmt_commands[i], opcode);
412 for (i = 0; i < num_events; i++, opcode++)
413 put_unaligned_le16(mgmt_events[i], opcode);
415 __le16 *opcode = rp->opcodes;
417 for (i = 0; i < num_commands; i++, opcode++)
418 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
420 for (i = 0; i < num_events; i++, opcode++)
421 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
434 struct mgmt_rp_read_index_list *rp;
440 bt_dev_dbg(hdev, "sock %p", sk);
442 read_lock(&hci_dev_list_lock);
445 list_for_each_entry(d, &hci_dev_list, list) {
446 if (d->dev_type == HCI_PRIMARY &&
447 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
451 rp_len = sizeof(*rp) + (2 * count);
452 rp = kmalloc(rp_len, GFP_ATOMIC);
454 read_unlock(&hci_dev_list_lock);
459 list_for_each_entry(d, &hci_dev_list, list) {
460 if (hci_dev_test_flag(d, HCI_SETUP) ||
461 hci_dev_test_flag(d, HCI_CONFIG) ||
462 hci_dev_test_flag(d, HCI_USER_CHANNEL))
465 /* Devices marked as raw-only are neither configured
466 * nor unconfigured controllers.
468 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
471 if (d->dev_type == HCI_PRIMARY &&
472 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
473 rp->index[count++] = cpu_to_le16(d->id);
474 bt_dev_dbg(hdev, "Added hci%u", d->id);
478 rp->num_controllers = cpu_to_le16(count);
479 rp_len = sizeof(*rp) + (2 * count);
481 read_unlock(&hci_dev_list_lock);
483 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
491 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
492 void *data, u16 data_len)
494 struct mgmt_rp_read_unconf_index_list *rp;
500 bt_dev_dbg(hdev, "sock %p", sk);
502 read_lock(&hci_dev_list_lock);
505 list_for_each_entry(d, &hci_dev_list, list) {
506 if (d->dev_type == HCI_PRIMARY &&
507 hci_dev_test_flag(d, HCI_UNCONFIGURED))
511 rp_len = sizeof(*rp) + (2 * count);
512 rp = kmalloc(rp_len, GFP_ATOMIC);
514 read_unlock(&hci_dev_list_lock);
519 list_for_each_entry(d, &hci_dev_list, list) {
520 if (hci_dev_test_flag(d, HCI_SETUP) ||
521 hci_dev_test_flag(d, HCI_CONFIG) ||
522 hci_dev_test_flag(d, HCI_USER_CHANNEL))
525 /* Devices marked as raw-only are neither configured
526 * nor unconfigured controllers.
528 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
531 if (d->dev_type == HCI_PRIMARY &&
532 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
533 rp->index[count++] = cpu_to_le16(d->id);
534 bt_dev_dbg(hdev, "Added hci%u", d->id);
538 rp->num_controllers = cpu_to_le16(count);
539 rp_len = sizeof(*rp) + (2 * count);
541 read_unlock(&hci_dev_list_lock);
543 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
544 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
551 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
552 void *data, u16 data_len)
554 struct mgmt_rp_read_ext_index_list *rp;
559 bt_dev_dbg(hdev, "sock %p", sk);
561 read_lock(&hci_dev_list_lock);
564 list_for_each_entry(d, &hci_dev_list, list) {
565 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
569 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
571 read_unlock(&hci_dev_list_lock);
576 list_for_each_entry(d, &hci_dev_list, list) {
577 if (hci_dev_test_flag(d, HCI_SETUP) ||
578 hci_dev_test_flag(d, HCI_CONFIG) ||
579 hci_dev_test_flag(d, HCI_USER_CHANNEL))
582 /* Devices marked as raw-only are neither configured
583 * nor unconfigured controllers.
585 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
588 if (d->dev_type == HCI_PRIMARY) {
589 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
590 rp->entry[count].type = 0x01;
592 rp->entry[count].type = 0x00;
593 } else if (d->dev_type == HCI_AMP) {
594 rp->entry[count].type = 0x02;
599 rp->entry[count].bus = d->bus;
600 rp->entry[count++].index = cpu_to_le16(d->id);
601 bt_dev_dbg(hdev, "Added hci%u", d->id);
604 rp->num_controllers = cpu_to_le16(count);
606 read_unlock(&hci_dev_list_lock);
608 /* If this command is called at least once, then all the
609 * default index and unconfigured index events are disabled
610 * and from now on only extended index events are used.
612 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
613 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
614 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
616 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
617 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
618 struct_size(rp, entry, count));
625 static bool is_configured(struct hci_dev *hdev)
627 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
631 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 !bacmp(&hdev->public_addr, BDADDR_ANY))
639 static __le32 get_missing_options(struct hci_dev *hdev)
643 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645 options |= MGMT_OPTION_EXTERNAL_CONFIG;
647 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649 !bacmp(&hdev->public_addr, BDADDR_ANY))
650 options |= MGMT_OPTION_PUBLIC_ADDRESS;
652 return cpu_to_le32(options);
655 static int new_options(struct hci_dev *hdev, struct sock *skip)
657 __le32 options = get_missing_options(hdev);
659 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
660 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
663 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
665 __le32 options = get_missing_options(hdev);
667 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
671 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
672 void *data, u16 data_len)
674 struct mgmt_rp_read_config_info rp;
677 bt_dev_dbg(hdev, "sock %p", sk);
681 memset(&rp, 0, sizeof(rp));
682 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
684 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
685 options |= MGMT_OPTION_EXTERNAL_CONFIG;
687 if (hdev->set_bdaddr)
688 options |= MGMT_OPTION_PUBLIC_ADDRESS;
690 rp.supported_options = cpu_to_le32(options);
691 rp.missing_options = get_missing_options(hdev);
693 hci_dev_unlock(hdev);
695 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
699 static u32 get_supported_phys(struct hci_dev *hdev)
701 u32 supported_phys = 0;
703 if (lmp_bredr_capable(hdev)) {
704 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
706 if (hdev->features[0][0] & LMP_3SLOT)
707 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
709 if (hdev->features[0][0] & LMP_5SLOT)
710 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
712 if (lmp_edr_2m_capable(hdev)) {
713 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
715 if (lmp_edr_3slot_capable(hdev))
716 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
718 if (lmp_edr_5slot_capable(hdev))
719 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
721 if (lmp_edr_3m_capable(hdev)) {
722 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
724 if (lmp_edr_3slot_capable(hdev))
725 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
727 if (lmp_edr_5slot_capable(hdev))
728 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
733 if (lmp_le_capable(hdev)) {
734 supported_phys |= MGMT_PHY_LE_1M_TX;
735 supported_phys |= MGMT_PHY_LE_1M_RX;
737 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
738 supported_phys |= MGMT_PHY_LE_2M_TX;
739 supported_phys |= MGMT_PHY_LE_2M_RX;
742 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
743 supported_phys |= MGMT_PHY_LE_CODED_TX;
744 supported_phys |= MGMT_PHY_LE_CODED_RX;
748 return supported_phys;
751 static u32 get_selected_phys(struct hci_dev *hdev)
753 u32 selected_phys = 0;
755 if (lmp_bredr_capable(hdev)) {
756 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
758 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
759 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
761 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
762 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
764 if (lmp_edr_2m_capable(hdev)) {
765 if (!(hdev->pkt_type & HCI_2DH1))
766 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
768 if (lmp_edr_3slot_capable(hdev) &&
769 !(hdev->pkt_type & HCI_2DH3))
770 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
772 if (lmp_edr_5slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH5))
774 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
776 if (lmp_edr_3m_capable(hdev)) {
777 if (!(hdev->pkt_type & HCI_3DH1))
778 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
780 if (lmp_edr_3slot_capable(hdev) &&
781 !(hdev->pkt_type & HCI_3DH3))
782 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
784 if (lmp_edr_5slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH5))
786 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
791 if (lmp_le_capable(hdev)) {
792 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
793 selected_phys |= MGMT_PHY_LE_1M_TX;
795 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
796 selected_phys |= MGMT_PHY_LE_1M_RX;
798 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
799 selected_phys |= MGMT_PHY_LE_2M_TX;
801 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
802 selected_phys |= MGMT_PHY_LE_2M_RX;
804 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
805 selected_phys |= MGMT_PHY_LE_CODED_TX;
807 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
808 selected_phys |= MGMT_PHY_LE_CODED_RX;
811 return selected_phys;
814 static u32 get_configurable_phys(struct hci_dev *hdev)
816 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
820 static u32 get_supported_settings(struct hci_dev *hdev)
824 settings |= MGMT_SETTING_POWERED;
825 settings |= MGMT_SETTING_BONDABLE;
826 settings |= MGMT_SETTING_DEBUG_KEYS;
827 settings |= MGMT_SETTING_CONNECTABLE;
828 settings |= MGMT_SETTING_DISCOVERABLE;
830 if (lmp_bredr_capable(hdev)) {
831 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832 settings |= MGMT_SETTING_FAST_CONNECTABLE;
833 settings |= MGMT_SETTING_BREDR;
834 settings |= MGMT_SETTING_LINK_SECURITY;
836 if (lmp_ssp_capable(hdev)) {
837 settings |= MGMT_SETTING_SSP;
838 if (IS_ENABLED(CONFIG_BT_HS))
839 settings |= MGMT_SETTING_HS;
842 if (lmp_sc_capable(hdev))
843 settings |= MGMT_SETTING_SECURE_CONN;
845 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
847 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
850 if (lmp_le_capable(hdev)) {
851 settings |= MGMT_SETTING_LE;
852 settings |= MGMT_SETTING_SECURE_CONN;
853 settings |= MGMT_SETTING_PRIVACY;
854 settings |= MGMT_SETTING_STATIC_ADDRESS;
855 settings |= MGMT_SETTING_ADVERTISING;
858 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
860 settings |= MGMT_SETTING_CONFIGURATION;
862 settings |= MGMT_SETTING_PHY_CONFIGURATION;
867 static u32 get_current_settings(struct hci_dev *hdev)
871 if (hdev_is_powered(hdev))
872 settings |= MGMT_SETTING_POWERED;
874 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
875 settings |= MGMT_SETTING_CONNECTABLE;
877 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
878 settings |= MGMT_SETTING_FAST_CONNECTABLE;
880 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
881 settings |= MGMT_SETTING_DISCOVERABLE;
883 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
884 settings |= MGMT_SETTING_BONDABLE;
886 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
887 settings |= MGMT_SETTING_BREDR;
889 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
890 settings |= MGMT_SETTING_LE;
892 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
893 settings |= MGMT_SETTING_LINK_SECURITY;
895 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
896 settings |= MGMT_SETTING_SSP;
898 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
899 settings |= MGMT_SETTING_HS;
901 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
902 settings |= MGMT_SETTING_ADVERTISING;
904 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
905 settings |= MGMT_SETTING_SECURE_CONN;
907 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
908 settings |= MGMT_SETTING_DEBUG_KEYS;
910 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
911 settings |= MGMT_SETTING_PRIVACY;
913 /* The current setting for static address has two purposes. The
914 * first is to indicate if the static address will be used and
915 * the second is to indicate if it is actually set.
917 * This means if the static address is not configured, this flag
918 * will never be set. If the address is configured, then if the
919 * address is actually used decides if the flag is set or not.
921 * For single mode LE only controllers and dual-mode controllers
922 * with BR/EDR disabled, the existence of the static address will
925 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
926 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
927 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
928 if (bacmp(&hdev->static_addr, BDADDR_ANY))
929 settings |= MGMT_SETTING_STATIC_ADDRESS;
932 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
933 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
938 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
940 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
943 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
945 struct mgmt_pending_cmd *cmd;
947 /* If there's a pending mgmt command the flags will not yet have
948 * their final values, so check for this first.
950 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
952 struct mgmt_mode *cp = cmd->param;
954 return LE_AD_GENERAL;
955 else if (cp->val == 0x02)
956 return LE_AD_LIMITED;
958 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
959 return LE_AD_LIMITED;
960 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
961 return LE_AD_GENERAL;
967 bool mgmt_get_connectable(struct hci_dev *hdev)
969 struct mgmt_pending_cmd *cmd;
971 /* If there's a pending mgmt command the flag will not yet have
972 * it's final value, so check for this first.
974 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
976 struct mgmt_mode *cp = cmd->param;
981 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* hci_cmd_sync callback: flush the cached EIR and class-of-device
 * updates to the controller. Restored the dropped "return 0;".
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
992 static void service_cache_off(struct work_struct *work)
994 struct hci_dev *hdev = container_of(work, struct hci_dev,
997 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1000 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1003 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1005 /* The generation of a new RPA and programming it into the
1006 * controller happens in the hci_req_enable_advertising()
1009 if (ext_adv_capable(hdev))
1010 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1012 return hci_enable_advertising_sync(hdev);
1015 static void rpa_expired(struct work_struct *work)
1017 struct hci_dev *hdev = container_of(work, struct hci_dev,
1020 bt_dev_dbg(hdev, "");
1022 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1024 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1027 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1030 static void discov_off(struct work_struct *work)
1032 struct hci_dev *hdev = container_of(work, struct hci_dev,
1035 bt_dev_dbg(hdev, "");
1039 /* When discoverable timeout triggers, then just make sure
1040 * the limited discoverable flag is cleared. Even in the case
1041 * of a timeout triggered from general discoverable, it is
1042 * safe to unconditionally clear the flag.
1044 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1045 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1046 hdev->discov_timeout = 0;
1048 hci_update_discoverable(hdev);
1050 mgmt_new_settings(hdev);
1052 hci_dev_unlock(hdev);
1055 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1057 static void mesh_send_complete(struct hci_dev *hdev,
1058 struct mgmt_mesh_tx *mesh_tx, bool silent)
1060 u8 handle = mesh_tx->handle;
1063 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1064 sizeof(handle), NULL);
1066 mgmt_mesh_remove(mesh_tx);
1069 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1071 struct mgmt_mesh_tx *mesh_tx;
1073 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1074 hci_disable_advertising_sync(hdev);
1075 mesh_tx = mgmt_mesh_next(hdev, NULL);
1078 mesh_send_complete(hdev, mesh_tx, false);
1083 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1084 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1085 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1087 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1092 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1093 mesh_send_start_complete);
1096 mesh_send_complete(hdev, mesh_tx, false);
1098 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1101 static void mesh_send_done(struct work_struct *work)
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 mesh_send_done.work);
1106 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1109 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1112 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1114 if (hci_dev_test_flag(hdev, HCI_MGMT))
1117 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1119 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1120 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1121 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1122 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1124 /* Non-mgmt controlled devices get this bit set
1125 * implicitly so that pairing works for them, however
1126 * for mgmt we require user-space to explicitly enable
1129 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1131 hci_dev_set_flag(hdev, HCI_MGMT);
1134 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1135 void *data, u16 data_len)
1137 struct mgmt_rp_read_info rp;
1139 bt_dev_dbg(hdev, "sock %p", sk);
1143 memset(&rp, 0, sizeof(rp));
1145 bacpy(&rp.bdaddr, &hdev->bdaddr);
1147 rp.version = hdev->hci_ver;
1148 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1150 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1151 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1153 memcpy(rp.dev_class, hdev->dev_class, 3);
1155 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1156 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1158 hci_dev_unlock(hdev);
1160 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1164 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1169 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1170 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1171 hdev->dev_class, 3);
1173 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1174 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1177 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1178 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1179 hdev->dev_name, name_len);
1181 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1182 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1183 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: fill a mgmt_rp_read_ext_info with address,
 * version, manufacturer, settings and EIR-encoded class/appearance/names,
 * then reply.  Calling this once switches the socket to extended info
 * events only (see flag juggling below).
 */
1188 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1189 void *data, u16 data_len)
1192 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1195 bt_dev_dbg(hdev, "sock %p", sk);
1197 memset(&buf, 0, sizeof(buf));
1201 bacpy(&rp->bdaddr, &hdev->bdaddr);
1203 rp->version = hdev->hci_ver;
1204 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1206 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1207 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1210 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1211 rp->eir_len = cpu_to_le16(eir_len);
/* NOTE(review): matching hci_dev_lock() line is missing from this listing */
1213 hci_dev_unlock(hdev);
1215 /* If this command is called at least once, then the events
1216 * for class of device and local name changes are disabled
1217 * and only the new extended controller information event
1220 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1221 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1222 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1224 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1225 sizeof(*rp) + eir_len);
/* Emit MGMT_EV_EXT_INFO_CHANGED (with freshly rebuilt EIR data) to all
 * sockets that opted into extended info events, except @skip.
 */
1228 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1231 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1234 memset(buf, 0, sizeof(buf));
1236 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1237 ev->eir_len = cpu_to_le16(eir_len);
1239 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1240 sizeof(*ev) + eir_len,
1241 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Reply to @opcode with the current settings bitmask as the parameters. */
1244 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1246 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1248 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Notify mgmt listeners (except @sk) that advertising @instance was added. */
1252 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1254 struct mgmt_ev_advertising_added ev;
1256 ev.instance = instance;
1258 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Notify mgmt listeners (except @sk) that advertising @instance was removed. */
1261 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1264 struct mgmt_ev_advertising_removed ev;
1266 ev.instance = instance;
1268 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Stop the pending advertising-instance expiry timer, if one is armed,
 * and clear the recorded timeout so the complete handler won't re-arm it.
 */
1271 static void cancel_adv_timeout(struct hci_dev *hdev)
1273 if (hdev->adv_instance_timeout) {
1274 hdev->adv_instance_timeout = 0;
1275 cancel_delayed_work(&hdev->adv_instance_expire);
1279 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry onto the pending-connect or
 * pending-report list according to its auto_connect policy.  Entries are
 * first unlinked so stale membership is discarded.
 * NOTE(review): break statements between cases are missing from this
 * listing; code lines kept byte-identical.
 */
1280 static void restart_le_actions(struct hci_dev *hdev)
1282 struct hci_conn_params *p;
1284 list_for_each_entry(p, &hdev->le_conn_params, list) {
1285 /* Needed for AUTO_OFF case where might not "really"
1286 * have been powered off.
1288 list_del_init(&p->action);
1290 switch (p->auto_connect) {
1291 case HCI_AUTO_CONN_DIRECT:
1292 case HCI_AUTO_CONN_ALWAYS:
1293 list_add(&p->action, &hdev->pend_le_conns);
1295 case HCI_AUTO_CONN_REPORT:
1296 list_add(&p->action, &hdev->pend_le_reports);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings to all sockets
 * registered for setting events, except @skip.
 */
1304 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1306 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1308 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1309 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Completion callback for SET_POWERED: on success restart LE actions and
 * passive scanning, answer the pending command, and (for power-on) emit
 * new settings; on error report the failure status.  Bails out early if
 * the pending command was already cancelled/answered elsewhere.
 */
1312 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1314 struct mgmt_pending_cmd *cmd = data;
1315 struct mgmt_mode *cp;
1317 /* Make sure cmd still outstanding. */
1318 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1323 bt_dev_dbg(hdev, "err %d", err)
1328 restart_le_actions(hdev);
1329 hci_update_passive_scan(hdev);
1330 hci_dev_unlock(hdev);
1333 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1335 /* Only call new_setting for power on as power off is deferred
1336 * to hdev->power_off work which does call hci_dev_do_close.
1339 new_settings(hdev, cmd->sk);
1341 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1345 mgmt_pending_remove(cmd);
/* hci_cmd_sync work item: apply the requested power state synchronously. */
1348 static int set_powered_sync(struct hci_dev *hdev, void *data)
1350 struct mgmt_pending_cmd *cmd = data;
1351 struct mgmt_mode *cp = cmd->param;
1353 BT_DBG("%s", hdev->name);
1355 return hci_set_powered_sync(hdev, cp->val);
/* MGMT_OP_SET_POWERED handler: validate the mode byte, reject if a power
 * change is already pending, short-circuit when the state already matches,
 * otherwise queue set_powered_sync with mgmt_set_powered_complete as the
 * completion callback.
 */
1358 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1361 struct mgmt_mode *cp = data;
1362 struct mgmt_pending_cmd *cmd;
1365 bt_dev_dbg(hdev, "sock %p", sk);
/* Only 0x00 (off) and 0x01 (on) are valid */
1367 if (cp->val != 0x00 && cp->val != 0x01)
1368 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1369 MGMT_STATUS_INVALID_PARAMS);
1373 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1374 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Requested state already in effect: just return current settings */
1379 if (!!cp->val == hdev_is_powered(hdev)) {
1380 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1384 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1390 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1391 mgmt_set_powered_complete);
1394 mgmt_pending_remove(cmd);
1397 hci_dev_unlock(hdev);
/* Public wrapper: broadcast the new-settings event to every listener. */
1401 int mgmt_new_settings(struct hci_dev *hdev)
1403 return new_settings(hdev, NULL);
1408 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer @cmd with the current settings,
 * remember the first socket in the cmd_lookup match (holding a reference
 * so the caller can send a follow-up event to it), and free the command.
 */
1412 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1414 struct cmd_lookup *match = data;
1416 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1418 list_del(&cmd->list);
1420 if (match->sk == NULL) {
1421 match->sk = cmd->sk;
1422 sock_hold(match->sk);
1425 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail @cmd with *(u8 *)data as status. */
1428 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1432 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1433 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: use the command's own cmd_complete hook
 * when it has one, otherwise fall back to a plain status response.
 */
1436 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1438 if (cmd->cmd_complete) {
1441 cmd->cmd_complete(cmd, *status);
1442 mgmt_pending_remove(cmd);
1447 cmd_status_rsp(cmd, data);
/* Default cmd_complete: echo the command's own parameters back. */
1450 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1452 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1453 cmd->param, cmd->param_len);
/* cmd_complete variant: echo back only the leading mgmt_addr_info
 * portion of the command parameters.
 */
1456 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1458 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1459 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR capability/state to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, else SUCCESS.
 */
1462 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1464 if (!lmp_bredr_capable(hdev))
1465 return MGMT_STATUS_NOT_SUPPORTED;
1466 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1467 return MGMT_STATUS_REJECTED;
1469 return MGMT_STATUS_SUCCESS;
/* Map LE capability/state to a mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled, else SUCCESS.
 */
1472 static u8 mgmt_le_support(struct hci_dev *hdev)
1474 if (!lmp_le_capable(hdev))
1475 return MGMT_STATUS_NOT_SUPPORTED;
1476 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1477 return MGMT_STATUS_REJECTED;
1479 return MGMT_STATUS_SUCCESS;
/* Completion callback for SET_DISCOVERABLE: on error report the status and
 * drop limited-discoverable; on success (re-)arm the discoverable timeout
 * if one is configured, answer the pending command and emit new settings.
 */
1482 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1485 struct mgmt_pending_cmd *cmd = data;
1487 bt_dev_dbg(hdev, "err %d", err);
1489 /* Make sure cmd still outstanding. */
1490 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1496 u8 mgmt_err = mgmt_status(err);
1497 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1498 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* discov_timeout is in seconds; convert to jiffies for the workqueue */
1502 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1503 hdev->discov_timeout > 0) {
1504 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1505 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1508 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1509 new_settings(hdev, cmd->sk);
1512 mgmt_pending_remove(cmd);
1513 hci_dev_unlock(hdev);
/* hci_cmd_sync work item: push the discoverable state to the controller. */
1516 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1518 BT_DBG("%s", hdev->name);
1520 return hci_update_discoverable_sync(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  Validates mode (0x00 off, 0x01
 * general, 0x02 limited) and the timeout rules, handles the powered-off
 * case by flag changes only, short-circuits a pure timeout update, and
 * otherwise sets/clears the DISCOVERABLE / LIMITED_DISCOVERABLE flags and
 * queues set_discoverable_sync.  The timeout is armed in the completion
 * handler, not here.
 */
1523 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1526 struct mgmt_cp_set_discoverable *cp = data;
1527 struct mgmt_pending_cmd *cmd;
1531 bt_dev_dbg(hdev, "sock %p", sk);
/* Needs at least one of LE or BR/EDR enabled */
1533 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1534 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1535 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1536 MGMT_STATUS_REJECTED);
1538 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1539 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1540 MGMT_STATUS_INVALID_PARAMS);
1542 timeout = __le16_to_cpu(cp->timeout);
1544 /* Disabling discoverable requires that no timeout is set,
1545 * and enabling limited discoverable requires a timeout.
1547 if ((cp->val == 0x00 && timeout > 0) ||
1548 (cp->val == 0x02 && timeout == 0))
1549 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1550 MGMT_STATUS_INVALID_PARAMS);
/* A timeout while powered off cannot be honoured */
1554 if (!hdev_is_powered(hdev) && timeout > 0) {
1555 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1556 MGMT_STATUS_NOT_POWERED);
1560 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1561 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1562 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be set first */
1567 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1568 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1569 MGMT_STATUS_REJECTED);
1573 if (hdev->advertising_paused) {
1574 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1579 if (!hdev_is_powered(hdev)) {
1580 bool changed = false;
1582 /* Setting limited discoverable when powered off is
1583 * not a valid operation since it requires a timeout
1584 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1586 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1587 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1591 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1596 err = new_settings(hdev, sk);
1601 /* If the current mode is the same, then just update the timeout
1602 * value with the new value. And if only the timeout gets updated,
1603 * then no need for any HCI transactions.
1605 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1606 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1607 HCI_LIMITED_DISCOVERABLE)) {
1608 cancel_delayed_work(&hdev->discov_off);
1609 hdev->discov_timeout = timeout;
1611 if (cp->val && hdev->discov_timeout > 0) {
1612 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1613 queue_delayed_work(hdev->req_workqueue,
1614 &hdev->discov_off, to);
1617 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1621 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1627 /* Cancel any potential discoverable timeout that might be
1628 * still active and store new timeout value. The arming of
1629 * the timeout happens in the complete handler.
1631 cancel_delayed_work(&hdev->discov_off);
1632 hdev->discov_timeout = timeout;
1635 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1637 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1639 /* Limited discoverable mode */
1640 if (cp->val == 0x02)
1641 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1643 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1645 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1646 mgmt_set_discoverable_complete);
1649 mgmt_pending_remove(cmd);
1652 hci_dev_unlock(hdev);
/* Completion callback for SET_CONNECTABLE: report failure status or answer
 * the pending command and emit new settings, then clean up.
 */
1656 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1659 struct mgmt_pending_cmd *cmd = data;
1661 bt_dev_dbg(hdev, "err %d", err);
1663 /* Make sure cmd still outstanding. */
1664 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1670 u8 mgmt_err = mgmt_status(err);
1671 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1675 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1676 new_settings(hdev, cmd->sk);
1680 mgmt_pending_remove(cmd);
1682 hci_dev_unlock(hdev);
/* Powered-off path for SET_CONNECTABLE: only flip the flags (turning
 * connectable off also drops discoverable), reply with settings, refresh
 * scanning state, and emit new settings if anything changed.
 */
1685 static int set_connectable_update_settings(struct hci_dev *hdev,
1686 struct sock *sk, u8 val)
1688 bool changed = false;
1691 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1695 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1697 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Non-connectable implies non-discoverable */
1698 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1701 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1706 hci_update_scan(hdev);
1707 hci_update_passive_scan(hdev);
1708 return new_settings(hdev, sk);
/* hci_cmd_sync work item: push the connectable state to the controller. */
1714 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1716 BT_DBG("%s", hdev->name);
1718 return hci_update_connectable_sync(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: validate, use the flags-only path when
 * powered off, reject if a connectable/discoverable change is pending,
 * otherwise update flags (disabling also clears discoverable state and a
 * running discoverable timeout) and queue set_connectable_sync.
 */
1721 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1724 struct mgmt_mode *cp = data;
1725 struct mgmt_pending_cmd *cmd;
1728 bt_dev_dbg(hdev, "sock %p", sk);
1730 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1731 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1732 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1733 MGMT_STATUS_REJECTED);
1735 if (cp->val != 0x00 && cp->val != 0x01)
1736 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1737 MGMT_STATUS_INVALID_PARAMS);
1741 if (!hdev_is_powered(hdev)) {
1742 err = set_connectable_update_settings(hdev, sk, cp->val);
1746 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1747 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1748 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1753 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1760 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off tears down discoverable state too */
1762 if (hdev->discov_timeout > 0)
1763 cancel_delayed_work(&hdev->discov_off);
1765 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1766 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1767 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1770 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1771 mgmt_set_connectable_complete);
1774 mgmt_pending_remove(cmd);
1777 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: purely a host-side flag, no HCI traffic.
 * On change, refresh discoverable state (the bondable bit can affect the
 * advertised address in limited privacy mode) and emit new settings.
 */
1781 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1784 struct mgmt_mode *cp = data;
1788 bt_dev_dbg(hdev, "sock %p", sk);
1790 if (cp->val != 0x00 && cp->val != 0x01)
1791 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1792 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear reports whether the flag actually flipped */
1797 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1799 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1801 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1806 /* In limited privacy mode the change of bondable mode
1807 * may affect the local advertising address.
1809 hci_update_discoverable(hdev);
1811 err = new_settings(hdev, sk);
1815 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler (BR/EDR authentication): flags-only
 * when powered off; otherwise, if the HCI_AUTH state differs, send
 * HCI_OP_WRITE_AUTH_ENABLE to the controller with a pending command.
 */
1819 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1822 struct mgmt_mode *cp = data;
1823 struct mgmt_pending_cmd *cmd;
1827 bt_dev_dbg(hdev, "sock %p", sk);
1829 status = mgmt_bredr_support(hdev);
1831 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1834 if (cp->val != 0x00 && cp->val != 0x01)
1835 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1836 MGMT_STATUS_INVALID_PARAMS);
1840 if (!hdev_is_powered(hdev)) {
1841 bool changed = false;
1843 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1844 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1848 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1853 err = new_settings(hdev, sk);
1858 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1859 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in requested auth state: nothing to send */
1866 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1867 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1871 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1877 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1879 mgmt_pending_remove(cmd);
1884 hci_dev_unlock(hdev);
/* Completion callback for SET_SSP: on error roll back the SSP (and HS)
 * flags and fail all pending SET_SSP commands; on success reconcile the
 * flags with the result, answer pending commands, emit new settings if
 * needed, and refresh the EIR data.
 */
1888 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1890 struct cmd_lookup match = { NULL, hdev };
1891 struct mgmt_pending_cmd *cmd = data;
1892 struct mgmt_mode *cp = cmd->param;
1893 u8 enable = cp->val;
1896 /* Make sure cmd still outstanding. */
1897 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1901 u8 mgmt_err = mgmt_status(err);
1903 if (enable && hci_dev_test_and_clear_flag(hdev,
/* HS depends on SSP, so it is dropped together with it */
1905 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1906 new_settings(hdev, NULL);
1909 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1915 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1917 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1920 changed = hci_dev_test_and_clear_flag(hdev,
1923 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1926 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1929 new_settings(hdev, match.sk);
1934 hci_update_eir_sync(hdev);
/* hci_cmd_sync work item for SET_SSP: optimistically set the flag, write
 * the SSP mode to the controller, and undo the flag if the write failed.
 */
1937 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1939 struct mgmt_pending_cmd *cmd = data;
1940 struct mgmt_mode *cp = cmd->param;
1941 bool changed = false;
1945 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1947 err = hci_write_ssp_mode_sync(hdev, cp->val);
/* Roll back the optimistic flag set on failure */
1949 if (!err && changed)
1950 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP-capable LMP.  Powered
 * off → flags only (disabling also drops HS); powered on → queue
 * set_ssp_sync unless a SET_SSP is already pending or the state matches.
 */
1955 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1957 struct mgmt_mode *cp = data;
1958 struct mgmt_pending_cmd *cmd;
1962 bt_dev_dbg(hdev, "sock %p", sk);
1964 status = mgmt_bredr_support(hdev);
1966 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1968 if (!lmp_ssp_capable(hdev))
1969 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1970 MGMT_STATUS_NOT_SUPPORTED);
1972 if (cp->val != 0x00 && cp->val != 0x01)
1973 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1974 MGMT_STATUS_INVALID_PARAMS);
1978 if (!hdev_is_powered(hdev)) {
1982 changed = !hci_dev_test_and_set_flag(hdev,
1985 changed = hci_dev_test_and_clear_flag(hdev,
1988 changed = hci_dev_test_and_clear_flag(hdev,
1991 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1994 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1999 err = new_settings(hdev, sk);
2004 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2005 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2010 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2011 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2015 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2019 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2024 MGMT_STATUS_FAILED);
2027 mgmt_pending_remove(cmd);
2031 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler (High Speed / AMP): compile-time gated on
 * CONFIG_BT_HS; requires BR/EDR, SSP capability and SSP enabled.  Pure
 * flag change — disabling while powered is rejected.
 */
2035 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2037 struct mgmt_mode *cp = data;
2042 bt_dev_dbg(hdev, "sock %p", sk);
2044 if (!IS_ENABLED(CONFIG_BT_HS))
2045 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2046 MGMT_STATUS_NOT_SUPPORTED);
2048 status = mgmt_bredr_support(hdev);
2050 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2052 if (!lmp_ssp_capable(hdev))
2053 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2054 MGMT_STATUS_NOT_SUPPORTED);
2056 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2057 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2058 MGMT_STATUS_REJECTED);
2060 if (cp->val != 0x00 && cp->val != 0x01)
2061 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2062 MGMT_STATUS_INVALID_PARAMS);
/* Don't race with an in-flight SSP change */
2066 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2067 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2073 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2075 if (hdev_is_powered(hdev)) {
2076 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2077 MGMT_STATUS_REJECTED);
2081 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2084 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2089 err = new_settings(hdev, sk);
2092 hci_dev_unlock(hdev);
/* Completion callback for SET_LE: fail or answer all pending SET_LE
 * commands; on success emit new settings to everyone but the first
 * answered socket.
 */
2096 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2098 struct cmd_lookup match = { NULL, hdev };
2099 u8 status = mgmt_status(err);
2101 bt_dev_dbg(hdev, "err %d", err);
2104 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2109 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2111 new_settings(hdev, match.sk);
/* hci_cmd_sync work item for SET_LE.  Disabling tears down advertising
 * (instances, legacy adv, ext adv set 0); enabling sets the flag.  Then
 * the LE-host-supported bit is written, and on successful enable the
 * default advertising/scan-response data is refreshed and passive
 * scanning updated.
 */
2117 static int set_le_sync(struct hci_dev *hdev, void *data)
2119 struct mgmt_pending_cmd *cmd = data;
2120 struct mgmt_mode *cp = cmd->param;
2125 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2127 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2128 hci_disable_advertising_sync(hdev);
2130 if (ext_adv_capable(hdev))
2131 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2133 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2136 err = hci_write_le_host_supported_sync(hdev, val, 0);
2138 /* Make sure the controller has a good default for
2139 * advertising data. Restrict the update to when LE
2140 * has actually been enabled. During power on, the
2141 * update in powered_update_hci will take care of it.
2143 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2144 if (ext_adv_capable(hdev)) {
2147 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2149 hci_update_scan_rsp_data_sync(hdev, 0x00);
2151 hci_update_adv_data_sync(hdev, 0x00);
2152 hci_update_scan_rsp_data_sync(hdev, 0x00);
2155 hci_update_passive_scan(hdev);
/* Completion callback for SET_MESH_RECEIVER: on error fail every pending
 * command with the status; otherwise remove this command and reply with
 * an empty success.
 */
2161 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2163 struct mgmt_pending_cmd *cmd = data;
2164 u8 status = mgmt_status(err);
2165 struct sock *sk = cmd->sk;
2168 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2169 cmd_status_rsp, &status);
2173 mgmt_pending_remove(cmd);
2174 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
/* hci_cmd_sync work item for SET_MESH_RECEIVER: set/clear HCI_MESH, copy
 * the AD-type filter list (forwarding everything if the caller's list is
 * larger than the buffer — the copy is then skipped), then refresh
 * passive scanning.
 */
2177 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2179 struct mgmt_pending_cmd *cmd = data;
2180 struct mgmt_cp_set_mesh *cp = cmd->param;
2181 size_t len = cmd->param_len;
2183 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2186 hci_dev_set_flag(hdev, HCI_MESH);
2188 hci_dev_clear_flag(hdev, HCI_MESH);
/* NOTE(review): a line adjusting len by the fixed header size appears to
 * be missing from this listing — verify against upstream before relying
 * on the bound below. */
2192 /* If filters don't fit, forward all adv pkts */
2193 if (len <= sizeof(hdev->mesh_ad_types))
2194 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2196 hci_update_passive_scan_sync(hdev);
/* MGMT_OP_SET_MESH_RECEIVER handler: requires LE capability and the mesh
 * experimental flag; validates the enable byte and queues set_mesh_sync.
 */
2200 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2202 struct mgmt_cp_set_mesh *cp = data;
2203 struct mgmt_pending_cmd *cmd;
2206 bt_dev_dbg(hdev, "sock %p", sk);
2208 if (!lmp_le_capable(hdev) ||
2209 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2210 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2211 MGMT_STATUS_NOT_SUPPORTED);
2213 if (cp->enable != 0x00 && cp->enable != 0x01)
2214 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2215 MGMT_STATUS_INVALID_PARAMS);
2219 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2223 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2227 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2228 MGMT_STATUS_FAILED);
2231 mgmt_pending_remove(cmd);
2234 hci_dev_unlock(hdev);
/* Completion callback for a mesh transmit start: on failure clear the
 * sending flag and complete the transmit with an error; on success arm
 * the mesh_send_done work after cnt * 25 ms.
 */
2238 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2240 struct mgmt_mesh_tx *mesh_tx = data;
2241 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2242 unsigned long mesh_send_interval;
2243 u8 mgmt_err = mgmt_status(err);
2245 /* Report any errors here, but don't report completion */
2248 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2249 /* Send Complete Error Code for handle */
2250 mesh_send_complete(hdev, mesh_tx, false);
/* One advertising event per count, at roughly 25 ms per event */
2254 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2255 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2256 mesh_send_interval);
/* hci_cmd_sync work item that starts a mesh transmission: claims the
 * advertising instance slot one past the controller's normal set
 * (le_num_of_adv_sets + 1), creates an instance carrying the mesh AD
 * payload, and schedules it — pre-empting the current instance when it
 * occupies the same slot.  Returns MGMT_STATUS_BUSY when all regular
 * instances are in use.
 */
2259 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2261 struct mgmt_mesh_tx *mesh_tx = data;
2262 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2263 struct adv_info *adv, *next_instance;
2264 u8 instance = hdev->le_num_of_adv_sets + 1;
2265 u16 timeout, duration;
2268 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2269 return MGMT_STATUS_BUSY;
/* Duration covers cnt advertising events at the max interval */
2272 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2273 adv = hci_add_adv_instance(hdev, instance, 0,
2274 send->adv_data_len, send->adv_data,
2277 HCI_ADV_TX_POWER_NO_PREFERENCE,
2278 hdev->le_adv_min_interval,
2279 hdev->le_adv_max_interval,
2283 mesh_tx->instance = instance;
2287 if (hdev->cur_adv_instance == instance) {
2288 /* If the currently advertised instance is being changed then
2289 * cancel the current advertising and schedule the next
2290 * instance. If there is only one instance then the overridden
2291 * advertising data will be visible right away.
2293 cancel_adv_timeout(hdev);
2295 next_instance = hci_get_next_instance(hdev, instance);
2297 instance = next_instance->instance;
2300 } else if (hdev->adv_instance_timeout) {
2301 /* Immediately advertise the new instance if no other, or
2302 * let it go naturally from queue if ADV is already happening
2308 return hci_schedule_adv_instance_sync(hdev, instance, true);
/* Per-transmit iterator: append the tx handle to the read-features reply,
 * stopping silently once max_handles is reached.
 */
2313 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2315 struct mgmt_rp_mesh_read_features *rp = data;
2317 if (rp->used_handles >= rp->max_handles)
2320 rp->handles[rp->used_handles++] = mesh_tx->handle;
/* MGMT_OP_MESH_READ_FEATURES handler: report the handle capacity and the
 * caller's currently outstanding mesh tx handles.  The reply is trimmed
 * to only the used portion of the handles array.
 */
2323 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2324 void *data, u16 len)
2326 struct mgmt_rp_mesh_read_features rp;
2328 if (!lmp_le_capable(hdev) ||
2329 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2330 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2331 MGMT_STATUS_NOT_SUPPORTED);
2333 memset(&rp, 0, sizeof(rp));
2334 rp.index = cpu_to_le16(hdev->id);
2335 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2336 rp.max_handles = MESH_HANDLES_MAX;
2341 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
/* Shrink the reply: drop the unused tail of the handles array */
2343 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2344 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2346 hci_dev_unlock(hdev);
/* hci_cmd_sync work item for MESH_SEND_CANCEL: handle 0 cancels every
 * outstanding transmit owned by the requesting socket; a nonzero handle
 * cancels only that transmit, and only if it belongs to the requester.
 */
2350 static int send_cancel(struct hci_dev *hdev, void *data)
2352 struct mgmt_pending_cmd *cmd = data;
2353 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2354 struct mgmt_mesh_tx *mesh_tx;
2356 if (!cancel->handle) {
2358 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2361 mesh_send_complete(hdev, mesh_tx, false);
2364 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
/* Ownership check: a socket may only cancel its own transmits */
2366 if (mesh_tx && mesh_tx->sk == cmd->sk)
2367 mesh_send_complete(hdev, mesh_tx, false);
2370 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2372 mgmt_pending_free(cmd);
/* MGMT_OP_MESH_SEND_CANCEL handler: validate mesh support and LE enabled,
 * then queue the send_cancel work item (no completion callback — the
 * work item sends its own reply).
 */
2377 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2378 void *data, u16 len)
2380 struct mgmt_pending_cmd *cmd;
2383 if (!lmp_le_capable(hdev) ||
2384 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2385 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2386 MGMT_STATUS_NOT_SUPPORTED);
2388 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2389 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2390 MGMT_STATUS_REJECTED);
2393 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2397 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2400 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2401 MGMT_STATUS_FAILED);
2404 mgmt_pending_free(cmd);
2407 hci_dev_unlock(hdev);
/* MGMT_OP_MESH_SEND handler: validate support, LE state and payload size
 * (header < len <= header + 31 bytes of AD), refuse when all tx handles
 * are in use, register the transmit and queue mesh_send_sync.  Replies
 * immediately with the allocated handle; completion is reported later.
 */
2411 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2413 struct mgmt_mesh_tx *mesh_tx;
2414 struct mgmt_cp_mesh_send *send = data;
2415 struct mgmt_rp_mesh_read_features rp;
2419 if (!lmp_le_capable(hdev) ||
2420 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2421 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2422 MGMT_STATUS_NOT_SUPPORTED);
/* AD payload is bounded by the 31-byte legacy advertising limit */
2423 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2424 len <= MGMT_MESH_SEND_SIZE ||
2425 len > (MGMT_MESH_SEND_SIZE + 31))
2426 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2427 MGMT_STATUS_REJECTED);
/* Reuse the read-features reply struct just to count used handles */
2431 memset(&rp, 0, sizeof(rp));
2432 rp.max_handles = MESH_HANDLES_MAX;
2434 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2436 if (rp.max_handles <= rp.used_handles) {
2437 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2442 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2443 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2448 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2449 mesh_send_start_complete);
2452 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2453 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2454 MGMT_STATUS_FAILED);
2458 mgmt_mesh_remove(mesh_tx);
2461 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2463 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2464 &mesh_tx->handle, 1);
2468 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler.  LE-only controllers cannot switch LE off
 * (enable is answered gracefully, disable rejected).  Powered off or
 * no-op changes are handled via flags only (disabling also clears the
 * advertising flag); otherwise set_le_sync is queued unless a SET_LE or
 * SET_ADVERTISING change is already in flight.
 */
2472 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2474 struct mgmt_mode *cp = data;
2475 struct mgmt_pending_cmd *cmd;
2479 bt_dev_dbg(hdev, "sock %p", sk);
2481 if (!lmp_le_capable(hdev))
2482 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2483 MGMT_STATUS_NOT_SUPPORTED);
2485 if (cp->val != 0x00 && cp->val != 0x01)
2486 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2487 MGMT_STATUS_INVALID_PARAMS);
2489 /* Bluetooth single mode LE only controllers or dual-mode
2490 * controllers configured as LE only devices, do not allow
2491 * switching LE off. These have either LE enabled explicitly
2492 * or BR/EDR has been previously switched off.
2494 * When trying to enable an already enabled LE, then gracefully
2495 * send a positive response. Trying to disable it however will
2496 * result into rejection.
2498 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2499 if (cp->val == 0x01)
2500 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2502 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2503 MGMT_STATUS_REJECTED);
2509 enabled = lmp_host_le_capable(hdev);
2511 if (!hdev_is_powered(hdev) || val == enabled) {
2512 bool changed = false;
2514 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2515 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2519 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2520 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2524 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2529 err = new_settings(hdev, sk);
2534 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2535 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2536 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2541 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2545 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2549 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2550 MGMT_STATUS_FAILED);
2553 mgmt_pending_remove(cmd);
2557 hci_dev_unlock(hdev);
2561 /* This is a helper function to test for pending mgmt commands that can
2562 * cause CoD or EIR HCI commands. We can only allow one such pending
2563 * mgmt command at a time since otherwise we cannot easily track what
2564 * the current values are, will be, and based on that calculate if a new
2565 * HCI command needs to be sent and if yes with what value.
/* Returns true when any of the opcodes below is in the pending list. */
2567 static bool pending_eir_or_class(struct hci_dev *hdev)
2569 struct mgmt_pending_cmd *cmd;
2571 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2572 switch (cmd->opcode) {
2573 case MGMT_OP_ADD_UUID:
2574 case MGMT_OP_REMOVE_UUID:
2575 case MGMT_OP_SET_DEV_CLASS:
2576 case MGMT_OP_SET_POWERED:
/* The 128-bit Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB)
 * in little-endian byte order; 16/32-bit UUIDs occupy the final bytes.
 */
2584 static const u8 bluetooth_base_uuid[] = {
2585 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2586 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes are not the Bluetooth base
 * UUID it is a true 128-bit UUID; otherwise the 32-bit value at offset 12
 * decides between the 16- and 32-bit shortened forms (return lines are
 * missing from this listing).
 */
2589 static u8 get_uuid_size(const u8 *uuid)
2593 if (memcmp(uuid, bluetooth_base_uuid, 12))
2596 val = get_unaligned_le32(&uuid[12]);
/* Shared completion callback for the UUID/class commands: reply with the
 * (possibly updated) 3-byte device class and free the pending command.
 */
2603 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2605 struct mgmt_pending_cmd *cmd = data;
2607 bt_dev_dbg(hdev, "err %d", err);
2609 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2610 mgmt_status(err), hdev->dev_class, 3);
2612 mgmt_pending_free(cmd);
/* hci_cmd_sync work item for ADD_UUID: refresh the class of device, then
 * the EIR data (the error-propagation line between the calls is missing
 * from this listing).
 */
2615 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2619 err = hci_update_class_sync(hdev);
2623 return hci_update_eir_sync(hdev);
/* MGMT_OP_ADD_UUID handler: serialize against other EIR/class-affecting
 * commands, allocate and append the UUID record to hdev->uuids, then
 * queue add_uuid_sync with mgmt_class_complete.
 */
2626 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2628 struct mgmt_cp_add_uuid *cp = data;
2629 struct mgmt_pending_cmd *cmd;
2630 struct bt_uuid *uuid;
2633 bt_dev_dbg(hdev, "sock %p", sk);
2637 if (pending_eir_or_class(hdev)) {
2638 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
/* NOTE(review): NULL check after kmalloc is missing from this listing */
2643 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2649 memcpy(uuid->uuid, cp->uuid, 16);
2650 uuid->svc_hint = cp->svc_hint;
2651 uuid->size = get_uuid_size(cp->uuid);
2653 list_add_tail(&uuid->list, &hdev->uuids);
2655 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2661 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2663 mgmt_pending_free(cmd);
2668 hci_dev_unlock(hdev);
/* Arm the service-cache timer on a powered device.  Returns whether the
 * cache was newly enabled (i.e. the caller can skip immediate updates).
 */
2672 static bool enable_service_cache(struct hci_dev *hdev)
2674 if (!hdev_is_powered(hdev))
2677 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2678 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_cmd_sync work item for REMOVE_UUID: refresh class of device, then
 * EIR (error-propagation line between the calls is missing from this
 * listing).
 */
2686 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2690 err = hci_update_class_sync(hdev);
2694 return hci_update_eir_sync(hdev);
/* MGMT_OP_REMOVE_UUID handler: the all-zero UUID clears the whole list
 * (optionally deferring HCI updates via the service cache); otherwise
 * remove every matching entry, failing with INVALID_PARAMS when none
 * matched, then queue remove_uuid_sync.
 */
2697 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2700 struct mgmt_cp_remove_uuid *cp = data;
2701 struct mgmt_pending_cmd *cmd;
2702 struct bt_uuid *match, *tmp;
2703 static const u8 bt_uuid_any[] = {
2704 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2708 bt_dev_dbg(hdev, "sock %p", sk);
2712 if (pending_eir_or_class(hdev)) {
2713 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2718 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2719 hci_uuids_clear(hdev);
/* Service cache active: reply now, defer class/EIR refresh */
2721 if (enable_service_cache(hdev)) {
2722 err = mgmt_cmd_complete(sk, hdev->id,
2723 MGMT_OP_REMOVE_UUID,
2724 0, hdev->dev_class, 3);
2733 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2734 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2737 list_del(&match->list);
/* NOTE(review): kfree(match) and found-counter lines appear to be
 * missing from this listing */
2743 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2744 MGMT_STATUS_INVALID_PARAMS);
2749 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2755 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2756 mgmt_class_complete);
2758 mgmt_pending_free(cmd);
2761 hci_dev_unlock(hdev);
/* hci_cmd_sync worker for Set Device Class: if the service cache was
 * active, cancel its delayed work and refresh EIR first, then write
 * the new Class of Device.
 */
2765 static int set_class_sync(struct hci_dev *hdev, void *data)
2769 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2770 cancel_delayed_work_sync(&hdev->service_cache);
2771 err = hci_update_eir_sync(hdev);
2777 return hci_update_class_sync(hdev);
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the major/minor
 * device class, then queue set_class_sync() to push it to the
 * controller. BR/EDR-only; busy-rejected while an EIR/class op is
 * pending; completes immediately when the adapter is powered off.
 */
2780 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2783 struct mgmt_cp_set_dev_class *cp = data;
2784 struct mgmt_pending_cmd *cmd;
2787 bt_dev_dbg(hdev, "sock %p", sk);
2789 if (!lmp_bredr_capable(hdev))
2790 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2791 MGMT_STATUS_NOT_SUPPORTED);
2795 if (pending_eir_or_class(hdev)) {
2796 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low two bits of minor and high three bits of major are reserved
 * and must be zero.
 */
2801 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2802 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2803 MGMT_STATUS_INVALID_PARAMS);
2807 hdev->major_class = cp->major;
2808 hdev->minor_class = cp->minor;
/* Powered off: nothing to send to the controller now. */
2810 if (!hdev_is_powered(hdev)) {
2811 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2812 hdev->dev_class, 3);
2816 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2822 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2823 mgmt_class_complete);
2825 mgmt_pending_free(cmd);
2828 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR
 * link keys with the supplied list. Validates key_count against the
 * wire length, rejects non-BR/EDR addresses and unknown key types,
 * updates the HCI_KEEP_DEBUG_KEYS flag, and skips blocked keys and
 * debug-combination keys when loading.
 */
2832 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2835 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so expected_len below cannot overflow u16. */
2836 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2837 sizeof(struct mgmt_link_key_info));
2838 u16 key_count, expected_len;
2842 bt_dev_dbg(hdev, "sock %p", sk);
2844 if (!lmp_bredr_capable(hdev))
2845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2846 MGMT_STATUS_NOT_SUPPORTED);
2848 key_count = __le16_to_cpu(cp->key_count);
2849 if (key_count > max_key_count) {
2850 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2852 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2853 MGMT_STATUS_INVALID_PARAMS);
/* The command length must match the declared key count exactly. */
2856 expected_len = struct_size(cp, keys, key_count);
2857 if (expected_len != len) {
2858 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2860 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2861 MGMT_STATUS_INVALID_PARAMS);
2864 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2866 MGMT_STATUS_INVALID_PARAMS);
2868 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: validate every entry before mutating any state. */
2871 for (i = 0; i < key_count; i++) {
2872 struct mgmt_link_key_info *key = &cp->keys[i];
2874 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2875 return mgmt_cmd_status(sk, hdev->id,
2876 MGMT_OP_LOAD_LINK_KEYS,
2877 MGMT_STATUS_INVALID_PARAMS);
/* Drop all previously stored keys before loading the new set. */
2882 hci_link_keys_clear(hdev);
2885 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2887 changed = hci_dev_test_and_clear_flag(hdev,
2888 HCI_KEEP_DEBUG_KEYS);
2891 new_settings(hdev, NULL);
/* Second pass: store keys, skipping blocked and debug entries. */
2893 for (i = 0; i < key_count; i++) {
2894 struct mgmt_link_key_info *key = &cp->keys[i];
2896 if (hci_is_blocked_key(hdev,
2897 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2899 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2904 /* Always ignore debug keys and require a new pairing if
2905 * the user wants to use them.
2907 if (key->type == HCI_LK_DEBUG_COMBINATION)
2910 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2911 key->type, key->pin_len, NULL);
2914 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2916 hci_dev_unlock(hdev);
/* Emit the MGMT Device Unpaired event for the given address,
 * optionally skipping the socket that initiated the unpair.
 */
2921 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2922 u8 addr_type, struct sock *skip_sk)
2924 struct mgmt_ev_device_unpaired ev;
2926 bacpy(&ev.addr.bdaddr, bdaddr);
2927 ev.addr.type = addr_type;
2929 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Completion callback for the queued unpair: broadcast the
 * Device Unpaired event (skipping the requester's socket), finish the
 * pending command, and free it.
 */
2933 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2935 struct mgmt_pending_cmd *cmd = data;
2936 struct mgmt_cp_unpair_device *cp = cmd->param;
2939 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2941 cmd->cmd_complete(cmd, err);
2942 mgmt_pending_free(cmd);
/* hci_cmd_sync worker for unpair: look up the live connection for the
 * address (ACL for BR/EDR, otherwise LE) and terminate it with
 * "remote user terminated" as the disconnect reason.
 */
2945 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2947 struct mgmt_pending_cmd *cmd = data;
2948 struct mgmt_cp_unpair_device *cp = cmd->param;
2949 struct hci_conn *conn;
2951 if (cp->addr.type == BDADDR_BREDR)
2952 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2955 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2956 le_addr_type(cp->addr.type));
2961 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing material for the
 * given address (link key for BR/EDR; SMP keys and conn params for LE)
 * and, if requested, terminate any live connection via a queued
 * unpair_device_sync(). (Some lines are elided in this extract.)
 */
2964 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2967 struct mgmt_cp_unpair_device *cp = data;
2968 struct mgmt_rp_unpair_device rp;
2969 struct hci_conn_params *params;
2970 struct mgmt_pending_cmd *cmd;
2971 struct hci_conn *conn;
/* The reply always echoes the requested address back. */
2975 memset(&rp, 0, sizeof(rp));
2976 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2977 rp.addr.type = cp->addr.type;
2979 if (!bdaddr_type_is_valid(cp->addr.type))
2980 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2981 MGMT_STATUS_INVALID_PARAMS,
2984 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2985 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2986 MGMT_STATUS_INVALID_PARAMS,
2991 if (!hdev_is_powered(hdev)) {
2992 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2993 MGMT_STATUS_NOT_POWERED, &rp,
2998 if (cp->addr.type == BDADDR_BREDR) {
2999 /* If disconnection is requested, then look up the
3000 * connection. If the remote device is connected, it
3001 * will be later used to terminate the link.
3003 * Setting it to NULL explicitly will cause no
3004 * termination of the link.
3007 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3012 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3014 err = mgmt_cmd_complete(sk, hdev->id,
3015 MGMT_OP_UNPAIR_DEVICE,
3016 MGMT_STATUS_NOT_PAIRED, &rp,
3024 /* LE address type */
3025 addr_type = le_addr_type(cp->addr.type);
3027 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3028 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3030 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3031 MGMT_STATUS_NOT_PAIRED, &rp,
3036 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
/* No live LE connection: drop stored connection parameters now. */
3038 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3043 /* Defer clearing up the connection parameters until closing to
3044 * give a chance of keeping them if a repairing happens.
3046 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3048 /* Disable auto-connection parameters if present */
3049 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3051 if (params->explicit_connect)
3052 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3054 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3057 /* If disconnection is not requested, then clear the connection
3058 * variable so that the link is not terminated.
3060 if (!cp->disconnect)
3064 /* If the connection variable is set, then termination of the
3065 * link is requested.
3068 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3070 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* A live link must be torn down: queue the sync disconnect and let
 * unpair_device_complete() send the reply and the event.
 */
3074 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3081 cmd->cmd_complete = addr_cmd_complete;
3083 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3084 unpair_device_complete);
3086 mgmt_pending_free(cmd);
3089 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validate the address, require the
 * adapter to be up and no disconnect already pending, look up the
 * live connection (ACL or LE), and issue an HCI disconnect with
 * "remote user terminated" as the reason.
 */
3093 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3096 struct mgmt_cp_disconnect *cp = data;
3097 struct mgmt_rp_disconnect rp;
3098 struct mgmt_pending_cmd *cmd;
3099 struct hci_conn *conn;
3102 bt_dev_dbg(hdev, "sock %p", sk);
3104 memset(&rp, 0, sizeof(rp));
3105 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3106 rp.addr.type = cp->addr.type;
3108 if (!bdaddr_type_is_valid(cp->addr.type))
3109 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3110 MGMT_STATUS_INVALID_PARAMS,
3115 if (!test_bit(HCI_UP, &hdev->flags)) {
3116 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3117 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one Disconnect command may be pending per adapter. */
3122 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3123 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3124 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3128 if (cp->addr.type == BDADDR_BREDR)
3129 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3132 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3133 le_addr_type(cp->addr.type));
3135 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3136 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3137 MGMT_STATUS_NOT_CONNECTED, &rp,
3142 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3148 cmd->cmd_complete = generic_cmd_complete;
3150 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3152 mgmt_pending_remove(cmd);
3155 hci_dev_unlock(hdev);
/* Map an HCI link type plus address type to the MGMT bdaddr type.
 * (NOTE(review): the case labels for link_type/addr_type are elided in
 * this extract; visible defaults fall back to BDADDR_LE_RANDOM within
 * the LE branch and BDADDR_BREDR otherwise.)
 */
3159 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3161 switch (link_type) {
3163 switch (addr_type) {
3164 case ADDR_LE_DEV_PUBLIC:
3165 return BDADDR_LE_PUBLIC;
3168 /* Fallback to LE Random address type */
3169 return BDADDR_LE_RANDOM;
3173 /* Fallback to BR/EDR type */
3174 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report all connections that have
 * been flagged as MGMT-connected. Two passes over the conn hash: one
 * to size the reply, one to fill it — SCO/eSCO links are excluded
 * from the filled entries, so the final length is recomputed.
 */
3178 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3181 struct mgmt_rp_get_connections *rp;
3186 bt_dev_dbg(hdev, "sock %p", sk);
3190 if (!hdev_is_powered(hdev)) {
3191 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3192 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply. */
3197 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3198 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3202 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
3209 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3210 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3212 bacpy(&rp->addr[i].bdaddr, &c->dst);
3213 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3214 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3219 rp->conn_count = cpu_to_le16(i);
3221 /* Recalculate length in case of filtered SCO connections, etc */
3222 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3223 struct_size(rp, addr, i));
3228 hci_dev_unlock(hdev);
/* Queue a pending MGMT command and send HCI_OP_PIN_CODE_NEG_REPLY for
 * the given address; the pending entry is removed on send failure.
 */
3232 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3233 struct mgmt_cp_pin_code_neg_reply *cp)
3235 struct mgmt_pending_cmd *cmd;
3238 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3243 cmd->cmd_complete = addr_cmd_complete;
3245 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3246 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3248 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward the user-supplied PIN to the
 * controller for the matching ACL connection. If high security is
 * pending and the PIN is not 16 bytes, a negative reply is sent
 * instead and the command fails with INVALID_PARAMS.
 */
3253 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3256 struct hci_conn *conn;
3257 struct mgmt_cp_pin_code_reply *cp = data;
3258 struct hci_cp_pin_code_reply reply;
3259 struct mgmt_pending_cmd *cmd;
3262 bt_dev_dbg(hdev, "sock %p", sk);
3266 if (!hdev_is_powered(hdev)) {
3267 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3268 MGMT_STATUS_NOT_POWERED);
3272 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3274 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3275 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise reject the
 * pairing attempt with a negative reply.
 */
3279 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3280 struct mgmt_cp_pin_code_neg_reply ncp;
3282 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3284 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3286 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3288 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3289 MGMT_STATUS_INVALID_PARAMS);
3294 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3300 cmd->cmd_complete = addr_cmd_complete;
3302 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3303 reply.pin_len = cp->pin_len;
3304 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3306 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3308 mgmt_pending_remove(cmd);
3311 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validate and store the adapter's
 * IO capability used for future pairing; replies immediately.
 */
3315 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3318 struct mgmt_cp_set_io_capability *cp = data;
3320 bt_dev_dbg(hdev, "sock %p", sk);
3322 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3323 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3324 MGMT_STATUS_INVALID_PARAMS);
3328 hdev->io_capability = cp->io_capability;
3330 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3332 hci_dev_unlock(hdev);
3334 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is this
 * connection, if any. (Return statements are elided in this extract.)
 */
3338 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3340 struct hci_dev *hdev = conn->hdev;
3341 struct mgmt_pending_cmd *cmd;
3343 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3344 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3347 if (cmd->user_data != conn)
/* Finish a Pair Device command: send the reply with the peer address,
 * detach all pairing callbacks from the connection, drop the reference
 * taken for pairing, and keep the conn params (device is now paired).
 */
3356 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3358 struct mgmt_rp_pair_device rp;
3359 struct hci_conn *conn = cmd->user_data;
3362 bacpy(&rp.addr.bdaddr, &conn->dst);
3363 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3365 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3366 status, &rp, sizeof(rp));
3368 /* So we don't get further callbacks for this connection */
3369 conn->connect_cfm_cb = NULL;
3370 conn->security_cfm_cb = NULL;
3371 conn->disconn_cfm_cb = NULL;
3373 hci_conn_drop(conn);
3375 /* The device is paired so there is no need to remove
3376 * its connection parameters anymore.
3378 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing finishes: complete any pending
 * Pair Device command for this connection with success or failure.
 */
3385 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3387 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3388 struct mgmt_pending_cmd *cmd;
3390 cmd = find_pairing(conn);
3392 cmd->cmd_complete(cmd, status);
3393 mgmt_pending_remove(cmd);
/* BR/EDR connection callback: map the HCI status to a MGMT status and
 * complete the matching pending Pair Device command.
 */
3397 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3399 struct mgmt_pending_cmd *cmd;
3401 BT_DBG("status %u", status);
3403 cmd = find_pairing(conn);
3405 BT_DBG("Unable to find a pending command");
3409 cmd->cmd_complete(cmd, mgmt_status(status));
3410 mgmt_pending_remove(cmd);
/* LE connection callback: complete the pending Pair Device command.
 * (NOTE(review): an elided early-return here presumably ignores
 * status 0, since LE connect alone does not prove pairing finished.)
 */
3413 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3415 struct mgmt_pending_cmd *cmd;
3417 BT_DBG("status %u", status);
3422 cmd = find_pairing(conn);
3424 BT_DBG("Unable to find a pending command");
3428 cmd->cmd_complete(cmd, mgmt_status(status));
3429 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a
 * remote device. Establishes an ACL (BR/EDR) or LE connection, wires
 * pairing callbacks onto it, and defers completion to those callbacks
 * unless the link is already connected and secure.
 */
3432 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3435 struct mgmt_cp_pair_device *cp = data;
3436 struct mgmt_rp_pair_device rp;
3437 struct mgmt_pending_cmd *cmd;
3438 u8 sec_level, auth_type;
3439 struct hci_conn *conn;
3442 bt_dev_dbg(hdev, "sock %p", sk);
3444 memset(&rp, 0, sizeof(rp));
3445 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3446 rp.addr.type = cp->addr.type;
3448 if (!bdaddr_type_is_valid(cp->addr.type))
3449 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3450 MGMT_STATUS_INVALID_PARAMS,
3453 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3454 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3455 MGMT_STATUS_INVALID_PARAMS,
3460 if (!hdev_is_powered(hdev)) {
3461 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3462 MGMT_STATUS_NOT_POWERED, &rp,
3467 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3468 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3469 MGMT_STATUS_ALREADY_PAIRED, &rp,
3474 sec_level = BT_SECURITY_MEDIUM;
3475 auth_type = HCI_AT_DEDICATED_BONDING;
3477 if (cp->addr.type == BDADDR_BREDR) {
3478 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3479 auth_type, CONN_REASON_PAIR_DEVICE);
3481 u8 addr_type = le_addr_type(cp->addr.type);
3482 struct hci_conn_params *p;
3484 /* When pairing a new device, it is expected to remember
3485 * this device for future connections. Adding the connection
3486 * parameter information ahead of time allows tracking
3487 * of the peripheral preferred values and will speed up any
3488 * further connection establishment.
3490 * If connection parameters already exist, then they
3491 * will be kept and this function does nothing.
3493 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3495 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3496 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3498 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3499 sec_level, HCI_LE_CONN_TIMEOUT,
3500 CONN_REASON_PAIR_DEVICE);
/* Map connection-attempt errors to MGMT status codes. */
3506 if (PTR_ERR(conn) == -EBUSY)
3507 status = MGMT_STATUS_BUSY;
3508 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3509 status = MGMT_STATUS_NOT_SUPPORTED;
3510 else if (PTR_ERR(conn) == -ECONNREFUSED)
3511 status = MGMT_STATUS_REJECTED;
3513 status = MGMT_STATUS_CONNECT_FAILED;
3515 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3516 status, &rp, sizeof(rp));
/* A connection with callbacks already set is being paired elsewhere. */
3520 if (conn->connect_cfm_cb) {
3521 hci_conn_drop(conn);
3522 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3523 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3527 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3530 hci_conn_drop(conn);
3534 cmd->cmd_complete = pairing_complete;
3536 /* For LE, just connecting isn't a proof that the pairing finished */
3537 if (cp->addr.type == BDADDR_BREDR) {
3538 conn->connect_cfm_cb = pairing_complete_cb;
3539 conn->security_cfm_cb = pairing_complete_cb;
3540 conn->disconn_cfm_cb = pairing_complete_cb;
3542 conn->connect_cfm_cb = le_pairing_complete_cb;
3543 conn->security_cfm_cb = le_pairing_complete_cb;
3544 conn->disconn_cfm_cb = le_pairing_complete_cb;
3547 conn->io_capability = cp->io_cap;
/* Hold a reference to the connection for the pending command. */
3548 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: finish immediately. */
3550 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3551 hci_conn_security(conn, sec_level, auth_type, true)) {
3552 cmd->cmd_complete(cmd, 0);
3553 mgmt_pending_remove(cmd);
3559 hci_dev_unlock(hdev);
/* hci_cmd_sync worker: abort the connection whose handle was smuggled
 * through the data pointer via ERR_PTR() by the queuing site.
 */
3563 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3565 struct hci_conn *conn;
3566 u16 handle = PTR_ERR(data);
3568 conn = hci_conn_hash_lookup_handle(hdev, handle);
3572 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: cancel the in-progress Pair
 * Device command for the given address, remove any partial pairing
 * state, and tear down the link if it was created for this pairing.
 */
3575 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3578 struct mgmt_addr_info *addr = data;
3579 struct mgmt_pending_cmd *cmd;
3580 struct hci_conn *conn;
3583 bt_dev_dbg(hdev, "sock %p", sk);
3587 if (!hdev_is_powered(hdev)) {
3588 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3589 MGMT_STATUS_NOT_POWERED);
3593 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3595 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3596 MGMT_STATUS_INVALID_PARAMS);
3600 conn = cmd->user_data;
/* Cancellation must target the same peer the pairing was for. */
3602 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3603 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3604 MGMT_STATUS_INVALID_PARAMS);
3608 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3609 mgmt_pending_remove(cmd);
3611 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3612 addr, sizeof(*addr));
3614 /* Since user doesn't want to proceed with the connection, abort any
3615 * ongoing pairing and then terminate the link if it was created
3616 * because of the pair device action.
3618 if (addr->type == BDADDR_BREDR)
3619 hci_remove_link_key(hdev, &addr->bdaddr);
3621 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3622 le_addr_type(addr->type));
3624 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3625 hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
3629 hci_dev_unlock(hdev);
/* Common backend for the user confirm/passkey (neg) reply commands:
 * for LE addresses the response goes to the SMP layer; for BR/EDR the
 * corresponding HCI command is sent with a pending MGMT entry.
 * @mgmt_op: MGMT opcode being answered (for replies/status).
 * @hci_op:  HCI opcode to send on the BR/EDR path.
 * @passkey: only used when hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
3633 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3634 struct mgmt_addr_info *addr, u16 mgmt_op,
3635 u16 hci_op, __le32 passkey)
3637 struct mgmt_pending_cmd *cmd;
3638 struct hci_conn *conn;
3643 if (!hdev_is_powered(hdev)) {
3644 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3645 MGMT_STATUS_NOT_POWERED, addr,
3650 if (addr->type == BDADDR_BREDR)
3651 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3653 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3654 le_addr_type(addr->type));
3657 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3658 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP. */
3663 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3664 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3666 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3667 MGMT_STATUS_SUCCESS, addr,
3670 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3671 MGMT_STATUS_FAILED, addr,
3677 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3683 cmd->cmd_complete = addr_cmd_complete;
3685 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey; all other ops send bdaddr only. */
3686 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3687 struct hci_cp_user_passkey_reply cp;
3689 bacpy(&cp.bdaddr, &addr->bdaddr);
3690 cp.passkey = passkey;
3691 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3693 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3697 mgmt_pending_remove(cmd);
3700 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around user_pairing_resp(). */
3704 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3705 void *data, u16 len)
3707 struct mgmt_cp_pin_code_neg_reply *cp = data;
3709 bt_dev_dbg(hdev, "sock %p", sk);
3711 return user_pairing_resp(sk, hdev, &cp->addr,
3712 MGMT_OP_PIN_CODE_NEG_REPLY,
3713 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: validate the fixed-size parameter block
 * and forward to user_pairing_resp().
 */
3716 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3719 struct mgmt_cp_user_confirm_reply *cp = data;
3721 bt_dev_dbg(hdev, "sock %p", sk);
3723 if (len != sizeof(*cp))
3724 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3725 MGMT_STATUS_INVALID_PARAMS);
3727 return user_pairing_resp(sk, hdev, &cp->addr,
3728 MGMT_OP_USER_CONFIRM_REPLY,
3729 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper around user_pairing_resp(). */
3732 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3733 void *data, u16 len)
3735 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3737 bt_dev_dbg(hdev, "sock %p", sk);
3739 return user_pairing_resp(sk, hdev, &cp->addr,
3740 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3741 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: forwards the user's passkey via
 * user_pairing_resp().
 */
3744 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3747 struct mgmt_cp_user_passkey_reply *cp = data;
3749 bt_dev_dbg(hdev, "sock %p", sk);
3751 return user_pairing_resp(sk, hdev, &cp->addr,
3752 MGMT_OP_USER_PASSKEY_REPLY,
3753 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper around user_pairing_resp(). */
3756 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3757 void *data, u16 len)
3759 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3761 bt_dev_dbg(hdev, "sock %p", sk);
3763 return user_pairing_resp(sk, hdev, &cp->addr,
3764 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3765 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the current advertising instance uses any of the given flags
 * (e.g. local name, appearance), expire it: cancel the advertising
 * timeout and schedule the next instance.
 */
3768 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3770 struct adv_info *adv_instance;
3772 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3776 /* stop if current instance doesn't need to be changed */
3777 if (!(adv_instance->flags & flags))
3780 cancel_adv_timeout(hdev);
3782 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3786 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
/* Sync worker: expire advertising instances carrying the local name. */
3791 static int name_changed_sync(struct hci_dev *hdev, void *data)
3793 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
/* Completion callback for Set Local Name: reply to the requester with
 * status or success, and refresh LE advertising if it is active so
 * the new name is picked up.
 */
3796 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3798 struct mgmt_pending_cmd *cmd = data;
3799 struct mgmt_cp_set_local_name *cp = cmd->param;
3800 u8 status = mgmt_status(err);
3802 bt_dev_dbg(hdev, "err %d", err);
/* Bail out if this command is no longer the pending one. */
3804 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3808 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3811 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3814 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3815 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL)
3818 mgmt_pending_remove(cmd);
/* Sync worker for Set Local Name: push the name via HCI and refresh
 * EIR on BR/EDR; on LE, refresh scan response data when advertising.
 */
3821 static int set_name_sync(struct hci_dev *hdev, void *data)
3823 if (lmp_bredr_capable(hdev)) {
3824 hci_update_name_sync(hdev);
3825 hci_update_eir_sync(hdev);
3828 /* The name is stored in the scan response data and so
3829 * no need to update the advertising data here.
3831 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3832 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_LOCAL_NAME handler: store the new (short) name. If
 * nothing changed, reply immediately; if powered off, store and emit
 * the Local Name Changed event; otherwise queue set_name_sync().
 */
3837 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3840 struct mgmt_cp_set_local_name *cp = data;
3841 struct mgmt_pending_cmd *cmd;
3844 bt_dev_dbg(hdev, "sock %p", sk);
3848 /* If the old values are the same as the new ones just return a
3849 * direct command complete event.
3851 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3852 !memcmp(hdev->short_name, cp->short_name,
3853 sizeof(hdev->short_name))) {
3854 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3859 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: store the name and notify listeners without HCI. */
3861 if (!hdev_is_powered(hdev)) {
3862 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3864 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3869 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3870 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3871 ext_info_changed(hdev, sk);
3876 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3880 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3884 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3885 MGMT_STATUS_FAILED);
3888 mgmt_pending_remove(cmd);
/* Name is stored only once the sync update has been queued. */
3893 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3896 hci_dev_unlock(hdev);
/* Sync worker: expire advertising instances carrying the appearance. */
3900 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3902 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
/* MGMT_OP_SET_APPEARANCE handler (LE only): store the new appearance
 * value; if it changed while LE advertising is active, queue a
 * refresh, and notify extended-info listeners.
 */
3905 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3908 struct mgmt_cp_set_appearance *cp = data;
3912 bt_dev_dbg(hdev, "sock %p", sk);
3914 if (!lmp_le_capable(hdev))
3915 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3916 MGMT_STATUS_NOT_SUPPORTED);
3918 appearance = le16_to_cpu(cp->appearance);
3922 if (hdev->appearance != appearance) {
3923 hdev->appearance = appearance;
3925 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3926 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3929 ext_info_changed(hdev, sk);
3932 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3935 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported,
 * selected, and configurable PHYs for this adapter.
 */
3940 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3941 void *data, u16 len)
3943 struct mgmt_rp_get_phy_configuration rp;
3945 bt_dev_dbg(hdev, "sock %p", sk);
3949 memset(&rp, 0, sizeof(rp));
3951 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3952 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3953 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3955 hci_dev_unlock(hdev);
3957 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast the PHY Configuration Changed event with the currently
 * selected PHYs, skipping the socket that caused the change.
 */
3961 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3963 struct mgmt_ev_phy_configuration_changed ev;
3965 memset(&ev, 0, sizeof(ev));
3967 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3969 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion callback for Set PHY Configuration: derive the final
 * status from the queue error, the reply skb, or its first status
 * byte; reply to the requester; broadcast the change on success; and
 * release the skb and pending command.
 */
3973 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3975 struct mgmt_pending_cmd *cmd = data;
3976 struct sk_buff *skb = cmd->skb;
3977 u8 status = mgmt_status(err);
3979 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3984 status = MGMT_STATUS_FAILED;
3985 else if (IS_ERR(skb))
3986 status = mgmt_status(PTR_ERR(skb));
/* First byte of the HCI reply carries the controller status. */
3988 status = mgmt_status(skb->data[0]);
3991 bt_dev_dbg(hdev, "status %d", status);
3994 mgmt_cmd_status(cmd->sk, hdev->id,
3995 MGMT_OP_SET_PHY_CONFIGURATION, status);
3997 mgmt_cmd_complete(cmd->sk, hdev->id,
3998 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4001 mgmt_phy_configuration_changed(hdev, cmd->sk);
4004 if (skb && !IS_ERR(skb))
4007 mgmt_pending_remove(cmd);
/* Sync worker for Set PHY Configuration: translate the MGMT PHY
 * selection bits into an HCI LE Set Default PHY command and send it
 * synchronously, keeping the reply skb on the pending command.
 */
4010 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4012 struct mgmt_pending_cmd *cmd = data;
4013 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4014 struct hci_cp_le_set_default_phy cp_phy;
4015 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4017 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no TX/RX preference". */
4019 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4020 cp_phy.all_phys |= 0x01;
4022 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4023 cp_phy.all_phys |= 0x02;
4025 if (selected_phys & MGMT_PHY_LE_1M_TX)
4026 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4028 if (selected_phys & MGMT_PHY_LE_2M_TX)
4029 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4031 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4032 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4034 if (selected_phys & MGMT_PHY_LE_1M_RX)
4035 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4037 if (selected_phys & MGMT_PHY_LE_2M_RX)
4038 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4040 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4041 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4043 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4044 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
/* MGMT_OP_SET_PHY_CONFIGURATION handler: validate the requested PHY
 * selection against what the controller supports and what is
 * configurable, apply BR/EDR selections by recomputing the ACL packet
 * type mask, and queue an LE default-PHY update when the LE part of
 * the selection changed.
 */
4049 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4050 void *data, u16 len)
4052 struct mgmt_cp_set_phy_configuration *cp = data;
4053 struct mgmt_pending_cmd *cmd;
4054 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
/* DH1/DM1 are always allowed; the rest is derived below. */
4055 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4056 bool changed = false;
4059 bt_dev_dbg(hdev, "sock %p", sk);
4061 configurable_phys = get_configurable_phys(hdev);
4062 supported_phys = get_supported_phys(hdev);
4063 selected_phys = __le32_to_cpu(cp->selected_phys);
4065 if (selected_phys & ~supported_phys)
4066 return mgmt_cmd_status(sk, hdev->id,
4067 MGMT_OP_SET_PHY_CONFIGURATION,
4068 MGMT_STATUS_INVALID_PARAMS);
/* Non-configurable supported PHYs must stay selected. */
4070 unconfigure_phys = supported_phys & ~configurable_phys;
4072 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4073 return mgmt_cmd_status(sk, hdev->id,
4074 MGMT_OP_SET_PHY_CONFIGURATION,
4075 MGMT_STATUS_INVALID_PARAMS);
4077 if (selected_phys == get_selected_phys(hdev))
4078 return mgmt_cmd_complete(sk, hdev->id,
4079 MGMT_OP_SET_PHY_CONFIGURATION,
4084 if (!hdev_is_powered(hdev)) {
4085 err = mgmt_cmd_status(sk, hdev->id,
4086 MGMT_OP_SET_PHY_CONFIGURATION,
4087 MGMT_STATUS_REJECTED);
4091 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4092 err = mgmt_cmd_status(sk, hdev->id,
4093 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR 1M multi-slot bits set the corresponding packet types;
 * EDR bits are inverted in pkt_type ("use 2DHx/3DHx" is the
 * absence of the HCI_2DHx/HCI_3DHx "do not use" bit).
 */
4098 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4099 pkt_type |= (HCI_DH3 | HCI_DM3);
4101 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4103 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4104 pkt_type |= (HCI_DH5 | HCI_DM5);
4106 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4108 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4109 pkt_type &= ~HCI_2DH1;
4111 pkt_type |= HCI_2DH1;
4113 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4114 pkt_type &= ~HCI_2DH3;
4116 pkt_type |= HCI_2DH3;
4118 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4119 pkt_type &= ~HCI_2DH5;
4121 pkt_type |= HCI_2DH5;
4123 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4124 pkt_type &= ~HCI_3DH1;
4126 pkt_type |= HCI_3DH1;
4128 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4129 pkt_type &= ~HCI_3DH3;
4131 pkt_type |= HCI_3DH3;
4133 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4134 pkt_type &= ~HCI_3DH5;
4136 pkt_type |= HCI_3DH5;
4138 if (pkt_type != hdev->pkt_type) {
4139 hdev->pkt_type = pkt_type;
/* LE selection unchanged: complete now, notifying only if the
 * BR/EDR packet type actually changed.
 */
4143 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4144 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4146 mgmt_phy_configuration_changed(hdev, sk);
4148 err = mgmt_cmd_complete(sk, hdev->id,
4149 MGMT_OP_SET_PHY_CONFIGURATION,
4155 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4160 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4161 set_default_phy_complete);
4164 err = mgmt_cmd_status(sk, hdev->id,
4165 MGMT_OP_SET_PHY_CONFIGURATION,
4166 MGMT_STATUS_FAILED);
4169 mgmt_pending_remove(cmd);
4173 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the adapter's blocked-key
 * list with the supplied entries. Validates key_count against the
 * wire length, clears the old list, and adds each entry RCU-safely;
 * allocation failure aborts with NO_RESOURCES.
 */
4178 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4181 int err = MGMT_STATUS_SUCCESS;
4182 struct mgmt_cp_set_blocked_keys *keys = data;
/* Upper bound so expected_len below cannot overflow u16. */
4183 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4184 sizeof(struct mgmt_blocked_key_info));
4185 u16 key_count, expected_len;
4188 bt_dev_dbg(hdev, "sock %p", sk);
4190 key_count = __le16_to_cpu(keys->key_count);
4191 if (key_count > max_key_count) {
4192 bt_dev_err(hdev, "too big key_count value %u", key_count);
4193 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4194 MGMT_STATUS_INVALID_PARAMS);
4197 expected_len = struct_size(keys, keys, key_count);
4198 if (expected_len != len) {
4199 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4201 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4202 MGMT_STATUS_INVALID_PARAMS);
/* Old entries are always dropped, even if the new load fails. */
4207 hci_blocked_keys_clear(hdev);
4209 for (i = 0; i < key_count; ++i) {
4210 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4213 err = MGMT_STATUS_NO_RESOURCES;
4217 b->type = keys->keys[i].type;
4218 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4219 list_add_rcu(&b->list, &hdev->blocked_keys);
4221 hci_dev_unlock(hdev);
4223 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggles HCI_WIDEBAND_SPEECH_ENABLED.
 * Rejected when the controller lacks HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
 * when the parameter is not 0/1, or when the device is powered and the
 * requested state differs from the current one (can only change while off).
 */
4227 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4228 void *data, u16 len)
4230 struct mgmt_mode *cp = data;
4232 bool changed = false;
4234 bt_dev_dbg(hdev, "sock %p", sk);
4236 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4237 return mgmt_cmd_status(sk, hdev->id,
4238 MGMT_OP_SET_WIDEBAND_SPEECH,
4239 MGMT_STATUS_NOT_SUPPORTED);
4241 if (cp->val != 0x00 && cp->val != 0x01)
4242 return mgmt_cmd_status(sk, hdev->id,
4243 MGMT_OP_SET_WIDEBAND_SPEECH,
4244 MGMT_STATUS_INVALID_PARAMS);
/* Powered and state would actually change -> reject. */
4248 if (hdev_is_powered(hdev) &&
4249 !!cp->val != hci_dev_test_flag(hdev,
4250 HCI_WIDEBAND_SPEECH_ENABLED)) {
4251 err = mgmt_cmd_status(sk, hdev->id,
4252 MGMT_OP_SET_WIDEBAND_SPEECH,
4253 MGMT_STATUS_REJECTED);
/* test_and_set/clear report whether the flag really flipped. */
4258 changed = !hci_dev_test_and_set_flag(hdev,
4259 HCI_WIDEBAND_SPEECH_ENABLED);
4261 changed = hci_dev_test_and_clear_flag(hdev,
4262 HCI_WIDEBAND_SPEECH_ENABLED);
4264 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
/* New Settings event is only emitted when something changed
 * (the guarding condition sits in a truncated line). */
4269 err = new_settings(hdev, sk);
4272 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: builds a capability response made of
 * EIR-style TLV entries — security flags, max encryption key sizes, and
 * (when available) the LE TX power range — and completes the command.
 */
4276 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4277 void *data, u16 data_len)
4280 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4283 u8 tx_power_range[2];
4285 bt_dev_dbg(hdev, "sock %p", sk);
4287 memset(&buf, 0, sizeof(buf));
4291 /* When the Read Simple Pairing Options command is supported, then
4292 * the remote public key validation is supported.
4294 * Alternatively, when Microsoft extensions are available, they can
4295 * indicate support for public key validation as well.
4297 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4298 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4300 flags |= 0x02; /* Remote public key validation (LE) */
4302 /* When the Read Encryption Key Size command is supported, then the
4303 * encryption key size is enforced.
4305 if (hdev->commands[20] & 0x10)
4306 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4308 flags |= 0x08; /* Encryption key size enforcement (LE) */
4310 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4313 /* When the Read Simple Pairing Options command is supported, then
4314 * also max encryption key size information is provided.
4316 if (hdev->commands[41] & 0x08)
4317 cap_len = eir_append_le16(rp->cap, cap_len,
4318 MGMT_CAP_MAX_ENC_KEY_SIZE,
4319 hdev->max_enc_key_size);
/* SMP max key size is always reported. */
4321 cap_len = eir_append_le16(rp->cap, cap_len,
4322 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4323 SMP_MAX_ENC_KEY_SIZE);
4325 /* Append the min/max LE tx power parameters if we were able to fetch
4326 * it from the controller
4328 if (hdev->commands[38] & 0x80) {
4329 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4330 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4331 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4335 rp->cap_len = cpu_to_le16(cap_len);
4337 hci_dev_unlock(hdev);
4339 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4340 rp, sizeof(*rp) + cap_len);
/* Experimental-feature UUID tables.  Each array stores the UUID in
 * little-endian byte order, i.e. byte-reversed relative to the
 * human-readable string in the comment above it.  The debug UUID is only
 * compiled in with CONFIG_BT_FEATURE_DEBUG (its #endif falls in a
 * truncated line of this listing).
 */
4343 #ifdef CONFIG_BT_FEATURE_DEBUG
4344 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4345 static const u8 debug_uuid[16] = {
4346 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4347 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4351 /* 330859bc-7506-492d-9370-9a6f0614037f */
4352 static const u8 quality_report_uuid[16] = {
4353 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4354 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4357 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4358 static const u8 offload_codecs_uuid[16] = {
4359 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4360 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4363 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4364 static const u8 le_simultaneous_roles_uuid[16] = {
4365 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4366 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4369 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4370 static const u8 rpa_resolution_uuid[16] = {
4371 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4372 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4375 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4376 static const u8 iso_socket_uuid[16] = {
4377 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4378 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4381 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4382 static const u8 mgmt_mesh_uuid[16] = {
4383 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4384 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: lists every experimental feature
 * applicable to this (possibly NULL / index-less) controller as a
 * UUID+flags pair.  BIT(0) in flags = feature currently enabled.  Also arms
 * HCI_MGMT_EXP_FEATURE_EVENTS on the socket so the client gets updates.
 */
4387 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4388 void *data, u16 data_len)
4390 struct mgmt_rp_read_exp_features_info *rp;
4396 bt_dev_dbg(hdev, "sock %p", sk);
4398 /* Enough space for 7 features */
4399 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4400 rp = kzalloc(len, GFP_KERNEL);
4404 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Debug feature is index-less: reported whether or not hdev is set. */
4406 flags = bt_dbg_get() ? BIT(0) : 0;
4408 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4409 rp->features[idx].flags = cpu_to_le32(flags);
4414 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4415 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4420 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4421 rp->features[idx].flags = cpu_to_le32(flags);
4425 if (hdev && ll_privacy_capable(hdev)) {
4426 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
/* BIT(1) marks "changing this toggles supported settings". */
4427 flags = BIT(0) | BIT(1);
4431 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4432 rp->features[idx].flags = cpu_to_le32(flags);
4436 if (hdev && (aosp_has_quality_report(hdev) ||
4437 hdev->set_quality_report)) {
4438 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4443 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4444 rp->features[idx].flags = cpu_to_le32(flags);
4448 if (hdev && hdev->get_data_path_id) {
4449 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4454 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4455 rp->features[idx].flags = cpu_to_le32(flags);
4459 if (IS_ENABLED(CONFIG_BT_LE)) {
4460 flags = iso_enabled() ? BIT(0) : 0;
4461 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4462 rp->features[idx].flags = cpu_to_le32(flags);
4466 if (hdev && lmp_le_capable(hdev)) {
4467 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4472 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4473 rp->features[idx].flags = cpu_to_le32(flags);
4477 rp->feature_count = cpu_to_le16(idx);
4479 /* After reading the experimental features information, enable
4480 * the events to update client on any future change.
4482 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4484 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4485 MGMT_OP_READ_EXP_FEATURES_INFO,
4486 0, rp, sizeof(*rp) + (20 * idx));
/* Emits MGMT_EV_EXP_FEATURE_CHANGED for the RPA-resolution (LL privacy)
 * feature; BIT(1) is always set to signal that supported settings changed.
 * Also mirrors the state into hdev->conn_flags (DEVICE_PRIVACY).
 */
4492 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4495 struct mgmt_ev_exp_feature_changed ev;
4497 memset(&ev, 0, sizeof(ev));
4498 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4499 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4501 // Do we need to be atomic with the conn_flags?
4502 if (enabled && privacy_mode_capable(hdev))
4503 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4505 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
/* Only sockets that armed HCI_MGMT_EXP_FEATURE_EVENTS receive this. */
4507 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4509 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Generic experimental-feature-changed notification for the given UUID,
 * sent to sockets with HCI_MGMT_EXP_FEATURE_EVENTS set, skipping @skip
 * (the socket that issued the change).
 */
4513 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4514 bool enabled, struct sock *skip)
4516 struct mgmt_ev_exp_feature_changed ev;
4518 memset(&ev, 0, sizeof(ev));
4519 memcpy(ev.uuid, uuid, 16);
4520 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4522 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4524 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Table-entry helper for exp_features[]: pairs a feature UUID with its
 * set_func handler.  NOTE(review): the initializer lines between these two
 * (opening brace and .uuid assignment) fall in truncated lines.
 */
4527 #define EXP_FEAT(_uuid, _set_func) \
4530 .set_func = _set_func, \
4533 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID disables every experimental feature at once:
 * kernel debug (when built in) and LL privacy (only while powered off).
 * Always replies with a zeroed UUID and flags = 0.
 */
4534 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4535 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4537 struct mgmt_rp_set_exp_feature rp;
4539 memset(rp.uuid, 0, 16);
4540 rp.flags = cpu_to_le32(0);
4542 #ifdef CONFIG_BT_FEATURE_DEBUG
4544 bool changed = bt_dbg_get();
/* Notify other sockets only when the debug state actually changed
 * (guarding condition sits in a truncated line). */
4549 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4553 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4556 changed = hci_dev_test_and_clear_flag(hdev,
4557 HCI_ENABLE_LL_PRIVACY);
4559 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4563 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4565 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4566 MGMT_OP_SET_EXP_FEATURE, 0,
4570 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggles kernel Bluetooth debug output (bt_dbg).  Index-less command:
 * must be sent without a controller index; parameter is one boolean octet.
 */
4571 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4572 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4574 struct mgmt_rp_set_exp_feature rp;
4579 /* Command requires to use the non-controller index */
4581 return mgmt_cmd_status(sk, hdev->id,
4582 MGMT_OP_SET_EXP_FEATURE,
4583 MGMT_STATUS_INVALID_INDEX);
4585 /* Parameters are limited to a single octet */
4586 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4587 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4588 MGMT_OP_SET_EXP_FEATURE,
4589 MGMT_STATUS_INVALID_PARAMS);
4591 /* Only boolean on/off is supported */
4592 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4593 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4594 MGMT_OP_SET_EXP_FEATURE,
4595 MGMT_STATUS_INVALID_PARAMS);
4597 val = !!cp->param[0];
4598 changed = val ? !bt_dbg_get() : bt_dbg_get();
4601 memcpy(rp.uuid, debug_uuid, 16);
4602 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4604 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4606 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4607 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast change to other listeners (issuer excluded). */
4611 exp_feature_changed(hdev, debug_uuid, val, sk);
/* Toggles the experimental MGMT mesh feature (HCI_MESH_EXPERIMENTAL).
 * Requires a controller index; parameter is one boolean octet.  Disabling
 * also clears the active HCI_MESH flag.
 */
4617 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4618 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4620 struct mgmt_rp_set_exp_feature rp;
4624 /* Command requires to use the controller index */
4626 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4627 MGMT_OP_SET_EXP_FEATURE,
4628 MGMT_STATUS_INVALID_INDEX);
4630 /* Parameters are limited to a single octet */
4631 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4632 return mgmt_cmd_status(sk, hdev->id,
4633 MGMT_OP_SET_EXP_FEATURE,
4634 MGMT_STATUS_INVALID_PARAMS);
4636 /* Only boolean on/off is supported */
4637 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4638 return mgmt_cmd_status(sk, hdev->id,
4639 MGMT_OP_SET_EXP_FEATURE,
4640 MGMT_STATUS_INVALID_PARAMS);
4642 val = !!cp->param[0];
4645 changed = !hci_dev_test_and_set_flag(hdev,
4646 HCI_MESH_EXPERIMENTAL);
4648 hci_dev_clear_flag(hdev, HCI_MESH);
4649 changed = hci_dev_test_and_clear_flag(hdev,
4650 HCI_MESH_EXPERIMENTAL);
4653 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4654 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4656 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4658 err = mgmt_cmd_complete(sk, hdev->id,
4659 MGMT_OP_SET_EXP_FEATURE, 0,
4663 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
/* Toggles experimental LL privacy / RPA resolution
 * (HCI_ENABLE_LL_PRIVACY).  Only allowed while the controller is powered
 * off; enabling also clears HCI_ADVERTISING.  The reply flags carry BIT(1)
 * to signal that the supported settings changed.
 */
4668 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4669 struct mgmt_cp_set_exp_feature *cp,
4672 struct mgmt_rp_set_exp_feature rp;
4677 /* Command requires to use the controller index */
4679 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4680 MGMT_OP_SET_EXP_FEATURE,
4681 MGMT_STATUS_INVALID_INDEX);
4683 /* Changes can only be made when controller is powered down */
4684 if (hdev_is_powered(hdev))
4685 return mgmt_cmd_status(sk, hdev->id,
4686 MGMT_OP_SET_EXP_FEATURE,
4687 MGMT_STATUS_REJECTED);
4689 /* Parameters are limited to a single octet */
4690 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4691 return mgmt_cmd_status(sk, hdev->id,
4692 MGMT_OP_SET_EXP_FEATURE,
4693 MGMT_STATUS_INVALID_PARAMS);
4695 /* Only boolean on/off is supported */
4696 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4697 return mgmt_cmd_status(sk, hdev->id,
4698 MGMT_OP_SET_EXP_FEATURE,
4699 MGMT_STATUS_INVALID_PARAMS);
4701 val = !!cp->param[0];
4704 changed = !hci_dev_test_and_set_flag(hdev,
4705 HCI_ENABLE_LL_PRIVACY);
4706 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4708 /* Enable LL privacy + supported settings changed */
4709 flags = BIT(0) | BIT(1);
4711 changed = hci_dev_test_and_clear_flag(hdev,
4712 HCI_ENABLE_LL_PRIVACY);
4714 /* Disable LL privacy + supported settings changed */
4718 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4719 rp.flags = cpu_to_le32(flags);
4721 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4723 err = mgmt_cmd_complete(sk, hdev->id,
4724 MGMT_OP_SET_EXP_FEATURE, 0,
/* Dedicated notifier also updates hdev->conn_flags. */
4728 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Toggles the controller quality-report feature (HCI_QUALITY_REPORT).
 * Prefers the driver hook hdev->set_quality_report; falls back to the AOSP
 * vendor extension.  Runs under hci_req_sync_lock because the setter may
 * issue HCI commands.
 */
4733 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4734 struct mgmt_cp_set_exp_feature *cp,
4737 struct mgmt_rp_set_exp_feature rp;
4741 /* Command requires to use a valid controller index */
4743 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4744 MGMT_OP_SET_EXP_FEATURE,
4745 MGMT_STATUS_INVALID_INDEX);
4747 /* Parameters are limited to a single octet */
4748 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4749 return mgmt_cmd_status(sk, hdev->id,
4750 MGMT_OP_SET_EXP_FEATURE,
4751 MGMT_STATUS_INVALID_PARAMS);
4753 /* Only boolean on/off is supported */
4754 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4755 return mgmt_cmd_status(sk, hdev->id,
4756 MGMT_OP_SET_EXP_FEATURE,
4757 MGMT_STATUS_INVALID_PARAMS);
4759 hci_req_sync_lock(hdev);
4761 val = !!cp->param[0];
4762 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4764 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4765 err = mgmt_cmd_status(sk, hdev->id,
4766 MGMT_OP_SET_EXP_FEATURE,
4767 MGMT_STATUS_NOT_SUPPORTED);
4768 goto unlock_quality_report;
/* Driver hook takes precedence over the AOSP extension. */
4772 if (hdev->set_quality_report)
4773 err = hdev->set_quality_report(hdev, val);
4775 err = aosp_set_quality_report(hdev, val);
4778 err = mgmt_cmd_status(sk, hdev->id,
4779 MGMT_OP_SET_EXP_FEATURE,
4780 MGMT_STATUS_FAILED);
4781 goto unlock_quality_report;
4785 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4787 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4790 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4792 memcpy(rp.uuid, quality_report_uuid, 16);
4793 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4794 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4796 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4800 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4802 unlock_quality_report:
4803 hci_req_sync_unlock(hdev);
/* Toggles codec offload (HCI_OFFLOAD_CODECS_ENABLED).  Supported only when
 * the driver provides hdev->get_data_path_id; parameter is one boolean
 * octet.
 */
4807 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4808 struct mgmt_cp_set_exp_feature *cp,
4813 struct mgmt_rp_set_exp_feature rp;
4815 /* Command requires to use a valid controller index */
4817 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4818 MGMT_OP_SET_EXP_FEATURE,
4819 MGMT_STATUS_INVALID_INDEX);
4821 /* Parameters are limited to a single octet */
4822 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4823 return mgmt_cmd_status(sk, hdev->id,
4824 MGMT_OP_SET_EXP_FEATURE,
4825 MGMT_STATUS_INVALID_PARAMS);
4827 /* Only boolean on/off is supported */
4828 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4829 return mgmt_cmd_status(sk, hdev->id,
4830 MGMT_OP_SET_EXP_FEATURE,
4831 MGMT_STATUS_INVALID_PARAMS);
4833 val = !!cp->param[0];
4834 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4836 if (!hdev->get_data_path_id) {
4837 return mgmt_cmd_status(sk, hdev->id,
4838 MGMT_OP_SET_EXP_FEATURE,
4839 MGMT_STATUS_NOT_SUPPORTED);
4844 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4846 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4849 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4852 memcpy(rp.uuid, offload_codecs_uuid, 16);
4853 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4854 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4855 err = mgmt_cmd_complete(sk, hdev->id,
4856 MGMT_OP_SET_EXP_FEATURE, 0,
4860 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
/* Toggles simultaneous LE central/peripheral roles
 * (HCI_LE_SIMULTANEOUS_ROLES).  Supported only when the controller's LE
 * states allow it (hci_dev_le_state_simultaneous).
 */
4865 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4866 struct mgmt_cp_set_exp_feature *cp,
4871 struct mgmt_rp_set_exp_feature rp;
4873 /* Command requires to use a valid controller index */
4875 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4876 MGMT_OP_SET_EXP_FEATURE,
4877 MGMT_STATUS_INVALID_INDEX);
4879 /* Parameters are limited to a single octet */
4880 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4881 return mgmt_cmd_status(sk, hdev->id,
4882 MGMT_OP_SET_EXP_FEATURE,
4883 MGMT_STATUS_INVALID_PARAMS);
4885 /* Only boolean on/off is supported */
4886 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4887 return mgmt_cmd_status(sk, hdev->id,
4888 MGMT_OP_SET_EXP_FEATURE,
4889 MGMT_STATUS_INVALID_PARAMS);
4891 val = !!cp->param[0];
4892 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4894 if (!hci_dev_le_state_simultaneous(hdev)) {
4895 return mgmt_cmd_status(sk, hdev->id,
4896 MGMT_OP_SET_EXP_FEATURE,
4897 MGMT_STATUS_NOT_SUPPORTED);
4902 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4904 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4907 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4910 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4911 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4912 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4913 err = mgmt_cmd_complete(sk, hdev->id,
4914 MGMT_OP_SET_EXP_FEATURE, 0,
4918 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
/* Toggles the experimental ISO socket support.  Index-less command (must
 * be sent without a controller index); the actual enable/disable calls
 * fall in truncated lines of this listing.
 */
4924 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4925 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4927 struct mgmt_rp_set_exp_feature rp;
4928 bool val, changed = false;
4931 /* Command requires to use the non-controller index */
4933 return mgmt_cmd_status(sk, hdev->id,
4934 MGMT_OP_SET_EXP_FEATURE,
4935 MGMT_STATUS_INVALID_INDEX);
4937 /* Parameters are limited to a single octet */
4938 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4939 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4940 MGMT_OP_SET_EXP_FEATURE,
4941 MGMT_STATUS_INVALID_PARAMS);
4943 /* Only boolean on/off is supported */
4944 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4945 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4946 MGMT_OP_SET_EXP_FEATURE,
4947 MGMT_STATUS_INVALID_PARAMS);
4949 val = cp->param[0] ? true : false;
4958 memcpy(rp.uuid, iso_socket_uuid, 16);
4959 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4961 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4963 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4964 MGMT_OP_SET_EXP_FEATURE, 0,
4968 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
/* Dispatch table mapping experimental-feature UUIDs to their setters;
 * terminated by a NULL entry.  Debug and ISO entries are conditionally
 * compiled (the #endif lines fall in truncated lines of this listing).
 */
4974 static const struct mgmt_exp_feature {
4976 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4977 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4978 } exp_features[] = {
4979 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4980 #ifdef CONFIG_BT_FEATURE_DEBUG
4981 EXP_FEAT(debug_uuid, set_debug_func),
4983 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4984 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4985 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4986 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4987 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4989 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4992 /* end with a null feature */
4993 EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE handler: looks up the request's UUID in
 * exp_features[] and delegates to the matching setter; unknown UUIDs get
 * MGMT_STATUS_NOT_SUPPORTED.  hdev may be NULL for index-less features.
 */
4996 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4997 void *data, u16 data_len)
4999 struct mgmt_cp_set_exp_feature *cp = data;
5002 bt_dev_dbg(hdev, "sock %p", sk);
5004 for (i = 0; exp_features[i].uuid; i++) {
5005 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5006 return exp_features[i].set_func(sk, hdev, cp, data_len);
5009 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5010 MGMT_OP_SET_EXP_FEATURE,
5011 MGMT_STATUS_NOT_SUPPORTED);
/* Returns the per-device supported connection flags, masking out
 * HCI_CONN_FLAG_REMOTE_WAKEUP for RPA-using devices when LL privacy is
 * off (such devices cannot be put in the acceptlist).
 */
5014 static u32 get_params_flags(struct hci_dev *hdev,
5015 struct hci_conn_params *params)
5017 u32 flags = hdev->conn_flags;
5019 /* Devices using RPAs can only be programmed in the acceptlist if
5020 * LL Privacy has been enable otherwise they cannot mark
5021 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5023 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5024 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5025 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
/* MGMT_OP_GET_DEVICE_FLAGS handler: reports supported and current flags
 * for one device — from the BR/EDR accept list for BDADDR_BREDR, or from
 * the LE conn params otherwise.  Unknown devices keep the default
 * INVALID_PARAMS status.
 */
5030 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5033 struct mgmt_cp_get_device_flags *cp = data;
5034 struct mgmt_rp_get_device_flags rp;
5035 struct bdaddr_list_with_flags *br_params;
5036 struct hci_conn_params *params;
5037 u32 supported_flags;
5038 u32 current_flags = 0;
5039 u8 status = MGMT_STATUS_INVALID_PARAMS;
5041 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5042 &cp->addr.bdaddr, cp->addr.type);
5046 supported_flags = hdev->conn_flags;
5048 memset(&rp, 0, sizeof(rp));
5050 if (cp->addr.type == BDADDR_BREDR) {
5051 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5057 current_flags = br_params->flags;
5059 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5060 le_addr_type(cp->addr.type));
/* LE path narrows supported flags per-device (RPA/LL-privacy). */
5064 supported_flags = get_params_flags(hdev, params);
5065 current_flags = params->flags;
5068 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5069 rp.addr.type = cp->addr.type;
5070 rp.supported_flags = cpu_to_le32(supported_flags);
5071 rp.current_flags = cpu_to_le32(current_flags);
5073 status = MGMT_STATUS_SUCCESS;
5076 hci_dev_unlock(hdev);
5078 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcasts MGMT_EV_DEVICE_FLAGS_CHANGED for one device, skipping the
 * socket @sk that made the change.
 */
5082 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5083 bdaddr_t *bdaddr, u8 bdaddr_type,
5084 u32 supported_flags, u32 current_flags)
5086 struct mgmt_ev_device_flags_changed ev;
5088 bacpy(&ev.addr.bdaddr, bdaddr);
5089 ev.addr.type = bdaddr_type;
5090 ev.supported_flags = cpu_to_le32(supported_flags);
5091 ev.current_flags = cpu_to_le32(current_flags);
5093 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: writes the requested flags to the
 * BR/EDR accept-list entry or the LE conn params.  Flags are validated
 * twice on the LE path because get_params_flags() can narrow the
 * per-device supported set.  On success the change is broadcast via
 * device_flags_changed() and passive scan is refreshed when
 * HCI_CONN_FLAG_DEVICE_PRIVACY is set.
 */
5096 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5099 struct mgmt_cp_set_device_flags *cp = data;
5100 struct bdaddr_list_with_flags *br_params;
5101 struct hci_conn_params *params;
5102 u8 status = MGMT_STATUS_INVALID_PARAMS;
5103 u32 supported_flags;
5104 u32 current_flags = __le32_to_cpu(cp->current_flags);
5106 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5107 &cp->addr.bdaddr, cp->addr.type, current_flags);
5109 // We should take hci_dev_lock() early, I think.. conn_flags can change
5110 supported_flags = hdev->conn_flags;
/* Reject any flag bit outside the supported set. */
5112 if ((supported_flags | current_flags) != supported_flags) {
5113 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5114 current_flags, supported_flags);
5120 if (cp->addr.type == BDADDR_BREDR) {
5121 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5126 br_params->flags = current_flags;
5127 status = MGMT_STATUS_SUCCESS;
5129 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5130 &cp->addr.bdaddr, cp->addr.type);
5136 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5137 le_addr_type(cp->addr.type));
5139 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5140 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
/* Re-check against the (possibly narrower) per-device set. */
5144 supported_flags = get_params_flags(hdev, params);
5146 if ((supported_flags | current_flags) != supported_flags) {
5147 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5148 current_flags, supported_flags);
5152 params->flags = current_flags;
5153 status = MGMT_STATUS_SUCCESS;
5155 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5158 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5159 hci_update_passive_scan(hdev);
5162 hci_dev_unlock(hdev);
5165 if (status == MGMT_STATUS_SUCCESS)
5166 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5167 supported_flags, current_flags);
5169 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5170 &cp->addr, sizeof(cp->addr));
/* Emits MGMT_EV_ADV_MONITOR_ADDED for @handle, skipping the issuing
 * socket @sk.
 */
5173 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5176 struct mgmt_ev_adv_monitor_added ev;
5178 ev.monitor_handle = cpu_to_le16(handle);
5180 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emits MGMT_EV_ADV_MONITOR_REMOVED for @handle.  If a Remove Adv Monitor
 * command is pending for a specific handle, its socket is skipped (that
 * socket gets the command completion instead of the event).
 */
5183 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5185 struct mgmt_ev_adv_monitor_removed ev;
5186 struct mgmt_pending_cmd *cmd;
5187 struct sock *sk_skip = NULL;
5188 struct mgmt_cp_remove_adv_monitor *cp;
5190 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* Non-zero monitor_handle = single-monitor removal; its socket is
 * recorded as the one to skip (assignment sits in a truncated line). */
5194 if (cp->monitor_handle)
5198 ev.monitor_handle = cpu_to_le16(handle);
5200 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: reports supported/enabled
 * monitor features (MSFT OR-patterns when available), the handle limits,
 * and the list of currently registered monitor handles.
 */
5203 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5204 void *data, u16 len)
5206 struct adv_monitor *monitor = NULL;
5207 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5210 __u32 supported = 0;
5212 __u16 num_handles = 0;
5213 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5215 BT_DBG("request for %s", hdev->name);
5219 if (msft_monitor_supported(hdev))
5220 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Snapshot registered handles under the dev lock. */
5222 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5223 handles[num_handles++] = monitor->handle;
5225 hci_dev_unlock(hdev);
5227 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5228 rp = kmalloc(rp_size, GFP_KERNEL);
5232 /* All supported features are currently enabled */
5233 enabled = supported;
5235 rp->supported_features = cpu_to_le32(supported);
5236 rp->enabled_features = cpu_to_le32(enabled);
5237 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5238 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5239 rp->num_handles = cpu_to_le16(num_handles);
5241 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5243 err = mgmt_cmd_complete(sk, hdev->id,
5244 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5245 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for Add Adv Patterns Monitor (queued via
 * hci_cmd_sync_queue).  On success it announces the monitor, bumps the
 * count, marks it registered and refreshes passive scanning; in all cases
 * it completes the pending command with the translated status.
 */
5252 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5253 void *data, int status)
5255 struct mgmt_rp_add_adv_patterns_monitor rp;
5256 struct mgmt_pending_cmd *cmd = data;
5257 struct adv_monitor *monitor = cmd->user_data;
5261 rp.monitor_handle = cpu_to_le16(monitor->handle);
5264 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5265 hdev->adv_monitors_cnt++;
5266 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5267 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5268 hci_update_passive_scan(hdev);
5271 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5272 mgmt_status(status), &rp, sizeof(rp));
5273 mgmt_pending_remove(cmd);
5275 hci_dev_unlock(hdev);
5276 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5277 rp.monitor_handle, status);
/* hci_cmd_sync work function: registers the monitor carried in the
 * pending command's user_data.
 */
5280 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5282 struct mgmt_pending_cmd *cmd = data;
5283 struct adv_monitor *monitor = cmd->user_data;
5285 return hci_add_adv_monitor(hdev, monitor);
/* Common tail for both Add Adv Patterns Monitor opcodes: rejects when a
 * conflicting command is pending, creates the pending command and queues
 * the registration work.  On any failure path the monitor @m is freed and
 * a status reply is sent (the error path sits past the unlock, reached by
 * gotos in truncated lines).
 */
5288 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5289 struct adv_monitor *m, u8 status,
5290 void *data, u16 len, u16 op)
5292 struct mgmt_pending_cmd *cmd;
5300 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5301 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5302 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5303 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5304 status = MGMT_STATUS_BUSY;
5308 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5310 status = MGMT_STATUS_NO_RESOURCES;
5315 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5316 mgmt_add_adv_patterns_monitor_complete);
5319 status = MGMT_STATUS_NO_RESOURCES;
5321 status = MGMT_STATUS_FAILED;
5326 hci_dev_unlock(hdev);
5331 hci_free_adv_monitor(hdev, m);
5332 hci_dev_unlock(hdev);
5333 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copies RSSI thresholds from the request into the monitor; when @rssi is
 * NULL (the non-RSSI opcode), installs permissive defaults so the MSFT
 * backend behaves as if no RSSI constraints were given.  The NULL check
 * itself sits in a truncated line.
 */
5336 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5337 struct mgmt_adv_rssi_thresholds *rssi)
5340 m->rssi.low_threshold = rssi->low_threshold;
5341 m->rssi.low_threshold_timeout =
5342 __le16_to_cpu(rssi->low_threshold_timeout);
5343 m->rssi.high_threshold = rssi->high_threshold;
5344 m->rssi.high_threshold_timeout =
5345 __le16_to_cpu(rssi->high_threshold_timeout);
5346 m->rssi.sampling_period = rssi->sampling_period;
5348 /* Default values. These numbers are the least constricting
5349 * parameters for MSFT API to work, so it behaves as if there
5350 * are no rssi parameter to consider. May need to be changed
5351 * if other API are to be supported.
5353 m->rssi.low_threshold = -127;
5354 m->rssi.low_threshold_timeout = 60;
5355 m->rssi.high_threshold = -127;
5356 m->rssi.high_threshold_timeout = 0;
5357 m->rssi.sampling_period = 0;
/* Validates and copies @pattern_count advertising patterns into the
 * monitor's pattern list.  Each pattern's offset/length must fit within
 * HCI_MAX_AD_LENGTH.  Returns an MGMT status code.
 */
5361 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5362 struct mgmt_adv_pattern *patterns)
5364 u8 offset = 0, length = 0;
5365 struct adv_pattern *p = NULL;
5368 for (i = 0; i < pattern_count; i++) {
5369 offset = patterns[i].offset;
5370 length = patterns[i].length;
5371 if (offset >= HCI_MAX_AD_LENGTH ||
5372 length > HCI_MAX_AD_LENGTH ||
5373 (offset + length) > HCI_MAX_AD_LENGTH)
5374 return MGMT_STATUS_INVALID_PARAMS;
5376 p = kmalloc(sizeof(*p), GFP_KERNEL);
5378 return MGMT_STATUS_NO_RESOURCES;
5380 p->ad_type = patterns[i].ad_type;
5381 p->offset = patterns[i].offset;
5382 p->length = patterns[i].length;
5383 memcpy(p->value, patterns[i].value, p->length);
5385 INIT_LIST_HEAD(&p->list);
5386 list_add(&p->list, &m->patterns);
5389 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds): checks
 * payload length against pattern_count, allocates the monitor, parses
 * patterns with default RSSI values, then hands off to
 * __add_adv_patterns_monitor (which also handles error statuses and frees
 * the monitor on failure).
 */
5392 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5393 void *data, u16 len)
5395 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5396 struct adv_monitor *m = NULL;
5397 u8 status = MGMT_STATUS_SUCCESS;
5398 size_t expected_size = sizeof(*cp);
5400 BT_DBG("request for %s", hdev->name);
5402 if (len <= sizeof(*cp)) {
5403 status = MGMT_STATUS_INVALID_PARAMS;
5407 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5408 if (len != expected_size) {
5409 status = MGMT_STATUS_INVALID_PARAMS;
5413 m = kzalloc(sizeof(*m), GFP_KERNEL);
5415 status = MGMT_STATUS_NO_RESOURCES;
5419 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi -> permissive defaults (see parse_adv_monitor_rssi). */
5421 parse_adv_monitor_rssi(m, NULL);
5422 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5425 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5426 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same as
 * add_adv_patterns_monitor but the request carries explicit RSSI
 * thresholds which are parsed into the monitor before hand-off.
 */
5429 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5430 void *data, u16 len)
5432 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5433 struct adv_monitor *m = NULL;
5434 u8 status = MGMT_STATUS_SUCCESS;
5435 size_t expected_size = sizeof(*cp);
5437 BT_DBG("request for %s", hdev->name);
5439 if (len <= sizeof(*cp)) {
5440 status = MGMT_STATUS_INVALID_PARAMS;
5444 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5445 if (len != expected_size) {
5446 status = MGMT_STATUS_INVALID_PARAMS;
5450 m = kzalloc(sizeof(*m), GFP_KERNEL);
5452 status = MGMT_STATUS_NO_RESOURCES;
5456 INIT_LIST_HEAD(&m->patterns);
5458 parse_adv_monitor_rssi(m, &cp->rssi);
5459 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5462 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5463 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for the Remove Advertising Monitor sync request:
 * echo the requested monitor_handle back to userspace, refresh passive
 * scanning, and release the pending command.
 * NOTE(review): the hci_dev_lock() pairing with the visible
 * hci_dev_unlock() is elided from this excerpt.
 */
5466 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5467 void *data, int status)
5469 struct mgmt_rp_remove_adv_monitor rp;
5470 struct mgmt_pending_cmd *cmd = data;
5471 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
/* Reply carries the same (little-endian) handle the caller sent. */
5475 rp.monitor_handle = cp->monitor_handle;
/* Monitor set changed; re-evaluate passive scan parameters. */
5478 hci_update_passive_scan(hdev);
5480 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5481 mgmt_status(status), &rp, sizeof(rp));
5482 mgmt_pending_remove(cmd);
5484 hci_dev_unlock(hdev);
5485 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5486 rp.monitor_handle, status);
/* hci_cmd_sync work for Remove Advertising Monitor: a handle of 0
 * (condition elided in this excerpt) removes all monitors, otherwise the
 * single monitor identified by the handle is removed.
 */
5489 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5491 struct mgmt_pending_cmd *cmd = data;
5492 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5493 u16 handle = __le16_to_cpu(cp->monitor_handle);
5496 return hci_remove_all_adv_monitor(hdev);
5498 return hci_remove_single_adv_monitor(hdev, handle);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: reject if any conflicting monitor/LE
 * operation is already pending, then queue the removal on the cmd_sync
 * workqueue with mgmt_remove_adv_monitor_complete() as the callback.
 * NOTE(review): several error-path lines (gotos, final return) are elided
 * from this excerpt.
 */
5501 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5502 void *data, u16 len)
5504 struct mgmt_pending_cmd *cmd;
/* Serialize against other monitor/LE state changes in flight. */
5509 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5510 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5511 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5512 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5513 status = MGMT_STATUS_BUSY;
5517 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5519 status = MGMT_STATUS_NO_RESOURCES;
5523 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5524 mgmt_remove_adv_monitor_complete);
/* Queueing failed: drop the pending cmd and map err to a mgmt status. */
5527 mgmt_pending_remove(cmd);
5530 status = MGMT_STATUS_NO_RESOURCES;
5532 status = MGMT_STATUS_FAILED;
5537 hci_dev_unlock(hdev);
5542 hci_dev_unlock(hdev);
5543 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* Completion callback for Read Local OOB Data: translate the HCI reply
 * skb into a mgmt_rp_read_local_oob_data response. For non-SC controllers
 * only the P-192 hash/rand are present and the P-256 fields are trimmed
 * from the reply size; for SC-capable controllers all four values are
 * copied. Frees the skb (when valid) and the pending command.
 * NOTE(review): some lines (status==0 guard, goto labels) are elided.
 */
5547 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5549 struct mgmt_rp_read_local_oob_data mgmt_rp;
5550 size_t rp_size = sizeof(mgmt_rp);
5551 struct mgmt_pending_cmd *cmd = data;
5552 struct sk_buff *skb = cmd->skb;
5553 u8 status = mgmt_status(err);
/* Derive a mgmt status from err, the skb error pointer, or the first
 * byte of the HCI reply (the HCI status), in that precedence. */
5557 status = MGMT_STATUS_FAILED;
5558 else if (IS_ERR(skb))
5559 status = mgmt_status(PTR_ERR(skb));
5561 status = mgmt_status(skb->data[0]);
5564 bt_dev_dbg(hdev, "status %d", status);
5567 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5571 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5573 if (!bredr_sc_enabled(hdev)) {
/* Legacy (non Secure Connections) reply: P-192 values only. */
5574 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5576 if (skb->len < sizeof(*rp)) {
5577 mgmt_cmd_status(cmd->sk, hdev->id,
5578 MGMT_OP_READ_LOCAL_OOB_DATA,
5579 MGMT_STATUS_FAILED);
5583 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5584 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Trim unused P-256 fields from the response payload. */
5586 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5588 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5590 if (skb->len < sizeof(*rp)) {
5591 mgmt_cmd_status(cmd->sk, hdev->id,
5592 MGMT_OP_READ_LOCAL_OOB_DATA,
5593 MGMT_STATUS_FAILED);
5597 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5598 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5600 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5601 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5604 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5605 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
/* Only free a real skb; error pointers must not be kfree_skb'd. */
5608 if (skb && !IS_ERR(skb))
5611 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Read Local OOB Data: issue the extended (SC) or
 * legacy HCI read depending on bredr_sc_enabled(), storing the reply skb
 * on the pending command for the completion callback.
 */
5614 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5616 struct mgmt_pending_cmd *cmd = data;
5618 if (bredr_sc_enabled(hdev))
5619 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5621 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5623 if (IS_ERR(cmd->skb))
5624 return PTR_ERR(cmd->skb);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: require a powered, SSP-capable
 * controller, then queue read_local_oob_data_sync() on the cmd_sync
 * workqueue. Uses mgmt_pending_new() (not _add) so the cmd is freed by
 * the completion callback rather than tracked for pending_find().
 * NOTE(review): hci_dev_lock(), some gotos and the final return are
 * elided from this excerpt.
 */
5629 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5630 void *data, u16 data_len)
5632 struct mgmt_pending_cmd *cmd;
5635 bt_dev_dbg(hdev, "sock %p", sk);
5639 if (!hdev_is_powered(hdev)) {
5640 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5641 MGMT_STATUS_NOT_POWERED);
5645 if (!lmp_ssp_capable(hdev)) {
5646 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5647 MGMT_STATUS_NOT_SUPPORTED);
5651 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5655 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5656 read_local_oob_data_complete);
/* Queueing failed: report FAILED and free the unused pending cmd. */
5659 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5660 MGMT_STATUS_FAILED);
5663 mgmt_pending_free(cmd);
5667 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Two payload sizes are accepted:
 * the short form (P-192 hash/rand only, BR/EDR addresses only) and the
 * extended form (P-192 + P-256). For LE addresses in the extended form
 * the P-192 values must be zero since legacy SMP OOB is not implemented;
 * zeroed P-192 or P-256 pairs disable OOB for that curve. Any other
 * length is rejected as invalid.
 * NOTE(review): hci_dev_lock(), several gotos/braces and NULL
 * assignments for the disabled-curve cases are elided from this excerpt.
 */
5671 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5672 void *data, u16 len)
5674 struct mgmt_addr_info *addr = data;
5677 bt_dev_dbg(hdev, "sock %p", sk);
5679 if (!bdaddr_type_is_valid(addr->type))
5680 return mgmt_cmd_complete(sk, hdev->id,
5681 MGMT_OP_ADD_REMOTE_OOB_DATA,
5682 MGMT_STATUS_INVALID_PARAMS,
5683 addr, sizeof(*addr));
/* Short form: P-192 data only, restricted to BR/EDR addresses. */
5687 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5688 struct mgmt_cp_add_remote_oob_data *cp = data;
5691 if (cp->addr.type != BDADDR_BREDR) {
5692 err = mgmt_cmd_complete(sk, hdev->id,
5693 MGMT_OP_ADD_REMOTE_OOB_DATA,
5694 MGMT_STATUS_INVALID_PARAMS,
5695 &cp->addr, sizeof(cp->addr));
5699 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5700 cp->addr.type, cp->hash,
5701 cp->rand, NULL, NULL);
5703 status = MGMT_STATUS_FAILED;
5705 status = MGMT_STATUS_SUCCESS;
5707 err = mgmt_cmd_complete(sk, hdev->id,
5708 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5709 &cp->addr, sizeof(cp->addr));
/* Extended form: both P-192 and P-256 values supplied. */
5710 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5711 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5712 u8 *rand192, *hash192, *rand256, *hash256;
5715 if (bdaddr_type_is_le(cp->addr.type)) {
5716 /* Enforce zero-valued 192-bit parameters as
5717 * long as legacy SMP OOB isn't implemented.
5719 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5720 memcmp(cp->hash192, ZERO_KEY, 16)) {
5721 err = mgmt_cmd_complete(sk, hdev->id,
5722 MGMT_OP_ADD_REMOTE_OOB_DATA,
5723 MGMT_STATUS_INVALID_PARAMS,
5724 addr, sizeof(*addr));
5731 /* In case one of the P-192 values is set to zero,
5732 * then just disable OOB data for P-192.
5734 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5735 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5739 rand192 = cp->rand192;
5740 hash192 = cp->hash192;
5744 /* In case one of the P-256 values is set to zero, then just
5745 * disable OOB data for P-256.
5747 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5748 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5752 rand256 = cp->rand256;
5753 hash256 = cp->hash256;
5756 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5757 cp->addr.type, hash192, rand192,
5760 status = MGMT_STATUS_FAILED;
5762 status = MGMT_STATUS_SUCCESS;
5764 err = mgmt_cmd_complete(sk, hdev->id,
5765 MGMT_OP_ADD_REMOTE_OOB_DATA,
5766 status, &cp->addr, sizeof(cp->addr));
/* Neither known payload size: reject. */
5768 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5770 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5771 MGMT_STATUS_INVALID_PARAMS);
5775 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR addresses only. The
 * wildcard address BDADDR_ANY clears all stored remote OOB data;
 * otherwise only the entry for the given address is removed.
 * NOTE(review): hci_dev_lock(), a goto and the final return are elided.
 */
5779 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5780 void *data, u16 len)
5782 struct mgmt_cp_remove_remote_oob_data *cp = data;
5786 bt_dev_dbg(hdev, "sock %p", sk);
5788 if (cp->addr.type != BDADDR_BREDR)
5789 return mgmt_cmd_complete(sk, hdev->id,
5790 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5791 MGMT_STATUS_INVALID_PARAMS,
5792 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a "clear all" wildcard. */
5796 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5797 hci_remote_oob_data_clear(hdev);
5798 status = MGMT_STATUS_SUCCESS;
5802 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5804 status = MGMT_STATUS_INVALID_PARAMS;
5806 status = MGMT_STATUS_SUCCESS;
5809 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5810 status, &cp->addr, sizeof(cp->addr));
5812 hci_dev_unlock(hdev);
/* Notify completion of any of the three Start Discovery command variants:
 * find whichever one is pending (plain, service, or limited), invoke its
 * cmd_complete handler with the translated status and remove it.
 * NOTE(review): hci_dev_lock() and the NULL-check fallthroughs between
 * the pending_find() calls are elided from this excerpt.
 */
5816 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5818 struct mgmt_pending_cmd *cmd;
5820 bt_dev_dbg(hdev, "status %u", status);
5824 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5826 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5829 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5832 cmd->cmd_complete(cmd, mgmt_status(status));
5833 mgmt_pending_remove(cmd);
5836 hci_dev_unlock(hdev);
/* Validate a discovery type against the controller's capabilities.
 * LE and interleaved discovery require LE support; BR/EDR discovery
 * requires BR/EDR support; anything else is INVALID_PARAMS. The precise
 * mgmt status is written through *mgmt_status; the bool result (returns
 * elided in this excerpt) indicates whether discovery may proceed.
 */
5839 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5840 uint8_t *mgmt_status)
5843 case DISCOV_TYPE_LE:
5844 *mgmt_status = mgmt_le_support(hdev);
5848 case DISCOV_TYPE_INTERLEAVED:
5849 *mgmt_status = mgmt_le_support(hdev);
5853 case DISCOV_TYPE_BREDR:
5854 *mgmt_status = mgmt_bredr_support(hdev);
5859 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* cmd_sync completion for Start Discovery: ignore stale callbacks whose
 * cmd no longer matches any pending start-discovery variant, complete the
 * mgmt command, and set the discovery state to FINDING on success or
 * STOPPED on error.
 */
5866 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5868 struct mgmt_pending_cmd *cmd = data;
/* Guard against a callback racing with command cancellation. */
5870 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5871 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5872 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5875 bt_dev_dbg(hdev, "err %d", err);
5877 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5879 mgmt_pending_remove(cmd);
5881 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
/* Thin cmd_sync adapter: delegate to hci_start_discovery_sync(). */
5885 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5887 return hci_start_discovery_sync(hdev);
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (op selects the variant): validate
 * power/busy/type/paused preconditions, reset the discovery filter,
 * record the requested type and limited flag, then queue
 * start_discovery_sync() and move the state machine to STARTING.
 * NOTE(review): hci_dev_lock(), gotos and the final return are elided.
 */
5890 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5891 u16 op, void *data, u16 len)
5893 struct mgmt_cp_start_discovery *cp = data;
5894 struct mgmt_pending_cmd *cmd;
5898 bt_dev_dbg(hdev, "sock %p", sk);
5902 if (!hdev_is_powered(hdev)) {
5903 err = mgmt_cmd_complete(sk, hdev->id, op,
5904 MGMT_STATUS_NOT_POWERED,
5905 &cp->type, sizeof(cp->type));
/* Only one discovery may run; periodic inquiry also blocks it. */
5909 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5910 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5911 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5912 &cp->type, sizeof(cp->type));
5916 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5917 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5918 &cp->type, sizeof(cp->type));
5922 /* Can't start discovery when it is paused */
5923 if (hdev->discovery_paused) {
5924 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5925 &cp->type, sizeof(cp->type));
5929 /* Clear the discovery filter first to free any previously
5930 * allocated memory for the UUID list.
5932 hci_discovery_filter_clear(hdev);
5934 hdev->discovery.type = cp->type;
5935 hdev->discovery.report_invalid_rssi = false;
5936 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5937 hdev->discovery.limited = true;
5939 hdev->discovery.limited = false;
5941 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5947 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5948 start_discovery_complete);
5950 mgmt_pending_remove(cmd);
5954 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5957 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY entry point: delegate to the shared helper. */
5961 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5962 void *data, u16 len)
5964 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY entry point: same shared helper with
 * the limited-discovery opcode. */
5968 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5969 void *data, u16 len)
5971 return start_discovery_internal(sk, hdev,
5972 MGMT_OP_START_LIMITED_DISCOVERY,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering — an RSSI threshold plus an optional list of 16-byte
 * UUIDs appended to the command. Validates power/busy/paused state, the
 * uuid_count against the u16 payload limit, the exact payload length and
 * the discovery type; copies the UUID list (kmemdup) into
 * hdev->discovery, queues start_discovery_sync() and moves the state
 * machine to STARTING.
 * NOTE(review): hci_dev_lock(), gotos and the final return are elided.
 */
5976 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5977 void *data, u16 len)
5979 struct mgmt_cp_start_service_discovery *cp = data;
5980 struct mgmt_pending_cmd *cmd;
/* Upper bound so expected_len below cannot overflow u16. */
5981 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5982 u16 uuid_count, expected_len;
5986 bt_dev_dbg(hdev, "sock %p", sk);
5990 if (!hdev_is_powered(hdev)) {
5991 err = mgmt_cmd_complete(sk, hdev->id,
5992 MGMT_OP_START_SERVICE_DISCOVERY,
5993 MGMT_STATUS_NOT_POWERED,
5994 &cp->type, sizeof(cp->type));
5998 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5999 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6000 err = mgmt_cmd_complete(sk, hdev->id,
6001 MGMT_OP_START_SERVICE_DISCOVERY,
6002 MGMT_STATUS_BUSY, &cp->type,
6007 if (hdev->discovery_paused) {
6008 err = mgmt_cmd_complete(sk, hdev->id,
6009 MGMT_OP_START_SERVICE_DISCOVERY,
6010 MGMT_STATUS_BUSY, &cp->type,
6015 uuid_count = __le16_to_cpu(cp->uuid_count);
6016 if (uuid_count > max_uuid_count) {
6017 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6019 err = mgmt_cmd_complete(sk, hdev->id,
6020 MGMT_OP_START_SERVICE_DISCOVERY,
6021 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Payload must be exactly header + uuid_count 128-bit UUIDs. */
6026 expected_len = sizeof(*cp) + uuid_count * 16;
6027 if (expected_len != len) {
6028 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6030 err = mgmt_cmd_complete(sk, hdev->id,
6031 MGMT_OP_START_SERVICE_DISCOVERY,
6032 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6037 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6038 err = mgmt_cmd_complete(sk, hdev->id,
6039 MGMT_OP_START_SERVICE_DISCOVERY,
6040 status, &cp->type, sizeof(cp->type));
6044 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6051 /* Clear the discovery filter first to free any previously
6052 * allocated memory for the UUID list.
6054 hci_discovery_filter_clear(hdev);
6056 hdev->discovery.result_filtering = true;
6057 hdev->discovery.type = cp->type;
6058 hdev->discovery.rssi = cp->rssi;
6059 hdev->discovery.uuid_count = uuid_count;
6061 if (uuid_count > 0) {
/* Take a private copy; freed later by hci_discovery_filter_clear(). */
6062 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6064 if (!hdev->discovery.uuids) {
6065 err = mgmt_cmd_complete(sk, hdev->id,
6066 MGMT_OP_START_SERVICE_DISCOVERY,
6068 &cp->type, sizeof(cp->type));
6069 mgmt_pending_remove(cmd);
6074 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6075 start_discovery_complete);
6077 mgmt_pending_remove(cmd);
6081 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6084 hci_dev_unlock(hdev);
/* Notify completion of a Stop Discovery command: complete and remove the
 * pending MGMT_OP_STOP_DISCOVERY, if any.
 * NOTE(review): hci_dev_lock() and the NULL check are elided here.
 */
6088 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6090 struct mgmt_pending_cmd *cmd;
6092 bt_dev_dbg(hdev, "status %u", status);
6096 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6098 cmd->cmd_complete(cmd, mgmt_status(status));
6099 mgmt_pending_remove(cmd);
6102 hci_dev_unlock(hdev);
/* cmd_sync completion for Stop Discovery: ignore stale callbacks, answer
 * the mgmt command, and mark discovery STOPPED. */
6105 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6107 struct mgmt_pending_cmd *cmd = data;
/* Guard against a callback racing with command cancellation. */
6109 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6112 bt_dev_dbg(hdev, "err %d", err);
6114 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6116 mgmt_pending_remove(cmd);
6119 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Thin cmd_sync adapter: delegate to hci_stop_discovery_sync(). */
6122 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6124 return hci_stop_discovery_sync(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: reject if discovery is not active or
 * the requested type does not match the running one, then queue
 * stop_discovery_sync() and move the state machine to STOPPING.
 * NOTE(review): hci_dev_lock(), gotos and the final return are elided.
 */
6127 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6130 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6131 struct mgmt_pending_cmd *cmd;
6134 bt_dev_dbg(hdev, "sock %p", sk);
6138 if (!hci_discovery_active(hdev)) {
6139 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6140 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6141 sizeof(mgmt_cp->type));
/* Type must match the discovery that was actually started. */
6145 if (hdev->discovery.type != mgmt_cp->type) {
6146 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6147 MGMT_STATUS_INVALID_PARAMS,
6148 &mgmt_cp->type, sizeof(mgmt_cp->type));
6152 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6158 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6159 stop_discovery_complete);
6161 mgmt_pending_remove(cmd);
6165 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6168 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during an active discovery, mark the
 * inquiry-cache entry for the given address as name-known, or as
 * name-needed (triggering name resolution via
 * hci_inquiry_cache_update_resolve()).
 * NOTE(review): hci_dev_lock(), gotos and the final return are elided.
 */
6172 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6175 struct mgmt_cp_confirm_name *cp = data;
6176 struct inquiry_entry *e;
6179 bt_dev_dbg(hdev, "sock %p", sk);
6183 if (!hci_discovery_active(hdev)) {
6184 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6185 MGMT_STATUS_FAILED, &cp->addr,
/* Only entries still awaiting name resolution can be confirmed. */
6190 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6192 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6193 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6198 if (cp->name_known) {
6199 e->name_state = NAME_KNOWN;
6202 e->name_state = NAME_NEEDED;
6203 hci_inquiry_cache_update_resolve(hdev, e);
6206 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6207 &cp->addr, sizeof(cp->addr));
6210 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to hdev->reject_list and
 * broadcast MGMT_EV_DEVICE_BLOCKED on success.
 * NOTE(review): hci_dev_lock() and the final return are elided.
 */
6214 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6217 struct mgmt_cp_block_device *cp = data;
6221 bt_dev_dbg(hdev, "sock %p", sk);
6223 if (!bdaddr_type_is_valid(cp->addr.type))
6224 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6225 MGMT_STATUS_INVALID_PARAMS,
6226 &cp->addr, sizeof(cp->addr));
6230 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6233 status = MGMT_STATUS_FAILED;
/* Event is sent to all mgmt sockets (the requester is skipped). */
6237 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6239 status = MGMT_STATUS_SUCCESS;
6242 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6243 &cp->addr, sizeof(cp->addr));
6245 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from
 * hdev->reject_list (INVALID_PARAMS if it was not present) and broadcast
 * MGMT_EV_DEVICE_UNBLOCKED on success.
 * NOTE(review): hci_dev_lock() and the final return are elided.
 */
6250 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6253 struct mgmt_cp_unblock_device *cp = data;
6257 bt_dev_dbg(hdev, "sock %p", sk);
6259 if (!bdaddr_type_is_valid(cp->addr.type))
6260 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6261 MGMT_STATUS_INVALID_PARAMS,
6262 &cp->addr, sizeof(cp->addr));
6266 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6269 status = MGMT_STATUS_INVALID_PARAMS;
6273 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6275 status = MGMT_STATUS_SUCCESS;
6278 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6279 &cp->addr, sizeof(cp->addr));
6281 hci_dev_unlock(hdev);
/* cmd_sync work after Set Device ID: refresh the EIR data so the new
 * DID record is advertised. */
6286 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6288 return hci_update_eir_sync(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: validate the DID source (0x0000-0x0002),
 * store source/vendor/product/version on the hdev, reply immediately, and
 * queue an EIR update in the background.
 * NOTE(review): hci_dev_lock() and the final return are elided.
 */
6291 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6294 struct mgmt_cp_set_device_id *cp = data;
6298 bt_dev_dbg(hdev, "sock %p", sk);
6300 source = __le16_to_cpu(cp->source);
/* Valid sources: 0 = disabled, 1 = Bluetooth SIG, 2 = USB IF. */
6302 if (source > 0x0002)
6303 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6304 MGMT_STATUS_INVALID_PARAMS);
6308 hdev->devid_source = source;
6309 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6310 hdev->devid_product = __le16_to_cpu(cp->product);
6311 hdev->devid_version = __le16_to_cpu(cp->version);
6313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Fire-and-forget: EIR refresh result is not reported to userspace. */
6316 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6318 hci_dev_unlock(hdev);
/* Log the outcome of re-enabling an advertising instance (error vs debug
 * trace); no further recovery is attempted here. */
6323 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6326 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6328 bt_dev_dbg(hdev, "status %d", err);
/* cmd_sync completion for Set Advertising: on error, fail all pending
 * SET_ADVERTISING commands; on success, mirror the controller's HCI_LE_ADV
 * state into the HCI_ADVERTISING flag, answer the pending commands and
 * emit New Settings. If Set Advertising was just turned off while
 * instance advertising exists, re-schedule the current (or first)
 * advertising instance.
 * NOTE(review): hci_dev_lock/unlock and some gotos are elided here.
 */
6331 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6333 struct cmd_lookup match = { NULL, hdev };
6335 struct adv_info *adv_instance;
6336 u8 status = mgmt_status(err);
6339 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6340 cmd_status_rsp, &status);
/* Sync the mgmt-visible flag with the actual controller state. */
6344 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6345 hci_dev_set_flag(hdev, HCI_ADVERTISING)
6347 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6349 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6352 new_settings(hdev, match.sk);
6357 /* If "Set Advertising" was just disabled and instance advertising was
6358 * set up earlier, then re-enable multi-instance advertising.
6360 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6361 list_empty(&hdev->adv_instances))
6364 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first registered one. */
6366 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6367 struct adv_info, list);
6371 instance = adv_instance->instance;
6374 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6376 enable_advertising_instance(hdev, err);
/* cmd_sync work for Set Advertising: record the connectable preference
 * (val == 0x02 means connectable), cancel any instance timeout, switch to
 * instance 0 and either start advertising (extended or legacy path) or
 * disable it, depending on cp->val (enable branch condition elided).
 */
6379 static int set_adv_sync(struct hci_dev *hdev, void *data)
6381 struct mgmt_pending_cmd *cmd = data;
6382 struct mgmt_mode *cp = cmd->param;
/* 0x02 selects connectable advertising; 0x01 non-connectable. */
6385 if (cp->val == 0x02)
6386 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6388 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6390 cancel_adv_timeout(hdev);
6393 /* Switch to instance "0" for the Set Advertising setting.
6394 * We cannot use update_[adv|scan_rsp]_data() here as the
6395 * HCI_ADVERTISING flag is not yet set.
6397 hdev->cur_adv_instance = 0x00;
6399 if (ext_adv_capable(hdev)) {
6400 hci_start_ext_adv_sync(hdev, 0x00);
/* Legacy path: push adv + scan-response data, then enable. */
6402 hci_update_adv_data_sync(hdev, 0x00);
6403 hci_update_scan_rsp_data_sync(hdev, 0x00);
6404 hci_enable_advertising_sync(hdev);
6407 hci_disable_advertising_sync(hdev);
/* MGMT_OP_SET_ADVERTISING handler. val: 0x00 off, 0x01 on,
 * 0x02 on+connectable. Requires LE support and rejects while advertising
 * is paused. When no HCI traffic is needed (powered off, no change, mesh
 * enabled, LE links up, or active LE scan running) the flags are toggled
 * directly and only a settings response / New Settings event is sent;
 * otherwise the change is queued via set_adv_sync().
 * NOTE(review): hci_dev_lock(), gotos and the final return are elided.
 */
6413 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6416 struct mgmt_mode *cp = data;
6417 struct mgmt_pending_cmd *cmd;
6421 bt_dev_dbg(hdev, "sock %p", sk);
6423 status = mgmt_le_support(hdev);
6425 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6428 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6429 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6430 MGMT_STATUS_INVALID_PARAMS);
6432 if (hdev->advertising_paused)
6433 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6440 /* The following conditions are ones which mean that we should
6441 * not do any HCI communication but directly send a mgmt
6442 * response to user space (after toggling the flag if
6445 if (!hdev_is_powered(hdev) ||
6446 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6447 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6448 hci_dev_test_flag(hdev, HCI_MESH) ||
6449 hci_conn_num(hdev, LE_LINK) > 0 ||
6450 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6451 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
/* Flag-only path: record the new setting without touching HCI. */
6455 hdev->cur_adv_instance = 0x00;
6456 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6457 if (cp->val == 0x02)
6458 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6460 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6462 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6463 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6466 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Only emit New Settings if a flag actually changed. */
6471 err = new_settings(hdev, sk);
6476 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6477 pending_find(MGMT_OP_SET_LE, hdev)) {
6478 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6483 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6487 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6488 set_advertising_complete);
6491 mgmt_pending_remove(cmd);
6494 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed on LE-capable,
 * powered-off controllers. A non-ANY address must not be BDADDR_NONE and
 * must have its two most significant bits set, per the Core Spec static
 * address format. Stores the address and reports new settings.
 * NOTE(review): hci_dev_lock() and the final return are elided.
 */
6498 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6499 void *data, u16 len)
6501 struct mgmt_cp_set_static_address *cp = data;
6504 bt_dev_dbg(hdev, "sock %p", sk);
6506 if (!lmp_le_capable(hdev))
6507 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6508 MGMT_STATUS_NOT_SUPPORTED);
/* The identity address may only change while powered off. */
6510 if (hdev_is_powered(hdev))
6511 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6512 MGMT_STATUS_REJECTED);
6514 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6515 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6516 return mgmt_cmd_status(sk, hdev->id,
6517 MGMT_OP_SET_STATIC_ADDRESS,
6518 MGMT_STATUS_INVALID_PARAMS);
6520 /* Two most significant bits shall be set */
6521 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6522 return mgmt_cmd_status(sk, hdev->id,
6523 MGMT_OP_SET_STATIC_ADDRESS,
6524 MGMT_STATUS_INVALID_PARAMS);
6529 bacpy(&hdev->static_addr, &cp->bdaddr);
6531 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6535 err = new_settings(hdev, sk);
6538 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate LE scan interval and window
 * (each 0x0004-0x4000, window <= interval, per the HCI spec ranges),
 * store them and restart passive background scanning if it is running so
 * the new parameters take effect.
 * NOTE(review): hci_dev_lock() and the final return are elided.
 */
6542 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6543 void *data, u16 len)
6545 struct mgmt_cp_set_scan_params *cp = data;
6546 __u16 interval, window;
6549 bt_dev_dbg(hdev, "sock %p", sk);
6551 if (!lmp_le_capable(hdev))
6552 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6553 MGMT_STATUS_NOT_SUPPORTED);
6555 interval = __le16_to_cpu(cp->interval);
6557 if (interval < 0x0004 || interval > 0x4000)
6558 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6559 MGMT_STATUS_INVALID_PARAMS);
6561 window = __le16_to_cpu(cp->window);
6563 if (window < 0x0004 || window > 0x4000)
6564 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6565 MGMT_STATUS_INVALID_PARAMS);
/* Scan window can never exceed the scan interval. */
6567 if (window > interval)
6568 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6569 MGMT_STATUS_INVALID_PARAMS);
6573 hdev->le_scan_interval = interval;
6574 hdev->le_scan_window = window;
6576 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6579 /* If background scan is running, restart it so new parameters are
6582 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6583 hdev->discovery.state == DISCOVERY_STOPPED)
6584 hci_update_passive_scan(hdev);
6586 hci_dev_unlock(hdev);
/* cmd_sync completion for Set Fast Connectable: on error report the
 * failure, otherwise mirror the requested value into the
 * HCI_FAST_CONNECTABLE flag, answer the command and emit New Settings.
 * The cmd was created with mgmt_pending_new(), hence mgmt_pending_free().
 */
6591 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6593 struct mgmt_pending_cmd *cmd = data;
6595 bt_dev_dbg(hdev, "err %d", err);
6598 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6601 struct mgmt_mode *cp = cmd->param;
6604 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6606 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6608 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6609 new_settings(hdev, cmd->sk);
6612 mgmt_pending_free(cmd);
/* cmd_sync work for Set Fast Connectable: forward the requested mode to
 * hci_write_fast_connectable_sync(). */
6615 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6617 struct mgmt_pending_cmd *cmd = data;
6618 struct mgmt_mode *cp = cmd->param;
6620 return hci_write_fast_connectable_sync(hdev, cp->val);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled and
 * controller version >= 1.2; val must be 0/1. If the flag already
 * matches, or the controller is powered off, only the flag and settings
 * response are updated; otherwise the write is queued via
 * write_fast_connectable_sync().
 * NOTE(review): hci_dev_lock(), gotos and the final return are elided.
 */
6623 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6624 void *data, u16 len)
6626 struct mgmt_mode *cp = data;
6627 struct mgmt_pending_cmd *cmd;
6630 bt_dev_dbg(hdev, "sock %p", sk);
/* Fast connectable needs interlaced page scan, a 1.2 feature. */
6632 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6633 hdev->hci_ver < BLUETOOTH_VER_1_2)
6634 return mgmt_cmd_status(sk, hdev->id,
6635 MGMT_OP_SET_FAST_CONNECTABLE,
6636 MGMT_STATUS_NOT_SUPPORTED);
6638 if (cp->val != 0x00 && cp->val != 0x01)
6639 return mgmt_cmd_status(sk, hdev->id,
6640 MGMT_OP_SET_FAST_CONNECTABLE,
6641 MGMT_STATUS_INVALID_PARAMS);
6645 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6646 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6650 if (!hdev_is_powered(hdev)) {
6651 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6652 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6653 new_settings(hdev, sk);
6657 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6662 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6663 fast_connectable_complete);
6666 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6667 MGMT_STATUS_FAILED);
6670 mgmt_pending_free(cmd);
6674 hci_dev_unlock(hdev);
/* cmd_sync completion for Set BR/EDR: on failure roll back the
 * HCI_BREDR_ENABLED flag that set_bredr() speculatively set and report
 * the error; on success answer the command and emit New Settings. */
6679 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6681 struct mgmt_pending_cmd *cmd = data;
6683 bt_dev_dbg(hdev, "err %d", err);
6686 u8 mgmt_err = mgmt_status(err);
6688 /* We need to restore the flag if related HCI commands
6691 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6693 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6695 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6696 new_settings(hdev, cmd->sk);
6699 mgmt_pending_free(cmd);
/* cmd_sync work for enabling BR/EDR: clear fast connectable, refresh
 * page/inquiry scan, and update the LE advertising data flags (the scan
 * response is unaffected, as noted below).
 * NOTE(review): the conditionals between the calls are elided here.
 */
6702 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6706 status = hci_write_fast_connectable_sync(hdev, false);
6709 status = hci_update_scan_sync(hdev);
6711 /* Since only the advertising data flags will change, there
6712 * is no need to update the scan response data.
6715 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_BREDR handler: toggle BR/EDR support on a dual-mode
 * controller. Requires both BR/EDR and LE hardware support and LE
 * enabled. Powered-off changes only flip flags (disabling also clears
 * the BR/EDR-dependent flags). While powered, disabling is rejected, and
 * re-enabling is rejected when a static address or Secure Connections is
 * in use (see the long comment below). Otherwise the HCI_BREDR_ENABLED
 * flag is set optimistically and set_bredr_sync() is queued
 * (set_bredr_complete() rolls the flag back on failure).
 * NOTE(review): hci_dev_lock(), gotos and the final return are elided.
 */
6720 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6722 struct mgmt_mode *cp = data;
6723 struct mgmt_pending_cmd *cmd;
6726 bt_dev_dbg(hdev, "sock %p", sk);
6728 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6729 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6730 MGMT_STATUS_NOT_SUPPORTED);
6732 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6733 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6734 MGMT_STATUS_REJECTED);
6736 if (cp->val != 0x00 && cp->val != 0x01)
6737 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6738 MGMT_STATUS_INVALID_PARAMS);
6742 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6743 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6747 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR also clears every BR/EDR-only setting. */
6749 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6750 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6751 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6752 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6753 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6756 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6758 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6762 err = new_settings(hdev, sk);
6766 /* Reject disabling when powered on */
6768 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6769 MGMT_STATUS_REJECTED);
6772 /* When configuring a dual-mode controller to operate
6773 * with LE only and using a static address, then switching
6774 * BR/EDR back on is not allowed.
6776 * Dual-mode controllers shall operate with the public
6777 * address as its identity address for BR/EDR and LE. So
6778 * reject the attempt to create an invalid configuration.
6780 * The same restrictions applies when secure connections
6781 * has been enabled. For BR/EDR this is a controller feature
6782 * while for LE it is a host stack feature. This means that
6783 * switching BR/EDR back on when secure connections has been
6784 * enabled is not a supported transaction.
6786 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6787 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6788 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6789 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6790 MGMT_STATUS_REJECTED);
6795 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6799 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6800 set_bredr_complete);
6803 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6804 MGMT_STATUS_FAILED);
6806 mgmt_pending_free(cmd);
6811 /* We need to flip the bit already here so that
6812 * hci_req_update_adv_data generates the correct flags.
6814 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6817 hci_dev_unlock(hdev);
/* cmd_sync completion for Set Secure Connections: on failure report the
 * error and clear both SC flags; on success set HCI_SC_ENABLED and
 * set/clear HCI_SC_ONLY according to the requested value, then answer the
 * command and emit New Settings.
 * NOTE(review): the switch on cp->val between the flag updates is elided.
 */
6821 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6823 struct mgmt_pending_cmd *cmd = data;
6824 struct mgmt_mode *cp;
6826 bt_dev_dbg(hdev, "err %d", err);
6829 u8 mgmt_err = mgmt_status(err);
6831 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
/* Error path: SC fully disabled. */
6839 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6840 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x01: SC enabled, SC-only off. */
6843 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6844 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x02: SC enabled in SC-only mode. */
6847 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6848 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6852 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6853 new_settings(hdev, cmd->sk);
6856 mgmt_pending_free(cmd);
/* cmd_sync work for Set Secure Connections: temporarily set
 * HCI_SC_ENABLED so the support write is not skipped as a no-op, then
 * write the requested SC support value to the controller. */
6859 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6861 struct mgmt_pending_cmd *cmd = data;
6862 struct mgmt_mode *cp = cmd->param;
6865 /* Force write of val */
6866 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6868 return hci_write_sc_support_sync(hdev, val);
/* MGMT_OP_SET_SECURE_CONN handler. val: 0x00 off, 0x01 on, 0x02 SC-only.
 * Requires either controller SC support or LE enabled; with BR/EDR
 * enabled, controller SC support additionally requires SSP. When powered
 * off, not SC-capable, or BR/EDR disabled, only the flags are toggled
 * (LE SC is a host feature); otherwise the controller write is queued
 * via set_secure_conn_sync().
 * NOTE(review): hci_dev_lock(), gotos and the final return are elided.
 */
6871 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6872 void *data, u16 len)
6874 struct mgmt_mode *cp = data;
6875 struct mgmt_pending_cmd *cmd;
6879 bt_dev_dbg(hdev, "sock %p", sk);
6881 if (!lmp_sc_capable(hdev) &&
6882 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6883 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6884 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR SC is layered on SSP: reject SC without SSP enabled. */
6886 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6887 lmp_sc_capable(hdev) &&
6888 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6889 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6890 MGMT_STATUS_REJECTED);
6892 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6893 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6894 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no controller interaction needed. */
6898 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6899 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6903 changed = !hci_dev_test_and_set_flag(hdev,
6905 if (cp->val == 0x02)
6906 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6908 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6910 changed = hci_dev_test_and_clear_flag(hdev,
6912 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6915 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6920 err = new_settings(hdev, sk);
/* Already in the requested state (including the SC-only bit). */
6927 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6928 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6929 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6933 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6937 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6938 set_secure_conn_complete);
6941 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6942 MGMT_STATUS_FAILED);
6944 mgmt_pending_free(cmd);
6948 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.
 * val: 0x00 = discard debug keys, 0x01 = keep them, 0x02 = keep and
 * actively use debug keys (HCI_USE_DEBUG_KEYS). When the "use" state
 * changes on a powered controller with SSP enabled, the SSP debug mode
 * is toggled directly via HCI_OP_WRITE_SSP_DEBUG_MODE (fire-and-forget;
 * return value not checked). Emits New Settings only if the "keep" flag
 * actually changed.
 */
6952 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6953 void *data, u16 len)
6955 struct mgmt_mode *cp = data;
6956 bool changed, use_changed;
6959 bt_dev_dbg(hdev, "sock %p", sk);
6961 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6962 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6963 MGMT_STATUS_INVALID_PARAMS);
/* Non-zero val keeps debug keys in the key store */
6968 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6970 changed = hci_dev_test_and_clear_flag(hdev,
6971 HCI_KEEP_DEBUG_KEYS);
/* 0x02 additionally makes the controller use the debug key pair */
6973 if (cp->val == 0x02)
6974 use_changed = !hci_dev_test_and_set_flag(hdev,
6975 HCI_USE_DEBUG_KEYS);
6977 use_changed = hci_dev_test_and_clear_flag(hdev,
6978 HCI_USE_DEBUG_KEYS);
6980 if (hdev_is_powered(hdev) && use_changed &&
6981 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6982 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6983 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6984 sizeof(mode), &mode);
6987 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6992 err = new_settings(hdev, sk);
6995 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler.
 * Only allowed while powered off (REJECTED otherwise) and on LE-capable
 * controllers. privacy: 0x00 = off, 0x01 = on, 0x02 = limited privacy.
 * Enabling copies the caller-supplied IRK into hdev->irk, marks the RPA
 * expired so a fresh one is generated, and propagates RPA expiry to all
 * advertising instances; disabling clears all of that. Emits New Settings
 * when the privacy flag actually changed.
 */
6999 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7002 struct mgmt_cp_set_privacy *cp = cp_data;
7006 bt_dev_dbg(hdev, "sock %p", sk);
7008 if (!lmp_le_capable(hdev))
7009 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7010 MGMT_STATUS_NOT_SUPPORTED);
7012 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7013 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7014 MGMT_STATUS_INVALID_PARAMS);
/* Privacy can only be reconfigured while the controller is off */
7016 if (hdev_is_powered(hdev))
7017 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7018 MGMT_STATUS_REJECTED);
7022 /* If user space supports this command it is also expected to
7023 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7025 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: install IRK and force regeneration of the RPA */
7028 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7029 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7030 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7031 hci_adv_instances_set_rpa_expired(hdev, true);
7032 if (cp->privacy == 0x02)
7033 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7035 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: wipe the IRK and all privacy-related flags */
7037 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7038 memset(hdev->irk, 0, sizeof(hdev->irk));
7039 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7040 hci_adv_instances_set_rpa_expired(hdev, false);
7041 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7044 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7049 err = new_settings(hdev, sk);
7052 hci_dev_unlock(hdev);
/* Validate the address type of a single IRK entry from Load IRKs:
 * public LE addresses are accepted as-is; random LE addresses must be
 * static (two most significant bits of the MSB set per the LE address
 * format). Other address types fall through the switch.
 */
7056 static bool irk_is_valid(struct mgmt_irk_info *irk)
7058 switch (irk->addr.type) {
7059 case BDADDR_LE_PUBLIC:
7062 case BDADDR_LE_RANDOM:
7063 /* Two most significant bits shall be set */
7064 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler.
 * Validates the count against the maximum that fits in a u16-sized
 * message, checks the exact expected payload length (struct_size), and
 * validates every entry before mutating state — so a bad entry leaves
 * the existing IRK store untouched. Then clears the SMP IRK list and
 * re-adds all entries, skipping any key present on the blocked-key list.
 */
7072 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7075 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound keeps irk_count * entry size within a u16 message */
7076 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7077 sizeof(struct mgmt_irk_info));
7078 u16 irk_count, expected_len;
7081 bt_dev_dbg(hdev, "sock %p", sk);
7083 if (!lmp_le_capable(hdev))
7084 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7085 MGMT_STATUS_NOT_SUPPORTED);
7087 irk_count = __le16_to_cpu(cp->irk_count);
7088 if (irk_count > max_irk_count) {
7089 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7091 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7092 MGMT_STATUS_INVALID_PARAMS);
/* Payload length must match the declared entry count exactly */
7095 expected_len = struct_size(cp, irks, irk_count);
7096 if (expected_len != len) {
7097 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7099 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7100 MGMT_STATUS_INVALID_PARAMS);
7103 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate all entries up front; fail the whole command on any bad one */
7105 for (i = 0; i < irk_count; i++) {
7106 struct mgmt_irk_info *key = &cp->irks[i];
7108 if (!irk_is_valid(key))
7109 return mgmt_cmd_status(sk, hdev->id,
7111 MGMT_STATUS_INVALID_PARAMS);
/* Replace the whole IRK store with the supplied list */
7116 hci_smp_irks_clear(hdev);
7118 for (i = 0; i < irk_count; i++) {
7119 struct mgmt_irk_info *irk = &cp->irks[i];
/* Administratively blocked keys are skipped, not loaded */
7121 if (hci_is_blocked_key(hdev,
7122 HCI_BLOCKED_KEY_TYPE_IRK,
7124 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7129 hci_add_irk(hdev, &irk->addr.bdaddr,
7130 le_addr_type(irk->addr.type), irk->val,
/* Userspace loading IRKs implies it handles RPA resolution */
7134 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7136 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7138 hci_dev_unlock(hdev);
/* Validate one LTK entry from Load Long Term Keys: the initiator field
 * must be a strict boolean (0x00/0x01) and, like IRKs, random LE
 * addresses must be static (top two bits of the MSB set).
 */
7143 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7145 if (key->initiator != 0x00 && key->initiator != 0x01)
7148 switch (key->addr.type) {
7149 case BDADDR_LE_PUBLIC:
7152 case BDADDR_LE_RANDOM:
7153 /* Two most significant bits shall be set */
7154 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 * Same validate-then-replace pattern as load_irks: count bound, exact
 * length check, per-entry validation before any mutation, then the SMP
 * LTK store is cleared and repopulated. Each mgmt key type is mapped to
 * an SMP key type + authenticated flag; blocked keys are skipped with a
 * warning.
 */
7162 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7163 void *cp_data, u16 len)
7165 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound keeps key_count * entry size within a u16 message */
7166 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7167 sizeof(struct mgmt_ltk_info));
7168 u16 key_count, expected_len;
7171 bt_dev_dbg(hdev, "sock %p", sk);
7173 if (!lmp_le_capable(hdev))
7174 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7175 MGMT_STATUS_NOT_SUPPORTED);
7177 key_count = __le16_to_cpu(cp->key_count);
7178 if (key_count > max_key_count) {
7179 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7182 MGMT_STATUS_INVALID_PARAMS);
7185 expected_len = struct_size(cp, keys, key_count);
7186 if (expected_len != len) {
7187 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7189 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7190 MGMT_STATUS_INVALID_PARAMS);
7193 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate all entries before touching the existing key store */
7195 for (i = 0; i < key_count; i++) {
7196 struct mgmt_ltk_info *key = &cp->keys[i];
7198 if (!ltk_is_valid(key))
7199 return mgmt_cmd_status(sk, hdev->id,
7200 MGMT_OP_LOAD_LONG_TERM_KEYS,
7201 MGMT_STATUS_INVALID_PARAMS);
7206 hci_smp_ltks_clear(hdev);
7208 for (i = 0; i < key_count; i++) {
7209 struct mgmt_ltk_info *key = &cp->keys[i];
7210 u8 type, authenticated;
/* Administratively blocked keys are skipped, not loaded */
7212 if (hci_is_blocked_key(hdev,
7213 HCI_BLOCKED_KEY_TYPE_LTK,
7215 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map mgmt key type to SMP key type and authentication level;
 * legacy keys additionally encode the pairing role via 'initiator'.
 */
7220 switch (key->type) {
7221 case MGMT_LTK_UNAUTHENTICATED:
7222 authenticated = 0x00;
7223 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7225 case MGMT_LTK_AUTHENTICATED:
7226 authenticated = 0x01;
7227 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7229 case MGMT_LTK_P256_UNAUTH:
7230 authenticated = 0x00;
7231 type = SMP_LTK_P256;
7233 case MGMT_LTK_P256_AUTH:
7234 authenticated = 0x01;
7235 type = SMP_LTK_P256;
7237 case MGMT_LTK_P256_DEBUG:
7238 authenticated = 0x00;
7239 type = SMP_LTK_P256_DEBUG;
7245 hci_add_ltk(hdev, &key->addr.bdaddr,
7246 le_addr_type(key->addr.type), type, authenticated,
7247 key->val, key->enc_size, key->ediv, key->rand);
7250 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7253 hci_dev_unlock(hdev);
/* hci_cmd_sync completion callback for MGMT_OP_GET_CONN_INFO.
 * On success, replies with the RSSI / TX power / max TX power that
 * get_conn_info_sync refreshed into the hci_conn; on failure replies
 * with the "invalid" sentinel values so userspace can distinguish
 * stale data from real readings.
 */
7258 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7260 struct mgmt_pending_cmd *cmd = data;
7261 struct hci_conn *conn = cmd->user_data;
7262 struct mgmt_cp_get_conn_info *cp = cmd->param;
7263 struct mgmt_rp_get_conn_info rp;
7266 bt_dev_dbg(hdev, "err %d", err);
/* Echo the requested address back in the reply */
7268 memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
7270 status = mgmt_status(err);
7271 if (status == MGMT_STATUS_SUCCESS) {
7272 rp.rssi = conn->rssi;
7273 rp.tx_power = conn->tx_power;
7274 rp.max_tx_power = conn->max_tx_power;
/* Failure: report sentinel values instead of possibly-stale data */
7276 rp.rssi = HCI_RSSI_INVALID;
7277 rp.tx_power = HCI_TX_POWER_INVALID;
7278 rp.max_tx_power = HCI_TX_POWER_INVALID;
7281 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7284 mgmt_pending_free(cmd);
/* hci_cmd_sync work function for MGMT_OP_GET_CONN_INFO.
 * Re-resolves the connection (it may have dropped since the request was
 * queued), then refreshes RSSI every time, TX power only when unknown or
 * for BR/EDR links (LE TX power is constant once read), and max TX power
 * only once per connection. Stashes the hci_conn in cmd->user_data for
 * the completion callback.
 */
7287 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7289 struct mgmt_pending_cmd *cmd = data;
7290 struct mgmt_cp_get_conn_info *cp = cmd->param;
7291 struct hci_conn *conn;
7295 /* Make sure we are still connected */
7296 if (cp->addr.type == BDADDR_BREDR)
7297 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7300 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7302 if (!conn || conn->state != BT_CONNECTED)
7303 return MGMT_STATUS_NOT_CONNECTED;
7305 cmd->user_data = conn;
7306 handle = cpu_to_le16(conn->handle);
7308 /* Refresh RSSI each time */
7309 err = hci_read_rssi_sync(hdev, handle);
7311 /* For LE links TX power does not change thus we don't need to
7312 * query for it once value is known.
7314 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7315 conn->tx_power == HCI_TX_POWER_INVALID))
7316 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7318 /* Max TX power needs to be read only once per connection */
7319 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7320 err = hci_read_tx_power_sync(hdev, handle, 0x01);
/* MGMT_OP_GET_CONN_INFO handler.
 * Validates the address type and power/connection state, then either
 * serves cached RSSI/TX-power values from the hci_conn or — when the
 * cache is older than a randomized threshold between hdev's
 * conn_info_min_age and conn_info_max_age (randomized so clients cannot
 * learn the polling cadence) — queues a refresh via get_conn_info_sync.
 */
7325 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7328 struct mgmt_cp_get_conn_info *cp = data;
7329 struct mgmt_rp_get_conn_info rp;
7330 struct hci_conn *conn;
7331 unsigned long conn_info_age;
7334 bt_dev_dbg(hdev, "sock %p", sk);
7336 memset(&rp, 0, sizeof(rp));
7337 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7338 rp.addr.type = cp->addr.type;
7340 if (!bdaddr_type_is_valid(cp->addr.type))
7341 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7342 MGMT_STATUS_INVALID_PARAMS,
7347 if (!hdev_is_powered(hdev)) {
7348 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7349 MGMT_STATUS_NOT_POWERED, &rp,
/* BR/EDR addresses resolve over ACL; everything else over LE */
7354 if (cp->addr.type == BDADDR_BREDR)
7355 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7358 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7360 if (!conn || conn->state != BT_CONNECTED) {
7361 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7362 MGMT_STATUS_NOT_CONNECTED, &rp,
7367 /* To avoid client trying to guess when to poll again for information we
7368 * calculate conn info age as random value between min/max set in hdev.
7370 conn_info_age = hdev->conn_info_min_age +
7371 prandom_u32_max(hdev->conn_info_max_age -
7372 hdev->conn_info_min_age);
7374 /* Query controller to refresh cached values if they are too old or were
7377 if (time_after(jiffies, conn->conn_info_timestamp +
7378 msecs_to_jiffies(conn_info_age)) ||
7379 !conn->conn_info_timestamp) {
7380 struct mgmt_pending_cmd *cmd;
7382 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7387 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7388 cmd, get_conn_info_complete);
/* Queueing failed: report failure and drop the pending command */
7392 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7393 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7396 mgmt_pending_free(cmd);
7401 conn->conn_info_timestamp = jiffies;
7403 /* Cache is valid, just reply with values cached in hci_conn */
7404 rp.rssi = conn->rssi;
7405 rp.tx_power = conn->tx_power;
7406 rp.max_tx_power = conn->max_tx_power;
7408 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7409 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7413 hci_dev_unlock(hdev);
/* hci_cmd_sync completion callback for MGMT_OP_GET_CLOCK_INFO.
 * Builds the reply from hdev->clock (local clock) and, when a connection
 * was involved, the per-connection piconet clock and accuracy, then
 * completes the pending command and frees it.
 */
7417 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7419 struct mgmt_pending_cmd *cmd = data;
7420 struct mgmt_cp_get_clock_info *cp = cmd->param;
7421 struct mgmt_rp_get_clock_info rp;
7422 struct hci_conn *conn = cmd->user_data;
7423 u8 status = mgmt_status(err);
7425 bt_dev_dbg(hdev, "err %d", err);
/* Echo the requested address back in the reply */
7427 memset(&rp, 0, sizeof(rp));
7428 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7429 rp.addr.type = cp->addr.type;
7434 rp.local_clock = cpu_to_le32(hdev->clock);
/* Piconet clock/accuracy only available when a connection was queried */
7437 rp.piconet_clock = cpu_to_le32(conn->clock);
7438 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7442 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7445 mgmt_pending_free(cmd);
/* hci_cmd_sync work function for MGMT_OP_GET_CLOCK_INFO.
 * First reads the local clock (zeroed hci_cp => handle 0, which 0x00;
 * return value intentionally ignored since the local clock is best
 * effort here), then re-resolves the ACL connection and reads the
 * piconet clock (which = 0x01) for its handle.
 */
7448 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7450 struct mgmt_pending_cmd *cmd = data;
7451 struct mgmt_cp_get_clock_info *cp = cmd->param;
7452 struct hci_cp_read_clock hci_cp;
7453 struct hci_conn *conn;
/* Zeroed request = read local clock (handle 0, which 0x00) */
7455 memset(&hci_cp, 0, sizeof(hci_cp));
7456 hci_read_clock_sync(hdev, &hci_cp);
7458 /* Make sure connection still exists */
7459 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7460 if (!conn || conn->state != BT_CONNECTED)
7461 return MGMT_STATUS_NOT_CONNECTED;
7463 cmd->user_data = conn;
7464 hci_cp.handle = cpu_to_le16(conn->handle);
7465 hci_cp.which = 0x01; /* Piconet clock */
7467 return hci_read_clock_sync(hdev, &hci_cp);
/* MGMT_OP_GET_CLOCK_INFO handler.
 * BR/EDR only. A non-ANY address must map to an established ACL
 * connection; BDADDR_ANY requests just the local clock. The actual
 * clock reads happen in get_clock_info_sync via hci_cmd_sync.
 */
7470 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7473 struct mgmt_cp_get_clock_info *cp = data;
7474 struct mgmt_rp_get_clock_info rp;
7475 struct mgmt_pending_cmd *cmd;
7476 struct hci_conn *conn;
7479 bt_dev_dbg(hdev, "sock %p", sk);
7481 memset(&rp, 0, sizeof(rp));
7482 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7483 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept */
7485 if (cp->addr.type != BDADDR_BREDR)
7486 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7487 MGMT_STATUS_INVALID_PARAMS,
7492 if (!hdev_is_powered(hdev)) {
7493 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7494 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-ANY address: require a live ACL connection to that peer */
7499 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7500 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7502 if (!conn || conn->state != BT_CONNECTED) {
7503 err = mgmt_cmd_complete(sk, hdev->id,
7504 MGMT_OP_GET_CLOCK_INFO,
7505 MGMT_STATUS_NOT_CONNECTED,
7513 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7517 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7518 get_clock_info_complete);
/* Queueing failed: report failure and drop the pending command */
7521 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7522 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7525 mgmt_pending_free(cmd);
7530 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists and
 * is fully established (state BT_CONNECTED, matching dst_type).
 */
7534 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7536 struct hci_conn *conn;
7538 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7542 if (conn->dst_type != type)
7545 if (conn->state != BT_CONNECTED)
7551 /* This function requires the caller holds hdev->lock */
/* Set (creating if needed) the auto-connect policy for an LE peer and
 * re-home the params entry on the matching action list:
 * pend_le_conns for direct/always (unless already connected) and for an
 * in-flight explicit connect, pend_le_reports for report mode.
 */
7552 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7553 u8 addr_type, u8 auto_connect)
7555 struct hci_conn_params *params;
7557 params = hci_conn_params_add(hdev, addr, addr_type);
/* No-op when the policy is already as requested */
7561 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on before re-adding */
7564 list_del_init(&params->action);
7566 switch (auto_connect) {
7567 case HCI_AUTO_CONN_DISABLED:
7568 case HCI_AUTO_CONN_LINK_LOSS:
7569 /* If auto connect is being disabled when we're trying to
7570 * connect to device, keep connecting.
7572 if (params->explicit_connect)
7573 list_add(&params->action, &hdev->pend_le_conns);
7575 case HCI_AUTO_CONN_REPORT:
7576 if (params->explicit_connect)
7577 list_add(&params->action, &hdev->pend_le_conns);
7579 list_add(&params->action, &hdev->pend_le_reports);
7581 case HCI_AUTO_CONN_DIRECT:
7582 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a connection attempt if not already connected */
7583 if (!is_connected(hdev, addr, addr_type))
7584 list_add(&params->action, &hdev->pend_le_conns);
7588 params->auto_connect = auto_connect;
7590 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7591 addr, addr_type, auto_connect);
/* Emit MGMT_EV_DEVICE_ADDED to all mgmt listeners except the
 * originating socket (sk is the skip-socket for mgmt_event).
 */
7596 static void device_added(struct sock *sk, struct hci_dev *hdev,
7597 bdaddr_t *bdaddr, u8 type, u8 action)
7599 struct mgmt_ev_device_added ev;
7601 bacpy(&ev.addr.bdaddr, bdaddr);
7602 ev.addr.type = type;
7605 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync work: refresh passive scanning after an Add Device
 * changed the connection-parameter / accept lists.
 */
7608 static int add_device_sync(struct hci_dev *hdev, void *data)
7610 return hci_update_passive_scan_sync(hdev);
/* MGMT_OP_ADD_DEVICE handler.
 * action: 0x00 = background scan (report), 0x01 = allow incoming
 * (BR/EDR) / direct connect (LE), 0x02 = auto-connect. BR/EDR entries
 * go on the accept list (only action 0x01 supported); LE entries must be
 * identity addresses and are stored as conn params via
 * hci_conn_params_set. Finishes by kicking a passive-scan update and
 * emitting Device Added + Device Flags Changed.
 */
7613 static int add_device(struct sock *sk, struct hci_dev *hdev,
7614 void *data, u16 len)
7616 struct mgmt_cp_add_device *cp = data;
7617 u8 auto_conn, addr_type;
7618 struct hci_conn_params *params;
7620 u32 current_flags = 0;
7621 u32 supported_flags;
7623 bt_dev_dbg(hdev, "sock %p", sk);
7625 if (!bdaddr_type_is_valid(cp->addr.type) ||
7626 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7627 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7628 MGMT_STATUS_INVALID_PARAMS,
7629 &cp->addr, sizeof(cp->addr));
7631 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7632 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7633 MGMT_STATUS_INVALID_PARAMS,
7634 &cp->addr, sizeof(cp->addr));
7638 if (cp->addr.type == BDADDR_BREDR) {
7639 /* Only incoming connections action is supported for now */
7640 if (cp->action != 0x01) {
7641 err = mgmt_cmd_complete(sk, hdev->id,
7643 MGMT_STATUS_INVALID_PARAMS,
7644 &cp->addr, sizeof(cp->addr));
7648 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
/* Refresh page-scan state now that the accept list changed */
7654 hci_update_scan(hdev);
/* LE path from here on */
7659 addr_type = le_addr_type(cp->addr.type);
7661 if (cp->action == 0x02)
7662 auto_conn = HCI_AUTO_CONN_ALWAYS;
7663 else if (cp->action == 0x01)
7664 auto_conn = HCI_AUTO_CONN_DIRECT;
7666 auto_conn = HCI_AUTO_CONN_REPORT;
7668 /* Kernel internally uses conn_params with resolvable private
7669 * address, but Add Device allows only identity addresses.
7670 * Make sure it is enforced before calling
7671 * hci_conn_params_lookup.
7673 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7674 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7675 MGMT_STATUS_INVALID_PARAMS,
7676 &cp->addr, sizeof(cp->addr));
7680 /* If the connection parameters don't exist for this device,
7681 * they will be created and configured with defaults.
7683 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7685 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7686 MGMT_STATUS_FAILED, &cp->addr,
7690 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7693 current_flags = params->flags;
/* Passive-scan update is deferred to the cmd_sync context */
7696 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7701 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7702 supported_flags = hdev->conn_flags;
7703 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7704 supported_flags, current_flags);
7706 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7707 MGMT_STATUS_SUCCESS, &cp->addr,
7711 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_REMOVED to all mgmt listeners except the
 * originating socket.
 */
7715 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7716 bdaddr_t *bdaddr, u8 type)
7718 struct mgmt_ev_device_removed ev;
7720 bacpy(&ev.addr.bdaddr, bdaddr);
7721 ev.addr.type = type;
7723 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync work: refresh passive scanning after a Remove Device
 * changed the connection-parameter / accept lists.
 */
7726 static int remove_device_sync(struct hci_dev *hdev, void *data)
7728 return hci_update_passive_scan_sync(hdev);
/* MGMT_OP_REMOVE_DEVICE handler.
 * Non-ANY address: remove one device — BR/EDR from the accept list,
 * LE (identity addresses only) from the conn-param store, rejecting
 * entries that were never added via Add Device (DISABLED/EXPLICIT).
 * BDADDR_ANY with type 0: wipe the whole BR/EDR accept list and every
 * LE conn-param entry except disabled ones, preserving in-flight
 * explicit connects by demoting them to HCI_AUTO_CONN_EXPLICIT.
 * Ends with a deferred passive-scan refresh.
 */
7731 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7732 void *data, u16 len)
7734 struct mgmt_cp_remove_device *cp = data;
7737 bt_dev_dbg(hdev, "sock %p", sk);
/* Single-device removal path */
7741 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7742 struct hci_conn_params *params;
7745 if (!bdaddr_type_is_valid(cp->addr.type)) {
7746 err = mgmt_cmd_complete(sk, hdev->id,
7747 MGMT_OP_REMOVE_DEVICE,
7748 MGMT_STATUS_INVALID_PARAMS,
7749 &cp->addr, sizeof(cp->addr));
7753 if (cp->addr.type == BDADDR_BREDR) {
7754 err = hci_bdaddr_list_del(&hdev->accept_list,
/* Not on the accept list: nothing to remove */
7758 err = mgmt_cmd_complete(sk, hdev->id,
7759 MGMT_OP_REMOVE_DEVICE,
7760 MGMT_STATUS_INVALID_PARAMS,
7766 hci_update_scan(hdev);
7768 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path */
7773 addr_type = le_addr_type(cp->addr.type);
7775 /* Kernel internally uses conn_params with resolvable private
7776 * address, but Remove Device allows only identity addresses.
7777 * Make sure it is enforced before calling
7778 * hci_conn_params_lookup.
7780 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7781 err = mgmt_cmd_complete(sk, hdev->id,
7782 MGMT_OP_REMOVE_DEVICE,
7783 MGMT_STATUS_INVALID_PARAMS,
7784 &cp->addr, sizeof(cp->addr));
7788 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7791 err = mgmt_cmd_complete(sk, hdev->id,
7792 MGMT_OP_REMOVE_DEVICE,
7793 MGMT_STATUS_INVALID_PARAMS,
7794 &cp->addr, sizeof(cp->addr));
/* Entries not created via Add Device cannot be removed this way */
7798 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7799 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7800 err = mgmt_cmd_complete(sk, hdev->id,
7801 MGMT_OP_REMOVE_DEVICE,
7802 MGMT_STATUS_INVALID_PARAMS,
7803 &cp->addr, sizeof(cp->addr));
7807 list_del(&params->action);
7808 list_del(&params->list);
7811 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: remove everything (type must be 0) */
7813 struct hci_conn_params *p, *tmp;
7814 struct bdaddr_list *b, *btmp;
7816 if (cp->addr.type) {
7817 err = mgmt_cmd_complete(sk, hdev->id,
7818 MGMT_OP_REMOVE_DEVICE,
7819 MGMT_STATUS_INVALID_PARAMS,
7820 &cp->addr, sizeof(cp->addr));
7824 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7825 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7830 hci_update_scan(hdev);
7832 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7833 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7835 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep in-flight explicit connects alive, just demote them */
7836 if (p->explicit_connect) {
7837 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7840 list_del(&p->action);
7845 bt_dev_dbg(hdev, "All LE connection parameters were removed");
/* Refresh passive scanning in cmd_sync context after list changes */
7848 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7851 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7852 MGMT_STATUS_SUCCESS, &cp->addr,
7855 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler.
 * After count and exact-length validation, clears disabled conn-param
 * entries and loads each supplied LE connection parameter set. Unlike
 * the key loaders, invalid entries here are logged and skipped rather
 * than failing the whole command.
 */
7859 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7862 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeps param_count * entry size within a u16 message */
7863 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7864 sizeof(struct mgmt_conn_param));
7865 u16 param_count, expected_len;
7868 if (!lmp_le_capable(hdev))
7869 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7870 MGMT_STATUS_NOT_SUPPORTED);
7872 param_count = __le16_to_cpu(cp->param_count);
7873 if (param_count > max_param_count) {
7874 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7876 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7877 MGMT_STATUS_INVALID_PARAMS);
7880 expected_len = struct_size(cp, params, param_count);
7881 if (expected_len != len) {
7882 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7884 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7885 MGMT_STATUS_INVALID_PARAMS);
7888 bt_dev_dbg(hdev, "param_count %u", param_count);
/* Drop stale disabled entries before loading the new set */
7892 hci_conn_params_clear_disabled(hdev);
7894 for (i = 0; i < param_count; i++) {
7895 struct mgmt_conn_param *param = &cp->params[i];
7896 struct hci_conn_params *hci_param;
7897 u16 min, max, latency, timeout;
7900 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7903 if (param->addr.type == BDADDR_LE_PUBLIC) {
7904 addr_type = ADDR_LE_DEV_PUBLIC;
7905 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7906 addr_type = ADDR_LE_DEV_RANDOM;
/* Bad address type: skip this entry, keep processing the rest */
7908 bt_dev_err(hdev, "ignoring invalid connection parameters");
7912 min = le16_to_cpu(param->min_interval);
7913 max = le16_to_cpu(param->max_interval);
7914 latency = le16_to_cpu(param->latency);
7915 timeout = le16_to_cpu(param->timeout);
7917 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7918 min, max, latency, timeout);
/* Range/consistency check per the LE connection parameter limits */
7920 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7921 bt_dev_err(hdev, "ignoring invalid connection parameters");
7925 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7928 bt_dev_err(hdev, "failed to add connection parameters");
7932 hci_param->conn_min_interval = min;
7933 hci_param->conn_max_interval = max;
7934 hci_param->conn_latency = latency;
7935 hci_param->supervision_timeout = timeout;
7938 hci_dev_unlock(hdev);
7940 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler.
 * Only valid while powered off and only on controllers with the
 * EXTERNAL_CONFIG quirk. Toggles HCI_EXT_CONFIGURED and, when the
 * resulting configured state flips the HCI_UNCONFIGURED flag, migrates
 * the mgmt index: removes it, flips UNCONFIGURED, and either powers the
 * device on (now configured) or marks it raw, then re-adds the index.
 */
7944 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7945 void *data, u16 len)
7947 struct mgmt_cp_set_external_config *cp = data;
7951 bt_dev_dbg(hdev, "sock %p", sk);
7953 if (hdev_is_powered(hdev))
7954 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7955 MGMT_STATUS_REJECTED);
7957 if (cp->config != 0x00 && cp->config != 0x01)
7958 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7959 MGMT_STATUS_INVALID_PARAMS);
7961 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7962 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7963 MGMT_STATUS_NOT_SUPPORTED);
7968 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7970 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7972 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7979 err = new_options(hdev, sk);
/* If configured-ness changed relative to UNCONFIGURED, move the index
 * between the configured and unconfigured index lists.
 */
7981 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7982 mgmt_index_removed(hdev);
7984 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7985 hci_dev_set_flag(hdev, HCI_CONFIG);
7986 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7988 queue_work(hdev->req_workqueue, &hdev->power_on);
7990 set_bit(HCI_RAW, &hdev->flags);
7991 mgmt_index_added(hdev);
7996 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler.
 * Only valid while powered off, with a non-ANY address, on drivers that
 * implement set_bdaddr. Stores the address in hdev->public_addr; if the
 * device thereby becomes fully configured, the index migrates from the
 * unconfigured to the configured list and the device is powered on.
 */
8000 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8001 void *data, u16 len)
8003 struct mgmt_cp_set_public_address *cp = data;
8007 bt_dev_dbg(hdev, "sock %p", sk);
8009 if (hdev_is_powered(hdev))
8010 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8011 MGMT_STATUS_REJECTED);
8013 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8014 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8015 MGMT_STATUS_INVALID_PARAMS);
/* Requires driver support for programming the public address */
8017 if (!hdev->set_bdaddr)
8018 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8019 MGMT_STATUS_NOT_SUPPORTED);
8023 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8024 bacpy(&hdev->public_addr, &cp->bdaddr);
8026 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8033 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8034 err = new_options(hdev, sk);
/* Becoming configured: re-register the index and power on */
8036 if (is_configured(hdev)) {
8037 mgmt_index_removed(hdev);
8039 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8041 hci_dev_set_flag(hdev, HCI_CONFIG);
8042 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8044 queue_work(hdev->req_workqueue, &hdev->power_on);
8048 hci_dev_unlock(hdev);
/* Completion callback for the BR/EDR leg of MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 * Validates the HCI reply skb, picks the 192-bit-only or 192+256-bit
 * layout depending on bredr_sc_enabled()/HCI_SC_ONLY, builds an EIR
 * payload containing class of device plus the SSP hash/randomizer
 * values, completes the mgmt command, and broadcasts
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to other interested sockets.
 * NOTE(review): assignments of h192/r192/h256/r256 fall in gaps of this
 * extract; they are presumably taken from the parsed rp — confirm.
 */
8052 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8055 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8056 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8057 u8 *h192, *r192, *h256, *r256;
8058 struct mgmt_pending_cmd *cmd = data;
8059 struct sk_buff *skb = cmd->skb;
8060 u8 status = mgmt_status(err);
/* Bail out if this command is no longer the pending one */
8063 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8068 status = MGMT_STATUS_FAILED;
8069 else if (IS_ERR(skb))
8070 status = mgmt_status(PTR_ERR(skb));
/* First byte of the HCI event payload carries the HCI status */
8072 status = mgmt_status(skb->data[0]);
8075 bt_dev_dbg(hdev, "status %u", status);
8077 mgmt_cp = cmd->param;
8080 status = mgmt_status(status);
/* Controller without BR/EDR Secure Connections: 192-bit data only */
8087 } else if (!bredr_sc_enabled(hdev)) {
8088 struct hci_rp_read_local_oob_data *rp;
8090 if (skb->len != sizeof(*rp)) {
8091 status = MGMT_STATUS_FAILED;
8094 status = MGMT_STATUS_SUCCESS;
8095 rp = (void *)skb->data;
/* EIR: class-of-dev (5) + hash (18) + randomizer (18) */
8097 eir_len = 5 + 18 + 18;
/* SC-capable controller: extended reply with C256/R256 */
8104 struct hci_rp_read_local_oob_ext_data *rp;
8106 if (skb->len != sizeof(*rp)) {
8107 status = MGMT_STATUS_FAILED;
8110 status = MGMT_STATUS_SUCCESS;
8111 rp = (void *)skb->data;
/* SC-only mode exposes just the 256-bit values */
8113 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8114 eir_len = 5 + 18 + 18;
8118 eir_len = 5 + 18 + 18 + 18 + 18;
8128 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8135 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8136 hdev->dev_class, 3);
8139 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8140 EIR_SSP_HASH_C192, h192, 16);
8141 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8142 EIR_SSP_RAND_R192, r192, 16);
8146 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8147 EIR_SSP_HASH_C256, h256, 16);
8148 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8149 EIR_SSP_RAND_R256, r256, 16);
8153 mgmt_rp->type = mgmt_cp->type;
8154 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8156 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8157 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8158 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8159 if (err < 0 || status)
/* Opt the requester in, then fan the OOB data out to other listeners */
8162 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8164 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8165 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8166 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8168 if (skb && !IS_ERR(skb))
8172 mgmt_pending_remove(cmd);
/* Queue the asynchronous BR/EDR local-OOB read for
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA; on queueing failure the pending
 * command is removed again.
 */
8175 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8176 struct mgmt_cp_read_local_oob_ext_data *cp)
8178 struct mgmt_pending_cmd *cmd;
8181 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8186 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8187 read_local_oob_ext_data_complete);
8190 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 * cp->type is a bitmask of address types: BR/EDR alone defers to the
 * async read_local_ssp_oob_req path (when SSP is enabled); the LE
 * public|random combination is answered synchronously with an EIR blob
 * containing the LE address, role, optional SC confirm/random values,
 * and flags. Privacy mode is rejected because the active RPA cannot be
 * reported (see inline rationale). Errors fall through to a reply with
 * the accumulated 'status'.
 */
8197 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8198 void *data, u16 data_len)
8200 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8201 struct mgmt_rp_read_local_oob_ext_data *rp;
8204 u8 status, flags, role, addr[7], hash[16], rand[16];
8207 bt_dev_dbg(hdev, "sock %p", sk);
/* Pre-compute status/eir_len for the requested transport bitmask */
8209 if (hdev_is_powered(hdev)) {
8211 case BIT(BDADDR_BREDR):
8212 status = mgmt_bredr_support(hdev);
8218 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8219 status = mgmt_le_support(hdev);
/* addr (9) + role (3) + confirm (18) + random (18) + flags (3) */
8223 eir_len = 9 + 3 + 18 + 18 + 3;
8226 status = MGMT_STATUS_INVALID_PARAMS;
8231 status = MGMT_STATUS_NOT_POWERED;
8235 rp_len = sizeof(*rp) + eir_len;
8236 rp = kmalloc(rp_len, GFP_ATOMIC);
8240 if (!status && !lmp_ssp_capable(hdev)) {
8241 status = MGMT_STATUS_NOT_SUPPORTED;
8252 case BIT(BDADDR_BREDR):
/* BR/EDR OOB data comes from the controller, asynchronously */
8253 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8254 err = read_local_ssp_oob_req(hdev, sk, cp);
8255 hci_dev_unlock(hdev);
8259 status = MGMT_STATUS_FAILED;
8262 eir_len = eir_append_data(rp->eir, eir_len,
8264 hdev->dev_class, 3);
8267 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
/* LE SC OOB values are generated locally via SMP */
8268 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8269 smp_generate_oob(hdev, hash, rand) < 0) {
8270 hci_dev_unlock(hdev);
8271 status = MGMT_STATUS_FAILED;
8275 /* This should return the active RPA, but since the RPA
8276 * is only programmed on demand, it is really hard to fill
8277 * this in at the moment. For now disallow retrieving
8278 * local out-of-band data when privacy is in use.
8280 * Returning the identity address will not help here since
8281 * pairing happens before the identity resolving key is
8282 * known and thus the connection establishment happens
8283 * based on the RPA and not the identity address.
8285 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8286 hci_dev_unlock(hdev);
8287 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address per the usual LE rules */
8291 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8292 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8293 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8294 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8295 memcpy(addr, &hdev->static_addr, 6);
8298 memcpy(addr, &hdev->bdaddr, 6);
8302 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8303 addr, sizeof(addr));
/* Role byte depends on whether we are currently advertising */
8305 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8310 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8311 &role, sizeof(role));
8313 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8314 eir_len = eir_append_data(rp->eir, eir_len,
8316 hash, sizeof(hash));
8318 eir_len = eir_append_data(rp->eir, eir_len,
8320 rand, sizeof(rand));
8323 flags = mgmt_get_adv_discov_flags(hdev);
8325 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8326 flags |= LE_AD_NO_BREDR;
8328 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8329 &flags, sizeof(flags));
8333 hci_dev_unlock(hdev);
/* Requester opts into future OOB-data-updated events */
8335 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8337 status = MGMT_STATUS_SUCCESS;
8340 rp->type = cp->type;
8341 rp->eir_len = cpu_to_le16(eir_len);
8343 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8344 status, rp, sizeof(*rp) + eir_len);
8345 if (err < 0 || status)
8348 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8349 rp, sizeof(*rp) + eir_len,
8350 HCI_MGMT_OOB_DATA_EVENTS, sk);
8358 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8362 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8363 flags |= MGMT_ADV_FLAG_DISCOV;
8364 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8365 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8366 flags |= MGMT_ADV_FLAG_APPEARANCE;
8367 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8368 flags |= MGMT_ADV_PARAM_DURATION;
8369 flags |= MGMT_ADV_PARAM_TIMEOUT;
8370 flags |= MGMT_ADV_PARAM_INTERVALS;
8371 flags |= MGMT_ADV_PARAM_TX_POWER;
8372 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8374 /* In extended adv TX_POWER returned from Set Adv Param
8375 * will be always valid.
8377 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8378 flags |= MGMT_ADV_FLAG_TX_POWER;
8380 if (ext_adv_capable(hdev)) {
8381 flags |= MGMT_ADV_FLAG_SEC_1M;
8382 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8383 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8385 if (hdev->le_features[1] & HCI_LE_PHY_2M)
8386 flags |= MGMT_ADV_FLAG_SEC_2M;
8388 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8389 flags |= MGMT_ADV_FLAG_SEC_CODED;
8395 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8396 void *data, u16 data_len)
8398 struct mgmt_rp_read_adv_features *rp;
8401 struct adv_info *adv_instance;
8402 u32 supported_flags;
8405 bt_dev_dbg(hdev, "sock %p", sk);
8407 if (!lmp_le_capable(hdev))
8408 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8409 MGMT_STATUS_REJECTED);
8413 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8414 rp = kmalloc(rp_len, GFP_ATOMIC);
8416 hci_dev_unlock(hdev);
8420 supported_flags = get_supported_adv_flags(hdev);
8422 rp->supported_flags = cpu_to_le32(supported_flags);
8423 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
8424 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
8425 rp->max_instances = hdev->le_num_of_adv_sets;
8426 rp->num_instances = hdev->adv_instance_cnt;
8428 instance = rp->instance;
8429 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8430 /* Only instances 1-le_num_of_adv_sets are externally visible */
8431 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8432 *instance = adv_instance->instance;
8435 rp->num_instances--;
8440 hci_dev_unlock(hdev);
8442 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8443 MGMT_STATUS_SUCCESS, rp, rp_len);
8450 static u8 calculate_name_len(struct hci_dev *hdev)
8452 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
8454 return eir_append_local_name(hdev, buf, 0);
8457 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8460 u8 max_len = HCI_MAX_AD_LENGTH;
8463 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8464 MGMT_ADV_FLAG_LIMITED_DISCOV |
8465 MGMT_ADV_FLAG_MANAGED_FLAGS))
8468 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8471 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8472 max_len -= calculate_name_len(hdev);
8474 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8481 static bool flags_managed(u32 adv_flags)
8483 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8484 MGMT_ADV_FLAG_LIMITED_DISCOV |
8485 MGMT_ADV_FLAG_MANAGED_FLAGS);
8488 static bool tx_power_managed(u32 adv_flags)
8490 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8493 static bool name_managed(u32 adv_flags)
8495 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8498 static bool appearance_managed(u32 adv_flags)
8500 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8503 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8504 u8 len, bool is_adv_data)
8509 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8514 /* Make sure that the data is correctly formatted. */
8515 for (i = 0; i < len; i += (cur_len + 1)) {
8521 if (data[i + 1] == EIR_FLAGS &&
8522 (!is_adv_data || flags_managed(adv_flags)))
8525 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8528 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8531 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8534 if (data[i + 1] == EIR_APPEARANCE &&
8535 appearance_managed(adv_flags))
8538 /* If the current field length would exceed the total data
8539 * length, then it's invalid.
8541 if (i + cur_len >= len)
8548 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8550 u32 supported_flags, phy_flags;
8552 /* The current implementation only supports a subset of the specified
8553 * flags. Also need to check mutual exclusiveness of sec flags.
8555 supported_flags = get_supported_adv_flags(hdev);
8556 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8557 if (adv_flags & ~supported_flags ||
8558 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8564 static bool adv_busy(struct hci_dev *hdev)
8566 return pending_find(MGMT_OP_SET_LE, hdev);
8569 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8572 struct adv_info *adv, *n;
8574 bt_dev_dbg(hdev, "err %d", err);
8578 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8585 adv->pending = false;
8589 instance = adv->instance;
8591 if (hdev->cur_adv_instance == instance)
8592 cancel_adv_timeout(hdev);
8594 hci_remove_adv_instance(hdev, instance);
8595 mgmt_advertising_removed(sk, hdev, instance);
8598 hci_dev_unlock(hdev);
8601 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8603 struct mgmt_pending_cmd *cmd = data;
8604 struct mgmt_cp_add_advertising *cp = cmd->param;
8605 struct mgmt_rp_add_advertising rp;
8607 memset(&rp, 0, sizeof(rp));
8609 rp.instance = cp->instance;
8612 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8615 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8616 mgmt_status(err), &rp, sizeof(rp));
8618 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8620 mgmt_pending_free(cmd);
8623 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8625 struct mgmt_pending_cmd *cmd = data;
8626 struct mgmt_cp_add_advertising *cp = cmd->param;
8628 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8631 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8632 void *data, u16 data_len)
8634 struct mgmt_cp_add_advertising *cp = data;
8635 struct mgmt_rp_add_advertising rp;
8638 u16 timeout, duration;
8639 unsigned int prev_instance_cnt;
8640 u8 schedule_instance = 0;
8641 struct adv_info *adv, *next_instance;
8643 struct mgmt_pending_cmd *cmd;
8645 bt_dev_dbg(hdev, "sock %p", sk);
8647 status = mgmt_le_support(hdev);
8649 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8652 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8653 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8654 MGMT_STATUS_INVALID_PARAMS);
8656 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8657 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8658 MGMT_STATUS_INVALID_PARAMS);
8660 flags = __le32_to_cpu(cp->flags);
8661 timeout = __le16_to_cpu(cp->timeout);
8662 duration = __le16_to_cpu(cp->duration);
8664 if (!requested_adv_flags_are_valid(hdev, flags))
8665 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8666 MGMT_STATUS_INVALID_PARAMS);
8670 if (timeout && !hdev_is_powered(hdev)) {
8671 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8672 MGMT_STATUS_REJECTED);
8676 if (adv_busy(hdev)) {
8677 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8682 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8683 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8684 cp->scan_rsp_len, false)) {
8685 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8686 MGMT_STATUS_INVALID_PARAMS);
8690 prev_instance_cnt = hdev->adv_instance_cnt;
8692 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8693 cp->adv_data_len, cp->data,
8695 cp->data + cp->adv_data_len,
8697 HCI_ADV_TX_POWER_NO_PREFERENCE,
8698 hdev->le_adv_min_interval,
8699 hdev->le_adv_max_interval, 0);
8701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8702 MGMT_STATUS_FAILED);
8706 /* Only trigger an advertising added event if a new instance was
8709 if (hdev->adv_instance_cnt > prev_instance_cnt)
8710 mgmt_advertising_added(sk, hdev, cp->instance);
8712 if (hdev->cur_adv_instance == cp->instance) {
8713 /* If the currently advertised instance is being changed then
8714 * cancel the current advertising and schedule the next
8715 * instance. If there is only one instance then the overridden
8716 * advertising data will be visible right away.
8718 cancel_adv_timeout(hdev);
8720 next_instance = hci_get_next_instance(hdev, cp->instance);
8722 schedule_instance = next_instance->instance;
8723 } else if (!hdev->adv_instance_timeout) {
8724 /* Immediately advertise the new instance if no other
8725 * instance is currently being advertised.
8727 schedule_instance = cp->instance;
8730 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8731 * there is no instance to be advertised then we have no HCI
8732 * communication to make. Simply return.
8734 if (!hdev_is_powered(hdev) ||
8735 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8736 !schedule_instance) {
8737 rp.instance = cp->instance;
8738 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8739 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8743 /* We're good to go, update advertising data, parameters, and start
8746 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8753 cp->instance = schedule_instance;
8755 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8756 add_advertising_complete);
8758 mgmt_pending_free(cmd);
8761 hci_dev_unlock(hdev);
8766 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8769 struct mgmt_pending_cmd *cmd = data;
8770 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8771 struct mgmt_rp_add_ext_adv_params rp;
8772 struct adv_info *adv;
8775 BT_DBG("%s", hdev->name);
8779 adv = hci_find_adv_instance(hdev, cp->instance);
8783 rp.instance = cp->instance;
8784 rp.tx_power = adv->tx_power;
8786 /* While we're at it, inform userspace of the available space for this
8787 * advertisement, given the flags that will be used.
8789 flags = __le32_to_cpu(cp->flags);
8790 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8791 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8794 /* If this advertisement was previously advertising and we
8795 * failed to update it, we signal that it has been removed and
8796 * delete its structure
8799 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8801 hci_remove_adv_instance(hdev, cp->instance);
8803 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8806 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8807 mgmt_status(err), &rp, sizeof(rp));
8812 mgmt_pending_free(cmd);
8814 hci_dev_unlock(hdev);
8817 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8819 struct mgmt_pending_cmd *cmd = data;
8820 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8822 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8825 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8826 void *data, u16 data_len)
8828 struct mgmt_cp_add_ext_adv_params *cp = data;
8829 struct mgmt_rp_add_ext_adv_params rp;
8830 struct mgmt_pending_cmd *cmd = NULL;
8831 struct adv_info *adv;
8832 u32 flags, min_interval, max_interval;
8833 u16 timeout, duration;
8838 BT_DBG("%s", hdev->name);
8840 status = mgmt_le_support(hdev);
8842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8845 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8847 MGMT_STATUS_INVALID_PARAMS);
8849 /* The purpose of breaking add_advertising into two separate MGMT calls
8850 * for params and data is to allow more parameters to be added to this
8851 * structure in the future. For this reason, we verify that we have the
8852 * bare minimum structure we know of when the interface was defined. Any
8853 * extra parameters we don't know about will be ignored in this request.
8855 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8856 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8857 MGMT_STATUS_INVALID_PARAMS);
8859 flags = __le32_to_cpu(cp->flags);
8861 if (!requested_adv_flags_are_valid(hdev, flags))
8862 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8863 MGMT_STATUS_INVALID_PARAMS);
8867 /* In new interface, we require that we are powered to register */
8868 if (!hdev_is_powered(hdev)) {
8869 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8870 MGMT_STATUS_REJECTED);
8874 if (adv_busy(hdev)) {
8875 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8880 /* Parse defined parameters from request, use defaults otherwise */
8881 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8882 __le16_to_cpu(cp->timeout) : 0;
8884 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8885 __le16_to_cpu(cp->duration) :
8886 hdev->def_multi_adv_rotation_duration;
8888 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8889 __le32_to_cpu(cp->min_interval) :
8890 hdev->le_adv_min_interval;
8892 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8893 __le32_to_cpu(cp->max_interval) :
8894 hdev->le_adv_max_interval;
8896 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8898 HCI_ADV_TX_POWER_NO_PREFERENCE;
8900 /* Create advertising instance with no advertising or response data */
8901 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8902 timeout, duration, tx_power, min_interval,
8906 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8907 MGMT_STATUS_FAILED);
8911 /* Submit request for advertising params if ext adv available */
8912 if (ext_adv_capable(hdev)) {
8913 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8917 hci_remove_adv_instance(hdev, cp->instance);
8921 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8922 add_ext_adv_params_complete);
8924 mgmt_pending_free(cmd);
8926 rp.instance = cp->instance;
8927 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8928 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8929 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8930 err = mgmt_cmd_complete(sk, hdev->id,
8931 MGMT_OP_ADD_EXT_ADV_PARAMS,
8932 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8936 hci_dev_unlock(hdev);
8941 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8943 struct mgmt_pending_cmd *cmd = data;
8944 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8945 struct mgmt_rp_add_advertising rp;
8947 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8949 memset(&rp, 0, sizeof(rp));
8951 rp.instance = cp->instance;
8954 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8957 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8958 mgmt_status(err), &rp, sizeof(rp));
8960 mgmt_pending_free(cmd);
8963 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8965 struct mgmt_pending_cmd *cmd = data;
8966 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8969 if (ext_adv_capable(hdev)) {
8970 err = hci_update_adv_data_sync(hdev, cp->instance);
8974 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8978 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8981 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8984 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8987 struct mgmt_cp_add_ext_adv_data *cp = data;
8988 struct mgmt_rp_add_ext_adv_data rp;
8989 u8 schedule_instance = 0;
8990 struct adv_info *next_instance;
8991 struct adv_info *adv_instance;
8993 struct mgmt_pending_cmd *cmd;
8995 BT_DBG("%s", hdev->name);
8999 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9001 if (!adv_instance) {
9002 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9003 MGMT_STATUS_INVALID_PARAMS);
9007 /* In new interface, we require that we are powered to register */
9008 if (!hdev_is_powered(hdev)) {
9009 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9010 MGMT_STATUS_REJECTED);
9011 goto clear_new_instance;
9014 if (adv_busy(hdev)) {
9015 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9017 goto clear_new_instance;
9020 /* Validate new data */
9021 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9022 cp->adv_data_len, true) ||
9023 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9024 cp->adv_data_len, cp->scan_rsp_len, false)) {
9025 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9026 MGMT_STATUS_INVALID_PARAMS);
9027 goto clear_new_instance;
9030 /* Set the data in the advertising instance */
9031 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9032 cp->data, cp->scan_rsp_len,
9033 cp->data + cp->adv_data_len);
9035 /* If using software rotation, determine next instance to use */
9036 if (hdev->cur_adv_instance == cp->instance) {
9037 /* If the currently advertised instance is being changed
9038 * then cancel the current advertising and schedule the
9039 * next instance. If there is only one instance then the
9040 * overridden advertising data will be visible right
9043 cancel_adv_timeout(hdev);
9045 next_instance = hci_get_next_instance(hdev, cp->instance);
9047 schedule_instance = next_instance->instance;
9048 } else if (!hdev->adv_instance_timeout) {
9049 /* Immediately advertise the new instance if no other
9050 * instance is currently being advertised.
9052 schedule_instance = cp->instance;
9055 /* If the HCI_ADVERTISING flag is set or there is no instance to
9056 * be advertised then we have no HCI communication to make.
9059 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9060 if (adv_instance->pending) {
9061 mgmt_advertising_added(sk, hdev, cp->instance);
9062 adv_instance->pending = false;
9064 rp.instance = cp->instance;
9065 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9066 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9070 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9074 goto clear_new_instance;
9077 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9078 add_ext_adv_data_complete);
9080 mgmt_pending_free(cmd);
9081 goto clear_new_instance;
9084 /* We were successful in updating data, so trigger advertising_added
9085 * event if this is an instance that wasn't previously advertising. If
9086 * a failure occurs in the requests we initiated, we will remove the
9087 * instance again in add_advertising_complete
9089 if (adv_instance->pending)
9090 mgmt_advertising_added(sk, hdev, cp->instance);
9095 hci_remove_adv_instance(hdev, cp->instance);
9098 hci_dev_unlock(hdev);
9103 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9106 struct mgmt_pending_cmd *cmd = data;
9107 struct mgmt_cp_remove_advertising *cp = cmd->param;
9108 struct mgmt_rp_remove_advertising rp;
9110 bt_dev_dbg(hdev, "err %d", err);
9112 memset(&rp, 0, sizeof(rp));
9113 rp.instance = cp->instance;
9116 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9119 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9120 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9122 mgmt_pending_free(cmd);
9125 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9127 struct mgmt_pending_cmd *cmd = data;
9128 struct mgmt_cp_remove_advertising *cp = cmd->param;
9131 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9135 if (list_empty(&hdev->adv_instances))
9136 err = hci_disable_advertising_sync(hdev);
9141 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9142 void *data, u16 data_len)
9144 struct mgmt_cp_remove_advertising *cp = data;
9145 struct mgmt_pending_cmd *cmd;
9148 bt_dev_dbg(hdev, "sock %p", sk);
9152 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9153 err = mgmt_cmd_status(sk, hdev->id,
9154 MGMT_OP_REMOVE_ADVERTISING,
9155 MGMT_STATUS_INVALID_PARAMS);
9159 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9160 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9165 if (list_empty(&hdev->adv_instances)) {
9166 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9167 MGMT_STATUS_INVALID_PARAMS);
9171 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9178 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9179 remove_advertising_complete);
9181 mgmt_pending_free(cmd);
9184 hci_dev_unlock(hdev);
9189 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9190 void *data, u16 data_len)
9192 struct mgmt_cp_get_adv_size_info *cp = data;
9193 struct mgmt_rp_get_adv_size_info rp;
9194 u32 flags, supported_flags;
9196 bt_dev_dbg(hdev, "sock %p", sk);
9198 if (!lmp_le_capable(hdev))
9199 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9200 MGMT_STATUS_REJECTED);
9202 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9203 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9204 MGMT_STATUS_INVALID_PARAMS);
9206 flags = __le32_to_cpu(cp->flags);
9208 /* The current implementation only supports a subset of the specified
9211 supported_flags = get_supported_adv_flags(hdev);
9212 if (flags & ~supported_flags)
9213 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9214 MGMT_STATUS_INVALID_PARAMS);
9216 rp.instance = cp->instance;
9217 rp.flags = cp->flags;
9218 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9219 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9221 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9222 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9225 static const struct hci_mgmt_handler mgmt_handlers[] = {
9226 { NULL }, /* 0x0000 (no command) */
9227 { read_version, MGMT_READ_VERSION_SIZE,
9229 HCI_MGMT_UNTRUSTED },
9230 { read_commands, MGMT_READ_COMMANDS_SIZE,
9232 HCI_MGMT_UNTRUSTED },
9233 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9235 HCI_MGMT_UNTRUSTED },
9236 { read_controller_info, MGMT_READ_INFO_SIZE,
9237 HCI_MGMT_UNTRUSTED },
9238 { set_powered, MGMT_SETTING_SIZE },
9239 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9240 { set_connectable, MGMT_SETTING_SIZE },
9241 { set_fast_connectable, MGMT_SETTING_SIZE },
9242 { set_bondable, MGMT_SETTING_SIZE },
9243 { set_link_security, MGMT_SETTING_SIZE },
9244 { set_ssp, MGMT_SETTING_SIZE },
9245 { set_hs, MGMT_SETTING_SIZE },
9246 { set_le, MGMT_SETTING_SIZE },
9247 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9248 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9249 { add_uuid, MGMT_ADD_UUID_SIZE },
9250 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9251 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9253 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9255 { disconnect, MGMT_DISCONNECT_SIZE },
9256 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9257 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9258 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9259 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9260 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9261 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9262 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9263 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9264 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9265 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9266 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9267 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9268 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9270 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9271 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9272 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9273 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9274 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9275 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9276 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9277 { set_advertising, MGMT_SETTING_SIZE },
9278 { set_bredr, MGMT_SETTING_SIZE },
9279 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9280 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9281 { set_secure_conn, MGMT_SETTING_SIZE },
9282 { set_debug_keys, MGMT_SETTING_SIZE },
9283 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9284 { load_irks, MGMT_LOAD_IRKS_SIZE,
9286 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9287 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9288 { add_device, MGMT_ADD_DEVICE_SIZE },
9289 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9290 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9292 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9294 HCI_MGMT_UNTRUSTED },
9295 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9296 HCI_MGMT_UNCONFIGURED |
9297 HCI_MGMT_UNTRUSTED },
9298 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9299 HCI_MGMT_UNCONFIGURED },
9300 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9301 HCI_MGMT_UNCONFIGURED },
9302 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9304 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9305 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9307 HCI_MGMT_UNTRUSTED },
9308 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9309 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9311 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9312 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9313 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9314 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9315 HCI_MGMT_UNTRUSTED },
9316 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9317 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9318 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9319 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9321 { set_wideband_speech, MGMT_SETTING_SIZE },
9322 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9323 HCI_MGMT_UNTRUSTED },
9324 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9325 HCI_MGMT_UNTRUSTED |
9326 HCI_MGMT_HDEV_OPTIONAL },
9327 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9329 HCI_MGMT_HDEV_OPTIONAL },
9330 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9331 HCI_MGMT_UNTRUSTED },
9332 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9334 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9335 HCI_MGMT_UNTRUSTED },
9336 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9338 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9339 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9340 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9341 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9343 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9344 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9346 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9348 { add_adv_patterns_monitor_rssi,
9349 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9351 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9353 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9354 { mesh_send, MGMT_MESH_SEND_SIZE,
9356 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9359 void mgmt_index_added(struct hci_dev *hdev)
9361 struct mgmt_ev_ext_index ev;
9363 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9366 switch (hdev->dev_type) {
9368 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9369 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9370 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9373 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9374 HCI_MGMT_INDEX_EVENTS);
9387 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9388 HCI_MGMT_EXT_INDEX_EVENTS);
9391 void mgmt_index_removed(struct hci_dev *hdev)
9393 struct mgmt_ev_ext_index ev;
9394 u8 status = MGMT_STATUS_INVALID_INDEX;
9396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9399 switch (hdev->dev_type) {
9401 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9403 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9404 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9405 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9408 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9409 HCI_MGMT_INDEX_EVENTS);
9422 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9423 HCI_MGMT_EXT_INDEX_EVENTS);
9425 /* Cancel any remaining timed work */
9426 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9428 cancel_delayed_work_sync(&hdev->discov_off);
9429 cancel_delayed_work_sync(&hdev->service_cache);
9430 cancel_delayed_work_sync(&hdev->rpa_expired);
9433 void mgmt_power_on(struct hci_dev *hdev, int err)
9435 struct cmd_lookup match = { NULL, hdev };
9437 bt_dev_dbg(hdev, "err %d", err);
9442 restart_le_actions(hdev);
9443 hci_update_passive_scan(hdev);
9446 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9448 new_settings(hdev, match.sk);
9453 hci_dev_unlock(hdev);
9456 void __mgmt_power_off(struct hci_dev *hdev)
9458 struct cmd_lookup match = { NULL, hdev };
9459 u8 status, zero_cod[] = { 0, 0, 0 };
9461 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9463 /* If the power off is because of hdev unregistration let
9464 * use the appropriate INVALID_INDEX status. Otherwise use
9465 * NOT_POWERED. We cover both scenarios here since later in
9466 * mgmt_index_removed() any hci_conn callbacks will have already
9467 * been triggered, potentially causing misleading DISCONNECTED
9470 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9471 status = MGMT_STATUS_INVALID_INDEX;
9473 status = MGMT_STATUS_NOT_POWERED;
9475 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9477 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9478 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9479 zero_cod, sizeof(zero_cod),
9480 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9481 ext_info_changed(hdev, NULL);
9484 new_settings(hdev, match.sk);
9490 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9492 struct mgmt_pending_cmd *cmd;
9495 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9499 if (err == -ERFKILL)
9500 status = MGMT_STATUS_RFKILLED;
9502 status = MGMT_STATUS_FAILED;
9504 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9506 mgmt_pending_remove(cmd);
9509 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9512 struct mgmt_ev_new_link_key ev;
9514 memset(&ev, 0, sizeof(ev));
9516 ev.store_hint = persistent;
9517 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9518 ev.key.addr.type = BDADDR_BREDR;
9519 ev.key.type = key->type;
9520 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9521 ev.key.pin_len = key->pin_len;
9523 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9526 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9528 switch (ltk->type) {
9530 case SMP_LTK_RESPONDER:
9531 if (ltk->authenticated)
9532 return MGMT_LTK_AUTHENTICATED;
9533 return MGMT_LTK_UNAUTHENTICATED;
9535 if (ltk->authenticated)
9536 return MGMT_LTK_P256_AUTH;
9537 return MGMT_LTK_P256_UNAUTH;
9538 case SMP_LTK_P256_DEBUG:
9539 return MGMT_LTK_P256_DEBUG;
9542 return MGMT_LTK_UNAUTHENTICATED;
9545 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9547 struct mgmt_ev_new_long_term_key ev;
9549 memset(&ev, 0, sizeof(ev));
9551 /* Devices using resolvable or non-resolvable random addresses
9552 * without providing an identity resolving key don't require
9553 * to store long term keys. Their addresses will change the
9556 * Only when a remote device provides an identity address
9557 * make sure the long term key is stored. If the remote
9558 * identity is known, the long term keys are internally
9559 * mapped to the identity address. So allow static random
9560 * and public addresses here.
9562 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9563 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9564 ev.store_hint = 0x00;
9566 ev.store_hint = persistent;
9568 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9569 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9570 ev.key.type = mgmt_ltk_type(key);
9571 ev.key.enc_size = key->enc_size;
9572 ev.key.ediv = key->ediv;
9573 ev.key.rand = key->rand;
9575 if (key->type == SMP_LTK)
9576 ev.key.initiator = 1;
9578 /* Make sure we copy only the significant bytes based on the
9579 * encryption key size, and set the rest of the value to zeroes.
9581 memcpy(ev.key.val, key->val, key->enc_size);
9582 memset(ev.key.val + key->enc_size, 0,
9583 sizeof(ev.key.val) - key->enc_size);
9585 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9588 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9590 struct mgmt_ev_new_irk ev;
9592 memset(&ev, 0, sizeof(ev));
9594 ev.store_hint = persistent;
9596 bacpy(&ev.rpa, &irk->rpa);
9597 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9598 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9599 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9601 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9604 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9607 struct mgmt_ev_new_csrk ev;
9609 memset(&ev, 0, sizeof(ev));
9611 /* Devices using resolvable or non-resolvable random addresses
9612 * without providing an identity resolving key don't require
9613 * to store signature resolving keys. Their addresses will change
9614 * the next time around.
9616 * Only when a remote device provides an identity address
9617 * make sure the signature resolving key is stored. So allow
9618 * static random and public addresses here.
9620 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9621 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9622 ev.store_hint = 0x00;
9624 ev.store_hint = persistent;
9626 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9627 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9628 ev.key.type = csrk->type;
9629 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9631 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9634 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9635 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9636 u16 max_interval, u16 latency, u16 timeout)
9638 struct mgmt_ev_new_conn_param ev;
9640 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9643 memset(&ev, 0, sizeof(ev));
9644 bacpy(&ev.addr.bdaddr, bdaddr);
9645 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9646 ev.store_hint = store_hint;
9647 ev.min_interval = cpu_to_le16(min_interval);
9648 ev.max_interval = cpu_to_le16(max_interval);
9649 ev.latency = cpu_to_le16(latency);
9650 ev.timeout = cpu_to_le16(timeout);
9652 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9655 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9656 u8 *name, u8 name_len)
9658 struct sk_buff *skb;
9659 struct mgmt_ev_device_connected *ev;
9663 /* allocate buff for LE or BR/EDR adv */
9664 if (conn->le_adv_data_len > 0)
9665 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9666 sizeof(*ev) + conn->le_adv_data_len);
9668 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9669 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9670 eir_precalc_len(sizeof(conn->dev_class)));
9672 ev = skb_put(skb, sizeof(*ev));
9673 bacpy(&ev->addr.bdaddr, &conn->dst);
9674 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9677 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9679 ev->flags = __cpu_to_le32(flags);
9681 /* We must ensure that the EIR Data fields are ordered and
9682 * unique. Keep it simple for now and avoid the problem by not
9683 * adding any BR/EDR data to the LE adv.
9685 if (conn->le_adv_data_len > 0) {
9686 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9687 eir_len = conn->le_adv_data_len;
9690 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9692 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9693 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9694 conn->dev_class, sizeof(conn->dev_class));
9697 ev->eir_len = cpu_to_le16(eir_len);
9699 mgmt_event_skb(skb, NULL);
9702 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9704 struct sock **sk = data;
9706 cmd->cmd_complete(cmd, 0);
9711 mgmt_pending_remove(cmd);
9714 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9716 struct hci_dev *hdev = data;
9717 struct mgmt_cp_unpair_device *cp = cmd->param;
9719 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9721 cmd->cmd_complete(cmd, 0);
9722 mgmt_pending_remove(cmd);
9725 bool mgmt_powering_down(struct hci_dev *hdev)
9727 struct mgmt_pending_cmd *cmd;
9728 struct mgmt_mode *cp;
9730 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9741 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9742 u8 link_type, u8 addr_type, u8 reason,
9743 bool mgmt_connected)
9745 struct mgmt_ev_device_disconnected ev;
9746 struct sock *sk = NULL;
9748 /* The connection is still in hci_conn_hash so test for 1
9749 * instead of 0 to know if this is the last one.
9751 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9752 cancel_delayed_work(&hdev->power_off);
9753 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9756 if (!mgmt_connected)
9759 if (link_type != ACL_LINK && link_type != LE_LINK)
9762 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9764 bacpy(&ev.addr.bdaddr, bdaddr);
9765 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9768 /* Report disconnects due to suspend */
9769 if (hdev->suspended)
9770 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9772 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9777 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9781 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9782 u8 link_type, u8 addr_type, u8 status)
9784 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9785 struct mgmt_cp_disconnect *cp;
9786 struct mgmt_pending_cmd *cmd;
9788 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9791 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9797 if (bacmp(bdaddr, &cp->addr.bdaddr))
9800 if (cp->addr.type != bdaddr_type)
9803 cmd->cmd_complete(cmd, mgmt_status(status));
9804 mgmt_pending_remove(cmd);
9807 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9808 u8 addr_type, u8 status)
9810 struct mgmt_ev_connect_failed ev;
9812 /* The connection is still in hci_conn_hash so test for 1
9813 * instead of 0 to know if this is the last one.
9815 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9816 cancel_delayed_work(&hdev->power_off);
9817 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9820 bacpy(&ev.addr.bdaddr, bdaddr);
9821 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9822 ev.status = mgmt_status(status);
9824 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9827 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9829 struct mgmt_ev_pin_code_request ev;
9831 bacpy(&ev.addr.bdaddr, bdaddr);
9832 ev.addr.type = BDADDR_BREDR;
9835 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9838 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9841 struct mgmt_pending_cmd *cmd;
9843 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9847 cmd->cmd_complete(cmd, mgmt_status(status));
9848 mgmt_pending_remove(cmd);
9851 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9854 struct mgmt_pending_cmd *cmd;
9856 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9860 cmd->cmd_complete(cmd, mgmt_status(status));
9861 mgmt_pending_remove(cmd);
9864 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9865 u8 link_type, u8 addr_type, u32 value,
9868 struct mgmt_ev_user_confirm_request ev;
9870 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9872 bacpy(&ev.addr.bdaddr, bdaddr);
9873 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9874 ev.confirm_hint = confirm_hint;
9875 ev.value = cpu_to_le32(value);
9877 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9881 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9882 u8 link_type, u8 addr_type)
9884 struct mgmt_ev_user_passkey_request ev;
9886 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9888 bacpy(&ev.addr.bdaddr, bdaddr);
9889 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9891 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9895 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9896 u8 link_type, u8 addr_type, u8 status,
9899 struct mgmt_pending_cmd *cmd;
9901 cmd = pending_find(opcode, hdev);
9905 cmd->cmd_complete(cmd, mgmt_status(status));
9906 mgmt_pending_remove(cmd);
9911 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9912 u8 link_type, u8 addr_type, u8 status)
9914 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9915 status, MGMT_OP_USER_CONFIRM_REPLY);
9918 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9919 u8 link_type, u8 addr_type, u8 status)
9921 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9923 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9926 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9927 u8 link_type, u8 addr_type, u8 status)
9929 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9930 status, MGMT_OP_USER_PASSKEY_REPLY);
9933 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9934 u8 link_type, u8 addr_type, u8 status)
9936 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9938 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9941 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9942 u8 link_type, u8 addr_type, u32 passkey,
9945 struct mgmt_ev_passkey_notify ev;
9947 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9949 bacpy(&ev.addr.bdaddr, bdaddr);
9950 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9951 ev.passkey = __cpu_to_le32(passkey);
9952 ev.entered = entered;
9954 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9957 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9959 struct mgmt_ev_auth_failed ev;
9960 struct mgmt_pending_cmd *cmd;
9961 u8 status = mgmt_status(hci_status);
9963 bacpy(&ev.addr.bdaddr, &conn->dst);
9964 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9967 cmd = find_pairing(conn);
9969 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9970 cmd ? cmd->sk : NULL);
9973 cmd->cmd_complete(cmd, status);
9974 mgmt_pending_remove(cmd);
9978 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9980 struct cmd_lookup match = { NULL, hdev };
9984 u8 mgmt_err = mgmt_status(status);
9985 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9986 cmd_status_rsp, &mgmt_err);
9990 if (test_bit(HCI_AUTH, &hdev->flags))
9991 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9993 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9995 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9999 new_settings(hdev, match.sk);
10002 sock_put(match.sk);
10005 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10007 struct cmd_lookup *match = data;
10009 if (match->sk == NULL) {
10010 match->sk = cmd->sk;
10011 sock_hold(match->sk);
10015 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10018 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10020 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10021 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10022 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10025 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10026 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10027 ext_info_changed(hdev, NULL);
10031 sock_put(match.sk);
10034 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10036 struct mgmt_cp_set_local_name ev;
10037 struct mgmt_pending_cmd *cmd;
10042 memset(&ev, 0, sizeof(ev));
10043 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10044 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10046 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10048 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10050 /* If this is a HCI command related to powering on the
10051 * HCI dev don't send any mgmt signals.
10053 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10057 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10058 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10059 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10062 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10066 for (i = 0; i < uuid_count; i++) {
10067 if (!memcmp(uuid, uuids[i], 16))
10074 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10078 while (parsed < eir_len) {
10079 u8 field_len = eir[0];
10083 if (field_len == 0)
10086 if (eir_len - parsed < field_len + 1)
10090 case EIR_UUID16_ALL:
10091 case EIR_UUID16_SOME:
10092 for (i = 0; i + 3 <= field_len; i += 2) {
10093 memcpy(uuid, bluetooth_base_uuid, 16);
10094 uuid[13] = eir[i + 3];
10095 uuid[12] = eir[i + 2];
10096 if (has_uuid(uuid, uuid_count, uuids))
10100 case EIR_UUID32_ALL:
10101 case EIR_UUID32_SOME:
10102 for (i = 0; i + 5 <= field_len; i += 4) {
10103 memcpy(uuid, bluetooth_base_uuid, 16);
10104 uuid[15] = eir[i + 5];
10105 uuid[14] = eir[i + 4];
10106 uuid[13] = eir[i + 3];
10107 uuid[12] = eir[i + 2];
10108 if (has_uuid(uuid, uuid_count, uuids))
10112 case EIR_UUID128_ALL:
10113 case EIR_UUID128_SOME:
10114 for (i = 0; i + 17 <= field_len; i += 16) {
10115 memcpy(uuid, eir + i + 2, 16);
10116 if (has_uuid(uuid, uuid_count, uuids))
10122 parsed += field_len + 1;
10123 eir += field_len + 1;
10129 static void restart_le_scan(struct hci_dev *hdev)
10131 /* If controller is not scanning we are done. */
10132 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
10135 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
10136 hdev->discovery.scan_start +
10137 hdev->discovery.scan_duration))
10140 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
10141 DISCOV_LE_RESTART_DELAY);
10144 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10145 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10147 /* If a RSSI threshold has been specified, and
10148 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10149 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10150 * is set, let it through for further processing, as we might need to
10151 * restart the scan.
10153 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10154 * the results are also dropped.
10156 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10157 (rssi == HCI_RSSI_INVALID ||
10158 (rssi < hdev->discovery.rssi &&
10159 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10162 if (hdev->discovery.uuid_count != 0) {
10163 /* If a list of UUIDs is provided in filter, results with no
10164 * matching UUID should be dropped.
10166 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10167 hdev->discovery.uuids) &&
10168 !eir_has_uuids(scan_rsp, scan_rsp_len,
10169 hdev->discovery.uuid_count,
10170 hdev->discovery.uuids))
10174 /* If duplicate filtering does not report RSSI changes, then restart
10175 * scanning to ensure updated result with updated RSSI values.
10177 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10178 restart_le_scan(hdev);
10180 /* Validate RSSI value against the RSSI threshold once more. */
10181 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10182 rssi < hdev->discovery.rssi)
10189 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10190 bdaddr_t *bdaddr, u8 addr_type)
10192 struct mgmt_ev_adv_monitor_device_lost ev;
10194 ev.monitor_handle = cpu_to_le16(handle);
10195 bacpy(&ev.addr.bdaddr, bdaddr);
10196 ev.addr.type = addr_type;
10198 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10202 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10203 struct sk_buff *skb,
10204 struct sock *skip_sk,
10207 struct sk_buff *advmon_skb;
10208 size_t advmon_skb_len;
10209 __le16 *monitor_handle;
10214 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10215 sizeof(struct mgmt_ev_device_found)) + skb->len;
10216 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10221 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10222 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10223 * store monitor_handle of the matched monitor.
10225 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10226 *monitor_handle = cpu_to_le16(handle);
10227 skb_put_data(advmon_skb, skb->data, skb->len);
10229 mgmt_event_skb(advmon_skb, skip_sk);
10232 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10233 bdaddr_t *bdaddr, bool report_device,
10234 struct sk_buff *skb,
10235 struct sock *skip_sk)
10237 struct monitored_device *dev, *tmp;
10238 bool matched = false;
10239 bool notified = false;
10241 /* We have received the Advertisement Report because:
10242 * 1. the kernel has initiated active discovery
10243 * 2. if not, we have pend_le_reports > 0 in which case we are doing
10245 * 3. if none of the above is true, we have one or more active
10246 * Advertisement Monitor
10248 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10249 * and report ONLY one advertisement per device for the matched Monitor
10250 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10252 * For case 3, since we are not active scanning and all advertisements
10253 * received are due to a matched Advertisement Monitor, report all
10254 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10256 if (report_device && !hdev->advmon_pend_notify) {
10257 mgmt_event_skb(skb, skip_sk);
10261 hdev->advmon_pend_notify = false;
10263 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10264 if (!bacmp(&dev->bdaddr, bdaddr)) {
10267 if (!dev->notified) {
10268 mgmt_send_adv_monitor_device_found(hdev, skb,
10272 dev->notified = true;
10276 if (!dev->notified)
10277 hdev->advmon_pend_notify = true;
10280 if (!report_device &&
10281 ((matched && !notified) || !msft_monitor_supported(hdev))) {
10282 /* Handle 0 indicates that we are not active scanning and this
10283 * is a subsequent advertisement report for an already matched
10284 * Advertisement Monitor or the controller offloading support
10285 * is not available.
10287 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10291 mgmt_event_skb(skb, skip_sk);
10296 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10297 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10298 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10301 struct sk_buff *skb;
10302 struct mgmt_ev_mesh_device_found *ev;
10305 if (!hdev->mesh_ad_types[0])
10308 /* Scan for requested AD types */
10310 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10311 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10312 if (!hdev->mesh_ad_types[j])
10315 if (hdev->mesh_ad_types[j] == eir[i + 1])
10321 if (scan_rsp_len > 0) {
10322 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10323 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10324 if (!hdev->mesh_ad_types[j])
10327 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10336 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10337 sizeof(*ev) + eir_len + scan_rsp_len);
10341 ev = skb_put(skb, sizeof(*ev));
10343 bacpy(&ev->addr.bdaddr, bdaddr);
10344 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10346 ev->flags = cpu_to_le32(flags);
10347 ev->instant = cpu_to_le64(instant);
10350 /* Copy EIR or advertising data into event */
10351 skb_put_data(skb, eir, eir_len);
10353 if (scan_rsp_len > 0)
10354 /* Append scan response data to event */
10355 skb_put_data(skb, scan_rsp, scan_rsp_len);
10357 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10359 mgmt_event_skb(skb, NULL);
10362 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10363 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10364 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10367 struct sk_buff *skb;
10368 struct mgmt_ev_device_found *ev;
10369 bool report_device = hci_discovery_active(hdev);
10371 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10372 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10373 eir, eir_len, scan_rsp, scan_rsp_len,
10376 /* Don't send events for a non-kernel initiated discovery. With
10377 * LE one exception is if we have pend_le_reports > 0 in which
10378 * case we're doing passive scanning and want these events.
10380 if (!hci_discovery_active(hdev)) {
10381 if (link_type == ACL_LINK)
10383 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10384 report_device = true;
10385 else if (!hci_is_adv_monitoring(hdev))
10389 if (hdev->discovery.result_filtering) {
10390 /* We are using service discovery */
10391 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10396 if (hdev->discovery.limited) {
10397 /* Check for limited discoverable bit */
10399 if (!(dev_class[1] & 0x20))
10402 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10403 if (!flags || !(flags[0] & LE_AD_LIMITED))
10408 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10409 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10410 sizeof(*ev) + eir_len + scan_rsp_len + 5);
10414 ev = skb_put(skb, sizeof(*ev));
10416 /* In case of device discovery with BR/EDR devices (pre 1.2), the
10417 * RSSI value was reported as 0 when not available. This behavior
10418 * is kept when using device discovery. This is required for full
10419 * backwards compatibility with the API.
10421 * However when using service discovery, the value 127 will be
10422 * returned when the RSSI is not available.
10424 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10425 link_type == ACL_LINK)
10428 bacpy(&ev->addr.bdaddr, bdaddr);
10429 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10431 ev->flags = cpu_to_le32(flags);
10434 /* Copy EIR or advertising data into event */
10435 skb_put_data(skb, eir, eir_len);
10437 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10440 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10442 skb_put_data(skb, eir_cod, sizeof(eir_cod));
10445 if (scan_rsp_len > 0)
10446 /* Append scan response data to event */
10447 skb_put_data(skb, scan_rsp, scan_rsp_len);
10449 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10451 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10454 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10455 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10457 struct sk_buff *skb;
10458 struct mgmt_ev_device_found *ev;
10462 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10463 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10465 ev = skb_put(skb, sizeof(*ev));
10466 bacpy(&ev->addr.bdaddr, bdaddr);
10467 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10471 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10473 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10475 ev->eir_len = cpu_to_le16(eir_len);
10476 ev->flags = cpu_to_le32(flags);
10478 mgmt_event_skb(skb, NULL);
10481 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10483 struct mgmt_ev_discovering ev;
10485 bt_dev_dbg(hdev, "discovering %u", discovering);
10487 memset(&ev, 0, sizeof(ev));
10488 ev.type = hdev->discovery.type;
10489 ev.discovering = discovering;
10491 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10494 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10496 struct mgmt_ev_controller_suspend ev;
10498 ev.suspend_state = state;
10499 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10502 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10505 struct mgmt_ev_controller_resume ev;
10507 ev.wake_reason = reason;
10509 bacpy(&ev.addr.bdaddr, bdaddr);
10510 ev.addr.type = addr_type;
10512 memset(&ev.addr, 0, sizeof(ev.addr));
10515 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10518 static struct hci_mgmt_chan chan = {
10519 .channel = HCI_CHANNEL_CONTROL,
10520 .handler_count = ARRAY_SIZE(mgmt_handlers),
10521 .handlers = mgmt_handlers,
10522 .hdev_init = mgmt_init_hdev,
10525 int mgmt_init(void)
10527 return hci_mgmt_chan_register(&chan);
10530 void mgmt_exit(void)
10532 hci_mgmt_chan_unregister(&chan);
10535 void mgmt_cleanup(struct sock *sk)
10537 struct mgmt_mesh_tx *mesh_tx;
10538 struct hci_dev *hdev;
10540 read_lock(&hci_dev_list_lock);
10542 list_for_each_entry(hdev, &hci_dev_list, list) {
10544 mesh_tx = mgmt_mesh_next(hdev, sk);
10547 mesh_send_complete(hdev, mesh_tx, true);
10551 read_unlock(&hci_dev_list_lock);