2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
/* Management interface version/revision reported via MGMT_OP_READ_VERSION
 * (see mgmt_fill_version_info() below).
 */
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 22
/* Opcodes of all management commands accepted on trusted (privileged)
 * control sockets; reported to userspace by read_commands().
 * NOTE(review): this extract has gaps in the original line numbering and
 * is missing the closing "};" of the initializer — several entries appear
 * to have been lost in extraction; restore from upstream before building.
 */
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
55 MGMT_OP_SET_LINK_SECURITY,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
113 MGMT_OP_GET_PHY_CONFIGURATION,
114 MGMT_OP_SET_PHY_CONFIGURATION,
115 MGMT_OP_SET_BLOCKED_KEYS,
116 MGMT_OP_SET_WIDEBAND_SPEECH,
117 MGMT_OP_READ_CONTROLLER_CAP,
118 MGMT_OP_READ_EXP_FEATURES_INFO,
119 MGMT_OP_SET_EXP_FEATURE,
120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 MGMT_OP_GET_DEVICE_FLAGS,
125 MGMT_OP_SET_DEVICE_FLAGS,
126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 MGMT_OP_REMOVE_ADV_MONITOR,
129 MGMT_OP_ADD_EXT_ADV_PARAMS,
130 MGMT_OP_ADD_EXT_ADV_DATA,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 MGMT_OP_SET_MESH_RECEIVER,
133 MGMT_OP_MESH_READ_FEATURES,
135 MGMT_OP_MESH_SEND_CANCEL,
/* Events delivered to trusted control sockets; reported to userspace by
 * read_commands() alongside mgmt_commands[].
 * NOTE(review): numbering gaps and the missing closing "};" indicate
 * entries were dropped during extraction.
 */
138 static const u16 mgmt_events[] = {
139 MGMT_EV_CONTROLLER_ERROR,
141 MGMT_EV_INDEX_REMOVED,
142 MGMT_EV_NEW_SETTINGS,
143 MGMT_EV_CLASS_OF_DEV_CHANGED,
144 MGMT_EV_LOCAL_NAME_CHANGED,
145 MGMT_EV_NEW_LINK_KEY,
146 MGMT_EV_NEW_LONG_TERM_KEY,
147 MGMT_EV_DEVICE_CONNECTED,
148 MGMT_EV_DEVICE_DISCONNECTED,
149 MGMT_EV_CONNECT_FAILED,
150 MGMT_EV_PIN_CODE_REQUEST,
151 MGMT_EV_USER_CONFIRM_REQUEST,
152 MGMT_EV_USER_PASSKEY_REQUEST,
154 MGMT_EV_DEVICE_FOUND,
156 MGMT_EV_DEVICE_BLOCKED,
157 MGMT_EV_DEVICE_UNBLOCKED,
158 MGMT_EV_DEVICE_UNPAIRED,
159 MGMT_EV_PASSKEY_NOTIFY,
162 MGMT_EV_DEVICE_ADDED,
163 MGMT_EV_DEVICE_REMOVED,
164 MGMT_EV_NEW_CONN_PARAM,
165 MGMT_EV_UNCONF_INDEX_ADDED,
166 MGMT_EV_UNCONF_INDEX_REMOVED,
167 MGMT_EV_NEW_CONFIG_OPTIONS,
168 MGMT_EV_EXT_INDEX_ADDED,
169 MGMT_EV_EXT_INDEX_REMOVED,
170 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 MGMT_EV_ADVERTISING_ADDED,
172 MGMT_EV_ADVERTISING_REMOVED,
173 MGMT_EV_EXT_INFO_CHANGED,
174 MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 MGMT_EV_EXP_FEATURE_CHANGED,
176 MGMT_EV_DEVICE_FLAGS_CHANGED,
177 MGMT_EV_ADV_MONITOR_ADDED,
178 MGMT_EV_ADV_MONITOR_REMOVED,
179 MGMT_EV_CONTROLLER_SUSPEND,
180 MGMT_EV_CONTROLLER_RESUME,
181 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
/* Subset of read-only commands permitted on untrusted (non-privileged)
 * sockets — see the HCI_SOCK_TRUSTED branch in read_commands().
 * NOTE(review): closing "};" missing in this extract.
 */
185 static const u16 mgmt_untrusted_commands[] = {
186 MGMT_OP_READ_INDEX_LIST,
188 MGMT_OP_READ_UNCONF_INDEX_LIST,
189 MGMT_OP_READ_CONFIG_INFO,
190 MGMT_OP_READ_EXT_INDEX_LIST,
191 MGMT_OP_READ_EXT_INFO,
192 MGMT_OP_READ_CONTROLLER_CAP,
193 MGMT_OP_READ_EXP_FEATURES_INFO,
194 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Events that may be delivered to untrusted sockets.
 * NOTE(review): closing "};" missing in this extract.
 */
198 static const u16 mgmt_untrusted_events[] = {
200 MGMT_EV_INDEX_REMOVED,
201 MGMT_EV_NEW_SETTINGS,
202 MGMT_EV_CLASS_OF_DEV_CHANGED,
203 MGMT_EV_LOCAL_NAME_CHANGED,
204 MGMT_EV_UNCONF_INDEX_ADDED,
205 MGMT_EV_UNCONF_INDEX_REMOVED,
206 MGMT_EV_NEW_CONFIG_OPTIONS,
207 MGMT_EV_EXT_INDEX_ADDED,
208 MGMT_EV_EXT_INDEX_REMOVED,
209 MGMT_EV_EXT_INFO_CHANGED,
210 MGMT_EV_EXP_FEATURE_CHANGED,
/* Delay before the service cache is flushed (2 seconds, in jiffies). */
213 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 16 bytes of zeroes, used for comparing against all-zero link keys. */
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
218 /* HCI to MGMT error code conversion table */
/* Indexed directly by HCI status code — see mgmt_status() below.
 * NOTE(review): the first entry (index 0, MGMT_STATUS_SUCCESS) and the
 * closing "};" are missing from this extract, which would shift every
 * index by one; restore from upstream before building.
 */
219 static const u8 mgmt_status_table[] = {
221 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
222 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
223 MGMT_STATUS_FAILED, /* Hardware Failure */
224 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
225 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
226 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
227 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
228 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
229 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
230 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
231 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
232 MGMT_STATUS_BUSY, /* Command Disallowed */
233 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
234 MGMT_STATUS_REJECTED, /* Rejected Security */
235 MGMT_STATUS_REJECTED, /* Rejected Personal */
236 MGMT_STATUS_TIMEOUT, /* Host Timeout */
237 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
238 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
239 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
240 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
241 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
242 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
243 MGMT_STATUS_BUSY, /* Repeated Attempts */
244 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
245 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
246 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
247 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
248 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
249 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
250 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
251 MGMT_STATUS_FAILED, /* Unspecified Error */
252 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
253 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
254 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
255 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
256 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
257 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
258 MGMT_STATUS_FAILED, /* Unit Link Key Used */
259 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
260 MGMT_STATUS_TIMEOUT, /* Instant Passed */
261 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
262 MGMT_STATUS_FAILED, /* Transaction Collision */
263 MGMT_STATUS_FAILED, /* Reserved for future use */
264 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
265 MGMT_STATUS_REJECTED, /* QoS Rejected */
266 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
267 MGMT_STATUS_REJECTED, /* Insufficient Security */
268 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
269 MGMT_STATUS_FAILED, /* Reserved for future use */
270 MGMT_STATUS_BUSY, /* Role Switch Pending */
271 MGMT_STATUS_FAILED, /* Reserved for future use */
272 MGMT_STATUS_FAILED, /* Slot Violation */
273 MGMT_STATUS_FAILED, /* Role Switch Failed */
274 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
275 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
276 MGMT_STATUS_BUSY, /* Host Busy Pairing */
277 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
278 MGMT_STATUS_BUSY, /* Controller Busy */
279 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
280 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
281 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
282 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
283 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Map a negative Linux errno (or 0 for success) to an MGMT status code.
 * Falls back to MGMT_STATUS_FAILED for unrecognized errors.
 * NOTE(review): the switch(err)/case labels that select between these
 * return statements are missing from this extract — only the returns
 * survived; restore the selection logic from upstream before building.
 */
286 static u8 mgmt_errno_status(int err)
290 return MGMT_STATUS_SUCCESS;
292 return MGMT_STATUS_REJECTED;
294 return MGMT_STATUS_INVALID_PARAMS;
296 return MGMT_STATUS_NOT_SUPPORTED;
298 return MGMT_STATUS_BUSY;
300 return MGMT_STATUS_AUTH_FAILED;
302 return MGMT_STATUS_NO_RESOURCES;
304 return MGMT_STATUS_ALREADY_CONNECTED;
306 return MGMT_STATUS_DISCONNECTED;
/* default: unrecognized errno */
309 return MGMT_STATUS_FAILED;
/* Convert either a Linux errno or an HCI status code to an MGMT status:
 * errnos go through mgmt_errno_status(); non-negative values index
 * mgmt_status_table[] (bounded by ARRAY_SIZE).
 * NOTE(review): the "if (err < 0)" guard that should precede the
 * mgmt_errno_status() call is missing from this extract.
 */
312 static u8 mgmt_status(int err)
315 return mgmt_errno_status(err);
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
320 return MGMT_STATUS_FAILED;
/* Broadcast an index-related event on the control channel.
 * NOTE(review): the parameter list continuation and the trailing
 * arguments of the mgmt_send_event() call are missing in this extract.
 */
323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
326 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Broadcast an event only to sockets carrying the given flag,
 * optionally skipping one socket (typically the originator).
 * NOTE(review): trailing arguments of mgmt_send_event() are missing.
 */
330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331 u16 len, int flag, struct sock *skip_sk)
333 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Broadcast an event to all trusted control sockets, skipping skip_sk. */
337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338 struct sock *skip_sk)
340 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 HCI_SOCK_TRUSTED, skip_sk);
/* Broadcast a pre-built event skb to trusted control sockets.
 * NOTE(review): the skip_sk argument line of the call is missing here.
 */
344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
346 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
/* Translate an MGMT LE address type to the HCI-layer ADDR_LE_DEV_*
 * value: public maps to public, everything else to random.
 */
350 static u8 le_addr_type(u8 mgmt_addr_type)
352 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 return ADDR_LE_DEV_PUBLIC;
355 return ADDR_LE_DEV_RANDOM;
/* Fill a mgmt_rp_read_version reply with the compile-time
 * MGMT_VERSION/MGMT_REVISION. Exported for use by hci_sock.
 */
358 void mgmt_fill_version_info(void *ver)
360 struct mgmt_rp_read_version *rp = ver;
362 rp->version = MGMT_VERSION;
363 rp->revision = cpu_to_le16(MGMT_REVISION);
/* MGMT_OP_READ_VERSION handler: reply with the interface version.
 * hdev is NULL-capable here since the command uses MGMT_INDEX_NONE.
 */
366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
369 struct mgmt_rp_read_version rp;
371 bt_dev_dbg(hdev, "sock %p", sk);
373 mgmt_fill_version_info(&rp);
375 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: report the supported command and event
 * opcodes. Trusted sockets get the full tables; untrusted sockets get
 * the read-only subsets. The reply is a variable-length buffer of
 * little-endian u16 opcodes (commands first, then events).
 * NOTE(review): declarations of rp_size/i/err, the kmalloc NULL check
 * and the trailing kfree/return appear to be missing in this extract.
 */
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
382 struct mgmt_rp_read_commands *rp;
383 u16 num_commands, num_events;
387 bt_dev_dbg(hdev, "sock %p", sk);
389 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 num_commands = ARRAY_SIZE(mgmt_commands);
391 num_events = ARRAY_SIZE(mgmt_events);
393 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 num_events = ARRAY_SIZE(mgmt_untrusted_events);
397 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
399 rp = kmalloc(rp_size, GFP_KERNEL);
403 rp->num_commands = cpu_to_le16(num_commands);
404 rp->num_events = cpu_to_le16(num_events);
406 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 __le16 *opcode = rp->opcodes;
/* put_unaligned_le16: opcodes[] may not be naturally aligned */
409 for (i = 0; i < num_commands; i++, opcode++)
410 put_unaligned_le16(mgmt_commands[i], opcode);
412 for (i = 0; i < num_events; i++, opcode++)
413 put_unaligned_le16(mgmt_events[i], opcode);
415 __le16 *opcode = rp->opcodes;
417 for (i = 0; i < num_commands; i++, opcode++)
418 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
420 for (i = 0; i < num_events; i++, opcode++)
421 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: report the indices of all configured
 * primary controllers. Counts matching devices under hci_dev_list_lock,
 * allocates the reply (GFP_ATOMIC — inside a read_lock), then fills it,
 * skipping devices in SETUP/CONFIG/USER_CHANNEL states and raw-only
 * quirked devices.
 * NOTE(review): several declarations, the count reset between the two
 * list walks, and the trailing kfree/return are missing in this extract.
 */
431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
434 struct mgmt_rp_read_index_list *rp;
440 bt_dev_dbg(hdev, "sock %p", sk);
442 read_lock(&hci_dev_list_lock);
/* First pass: count eligible controllers to size the reply. */
445 list_for_each_entry(d, &hci_dev_list, list) {
446 if (d->dev_type == HCI_PRIMARY &&
447 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
451 rp_len = sizeof(*rp) + (2 * count);
452 rp = kmalloc(rp_len, GFP_ATOMIC);
454 read_unlock(&hci_dev_list_lock);
/* Second pass: record indices of visible, configured controllers. */
459 list_for_each_entry(d, &hci_dev_list, list) {
460 if (hci_dev_test_flag(d, HCI_SETUP) ||
461 hci_dev_test_flag(d, HCI_CONFIG) ||
462 hci_dev_test_flag(d, HCI_USER_CHANNEL))
465 /* Devices marked as raw-only are neither configured
466 * nor unconfigured controllers.
468 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
471 if (d->dev_type == HCI_PRIMARY &&
472 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
473 rp->index[count++] = cpu_to_le16(d->id);
474 bt_dev_dbg(hdev, "Added hci%u", d->id);
478 rp->num_controllers = cpu_to_le16(count);
/* Recompute length: second pass may have skipped devices. */
479 rp_len = sizeof(*rp) + (2 * count);
481 read_unlock(&hci_dev_list_lock);
483 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass structure as
 * read_index_list(), but selects UNCONFIGURED primary controllers
 * instead of configured ones.
 * NOTE(review): declarations, the count reset, kmalloc NULL check and
 * trailing kfree/return are missing in this extract.
 */
491 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
492 void *data, u16 data_len)
494 struct mgmt_rp_read_unconf_index_list *rp;
500 bt_dev_dbg(hdev, "sock %p", sk);
502 read_lock(&hci_dev_list_lock);
/* First pass: count unconfigured primary controllers. */
505 list_for_each_entry(d, &hci_dev_list, list) {
506 if (d->dev_type == HCI_PRIMARY &&
507 hci_dev_test_flag(d, HCI_UNCONFIGURED))
511 rp_len = sizeof(*rp) + (2 * count);
512 rp = kmalloc(rp_len, GFP_ATOMIC);
514 read_unlock(&hci_dev_list_lock);
/* Second pass: record the indices. */
519 list_for_each_entry(d, &hci_dev_list, list) {
520 if (hci_dev_test_flag(d, HCI_SETUP) ||
521 hci_dev_test_flag(d, HCI_CONFIG) ||
522 hci_dev_test_flag(d, HCI_USER_CHANNEL))
525 /* Devices marked as raw-only are neither configured
526 * nor unconfigured controllers.
528 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
531 if (d->dev_type == HCI_PRIMARY &&
532 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
533 rp->index[count++] = cpu_to_le16(d->id);
534 bt_dev_dbg(hdev, "Added hci%u", d->id);
538 rp->num_controllers = cpu_to_le16(count);
539 rp_len = sizeof(*rp) + (2 * count);
541 read_unlock(&hci_dev_list_lock);
543 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
544 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report every primary/AMP
 * controller as (index, type, bus) entries — type 0x00 = configured
 * primary, 0x01 = unconfigured primary, 0x02 = AMP. Calling this once
 * switches the socket to extended index events only.
 * NOTE(review): declarations, count reset, kmalloc NULL check and
 * trailing kfree/return are missing in this extract.
 */
551 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
552 void *data, u16 data_len)
554 struct mgmt_rp_read_ext_index_list *rp;
559 bt_dev_dbg(hdev, "sock %p", sk);
561 read_lock(&hci_dev_list_lock);
/* First pass: count all primary and AMP controllers. */
564 list_for_each_entry(d, &hci_dev_list, list) {
565 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
/* struct_size() guards the entry-array size computation. */
569 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
571 read_unlock(&hci_dev_list_lock);
/* Second pass: fill entries for visible controllers. */
576 list_for_each_entry(d, &hci_dev_list, list) {
577 if (hci_dev_test_flag(d, HCI_SETUP) ||
578 hci_dev_test_flag(d, HCI_CONFIG) ||
579 hci_dev_test_flag(d, HCI_USER_CHANNEL))
582 /* Devices marked as raw-only are neither configured
583 * nor unconfigured controllers.
585 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
588 if (d->dev_type == HCI_PRIMARY) {
589 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
590 rp->entry[count].type = 0x01;
592 rp->entry[count].type = 0x00;
593 } else if (d->dev_type == HCI_AMP) {
594 rp->entry[count].type = 0x02;
599 rp->entry[count].bus = d->bus;
600 rp->entry[count++].index = cpu_to_le16(d->id);
601 bt_dev_dbg(hdev, "Added hci%u", d->id);
604 rp->num_controllers = cpu_to_le16(count);
606 read_unlock(&hci_dev_list_lock);
608 /* If this command is called at least once, then all the
609 * default index and unconfigured index events are disabled
610 * and from now on only extended index events are used.
612 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
613 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
614 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
616 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
617 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
618 struct_size(rp, entry, count));
/* A controller is "configured" when any required external config has
 * been supplied and, if the address was invalid or fetched from DT, a
 * public address has been set.
 * NOTE(review): the "return false"/"return true" lines are missing in
 * this extract — only the condition checks survived.
 */
625 static bool is_configured(struct hci_dev *hdev)
627 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
631 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the missing-options bitmask for MGMT_OP_READ_CONFIG_INFO and
 * new_options(): mirrors is_configured(), reporting which configuration
 * steps are still outstanding.
 */
639 static __le32 get_missing_options(struct hci_dev *hdev)
643 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645 options |= MGMT_OPTION_EXTERNAL_CONFIG;
647 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649 !bacmp(&hdev->public_addr, BDADDR_ANY))
650 options |= MGMT_OPTION_PUBLIC_ADDRESS;
652 return cpu_to_le32(options);
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options mask
 * to sockets that opted into option events, skipping the originator.
 */
655 static int new_options(struct hci_dev *hdev, struct sock *skip)
657 __le32 options = get_missing_options(hdev);
659 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
660 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
/* Complete a config command by replying with the missing-options mask. */
663 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
665 __le32 options = get_missing_options(hdev);
667 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus supported
 * and missing configuration options for an unconfigured controller.
 * NOTE(review): the hci_dev_lock(hdev) call and the u32 options
 * declaration pairing with the hci_dev_unlock() below are missing in
 * this extract.
 */
671 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
672 void *data, u16 data_len)
674 struct mgmt_rp_read_config_info rp;
677 bt_dev_dbg(hdev, "sock %p", sk);
681 memset(&rp, 0, sizeof(rp));
682 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
684 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
685 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* Public address is settable only when the driver provides a hook. */
687 if (hdev->set_bdaddr)
688 options |= MGMT_OPTION_PUBLIC_ADDRESS;
690 rp.supported_options = cpu_to_le32(options);
691 rp.missing_options = get_missing_options(hdev);
693 hci_dev_unlock(hdev);
695 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the bitmask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits and LE feature bits. BR 1M 1-slot is always
 * present on BR/EDR-capable hardware; LE 1M always on LE hardware.
 */
699 static u32 get_supported_phys(struct hci_dev *hdev)
701 u32 supported_phys = 0;
703 if (lmp_bredr_capable(hdev)) {
704 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
706 if (hdev->features[0][0] & LMP_3SLOT)
707 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
709 if (hdev->features[0][0] & LMP_5SLOT)
710 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
712 if (lmp_edr_2m_capable(hdev)) {
713 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
715 if (lmp_edr_3slot_capable(hdev))
716 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
718 if (lmp_edr_5slot_capable(hdev))
719 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
721 if (lmp_edr_3m_capable(hdev)) {
722 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
724 if (lmp_edr_3slot_capable(hdev))
725 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
727 if (lmp_edr_5slot_capable(hdev))
728 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
733 if (lmp_le_capable(hdev)) {
734 supported_phys |= MGMT_PHY_LE_1M_TX;
735 supported_phys |= MGMT_PHY_LE_1M_RX;
737 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
738 supported_phys |= MGMT_PHY_LE_2M_TX;
739 supported_phys |= MGMT_PHY_LE_2M_RX;
742 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
743 supported_phys |= MGMT_PHY_LE_CODED_TX;
744 supported_phys |= MGMT_PHY_LE_CODED_RX;
748 return supported_phys;
/* Build the bitmask of currently selected PHYs. For BR/EDR the EDR
 * packet-type bits in hdev->pkt_type are inverted (a set HCI_2DHx/3DHx
 * bit means "do NOT use" that packet type), hence the negated tests.
 * LE selection comes from the default TX/RX PHY preferences.
 */
751 static u32 get_selected_phys(struct hci_dev *hdev)
753 u32 selected_phys = 0;
755 if (lmp_bredr_capable(hdev)) {
756 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
758 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
759 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
761 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
762 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
764 if (lmp_edr_2m_capable(hdev)) {
765 if (!(hdev->pkt_type & HCI_2DH1))
766 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
768 if (lmp_edr_3slot_capable(hdev) &&
769 !(hdev->pkt_type & HCI_2DH3))
770 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
772 if (lmp_edr_5slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH5))
774 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
776 if (lmp_edr_3m_capable(hdev)) {
777 if (!(hdev->pkt_type & HCI_3DH1))
778 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
780 if (lmp_edr_3slot_capable(hdev) &&
781 !(hdev->pkt_type & HCI_3DH3))
782 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
784 if (lmp_edr_5slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH5))
786 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
791 if (lmp_le_capable(hdev)) {
792 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
793 selected_phys |= MGMT_PHY_LE_1M_TX;
795 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
796 selected_phys |= MGMT_PHY_LE_1M_RX;
798 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
799 selected_phys |= MGMT_PHY_LE_2M_TX;
801 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
802 selected_phys |= MGMT_PHY_LE_2M_RX;
804 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
805 selected_phys |= MGMT_PHY_LE_CODED_TX;
807 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
808 selected_phys |= MGMT_PHY_LE_CODED_RX;
811 return selected_phys;
/* PHYs userspace may toggle: everything supported except the always-on
 * mandatory ones (BR 1M 1-slot, LE 1M TX/RX).
 */
814 static u32 get_configurable_phys(struct hci_dev *hdev)
816 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the supported-settings bitmask reported via READ_INFO: base
 * settings always, plus BR/EDR-, LE- and ISO-dependent ones.
 * NOTE(review): the u32 settings declaration/initialization, a closing
 * brace or two, and the final return are missing in this extract.
 */
820 static u32 get_supported_settings(struct hci_dev *hdev)
824 settings |= MGMT_SETTING_POWERED;
825 settings |= MGMT_SETTING_BONDABLE;
826 settings |= MGMT_SETTING_DEBUG_KEYS;
827 settings |= MGMT_SETTING_CONNECTABLE;
828 settings |= MGMT_SETTING_DISCOVERABLE;
830 if (lmp_bredr_capable(hdev)) {
831 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832 settings |= MGMT_SETTING_FAST_CONNECTABLE;
833 settings |= MGMT_SETTING_BREDR;
834 settings |= MGMT_SETTING_LINK_SECURITY;
836 if (lmp_ssp_capable(hdev)) {
837 settings |= MGMT_SETTING_SSP;
840 if (lmp_sc_capable(hdev))
841 settings |= MGMT_SETTING_SECURE_CONN;
843 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
845 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
848 if (lmp_le_capable(hdev)) {
849 settings |= MGMT_SETTING_LE;
850 settings |= MGMT_SETTING_SECURE_CONN;
851 settings |= MGMT_SETTING_PRIVACY;
852 settings |= MGMT_SETTING_STATIC_ADDRESS;
853 settings |= MGMT_SETTING_ADVERTISING;
856 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
858 settings |= MGMT_SETTING_CONFIGURATION;
860 if (cis_central_capable(hdev))
861 settings |= MGMT_SETTING_CIS_CENTRAL;
863 if (cis_peripheral_capable(hdev))
864 settings |= MGMT_SETTING_CIS_PERIPHERAL;
866 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the current-settings bitmask: one bit per active HCI dev flag,
 * mirroring the supported-settings layout.
 * NOTE(review): the u32 settings declaration and the final return are
 * missing in this extract.
 */
871 static u32 get_current_settings(struct hci_dev *hdev)
875 if (hdev_is_powered(hdev))
876 settings |= MGMT_SETTING_POWERED;
878 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
879 settings |= MGMT_SETTING_CONNECTABLE;
881 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
882 settings |= MGMT_SETTING_FAST_CONNECTABLE;
884 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
885 settings |= MGMT_SETTING_DISCOVERABLE;
887 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
888 settings |= MGMT_SETTING_BONDABLE;
890 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
891 settings |= MGMT_SETTING_BREDR;
893 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
894 settings |= MGMT_SETTING_LE;
896 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
897 settings |= MGMT_SETTING_LINK_SECURITY;
899 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
900 settings |= MGMT_SETTING_SSP;
902 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
903 settings |= MGMT_SETTING_ADVERTISING;
905 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
906 settings |= MGMT_SETTING_SECURE_CONN;
908 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
909 settings |= MGMT_SETTING_DEBUG_KEYS;
911 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
912 settings |= MGMT_SETTING_PRIVACY;
914 /* The current setting for static address has two purposes. The
915 * first is to indicate if the static address will be used and
916 * the second is to indicate if it is actually set.
918 * This means if the static address is not configured, this flag
919 * will never be set. If the address is configured, then if the
920 * address is actually used decides if the flag is set or not.
922 * For single mode LE only controllers and dual-mode controllers
923 * with BR/EDR disabled, the existence of the static address will
926 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
927 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
928 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
929 if (bacmp(&hdev->static_addr, BDADDR_ANY))
930 settings |= MGMT_SETTING_STATIC_ADDRESS;
933 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
934 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
936 if (cis_central_capable(hdev))
937 settings |= MGMT_SETTING_CIS_CENTRAL;
939 if (cis_peripheral_capable(hdev))
940 settings |= MGMT_SETTING_CIS_PERIPHERAL;
942 if (bis_capable(hdev))
943 settings |= MGMT_SETTING_ISO_BROADCASTER;
945 if (sync_recv_capable(hdev))
946 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
/* Look up a pending mgmt command for this device on the control channel. */
951 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
953 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
/* Return the AD discoverability flags (LE_AD_GENERAL / LE_AD_LIMITED)
 * for advertising data. A pending SET_DISCOVERABLE command takes
 * precedence over the current flags since those haven't settled yet.
 * NOTE(review): the "if (cmd)", "if (cp->val == 0x01)" guard and the
 * final "return 0" appear to be missing from this extract.
 */
956 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
958 struct mgmt_pending_cmd *cmd;
960 /* If there's a pending mgmt command the flags will not yet have
961 * their final values, so check for this first.
963 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
965 struct mgmt_mode *cp = cmd->param;
967 return LE_AD_GENERAL;
968 else if (cp->val == 0x02)
969 return LE_AD_LIMITED;
971 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
972 return LE_AD_LIMITED;
973 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
974 return LE_AD_GENERAL;
/* Return whether the controller is (or is about to become) connectable,
 * preferring the value from a pending SET_CONNECTABLE command.
 * NOTE(review): the "if (cmd)" guard and "return cp->val;" line are
 * missing from this extract.
 */
980 bool mgmt_get_connectable(struct hci_dev *hdev)
982 struct mgmt_pending_cmd *cmd;
984 /* If there's a pending mgmt command the flag will not yet have
985 * it's final value, so check for this first.
987 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
989 struct mgmt_mode *cp = cmd->param;
994 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* hci_cmd_sync callback: refresh EIR data and class of device after the
 * service cache timeout expires.
 */
997 static int service_cache_sync(struct hci_dev *hdev, void *data)
999 hci_update_eir_sync(hdev);
1000 hci_update_class_sync(hdev);
/* Delayed-work handler for hdev->service_cache: if the SERVICE_CACHE
 * flag was still set, queue the synchronous EIR/class refresh.
 */
1005 static void service_cache_off(struct work_struct *work)
1007 struct hci_dev *hdev = container_of(work, struct hci_dev,
1008 service_cache.work);
1010 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1013 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
/* hci_cmd_sync callback: restart advertising so a fresh RPA gets
 * generated and programmed (the RPA rotation itself happens inside the
 * advertising-enable path).
 */
1016 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1018 /* The generation of a new RPA and programming it into the
1019 * controller happens in the hci_req_enable_advertising()
1022 if (ext_adv_capable(hdev))
1023 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1025 return hci_enable_advertising_sync(hdev);
/* Delayed-work handler for hdev->rpa_expired: mark the RPA stale and,
 * if advertising is active, queue the restart that rotates it.
 */
1028 static void rpa_expired(struct work_struct *work)
1030 struct hci_dev *hdev = container_of(work, struct hci_dev,
1033 bt_dev_dbg(hdev, "");
1035 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1037 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1040 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1043 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
/* Delayed-work handler for the discoverable timeout: clear both
 * discoverable flags under hdev lock, reset the timeout, re-sync the
 * controller state and broadcast the new settings.
 * NOTE(review): the hci_dev_lock(hdev) call pairing with the unlock
 * below is missing from this extract.
 */
1045 static void discov_off(struct work_struct *work)
1047 struct hci_dev *hdev = container_of(work, struct hci_dev,
1050 bt_dev_dbg(hdev, "");
1054 /* When discoverable timeout triggers, then just make sure
1055 * the limited discoverable flag is cleared. Even in the case
1056 * of a timeout triggered from general discoverable, it is
1057 * safe to unconditionally clear the flag.
1059 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1060 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1061 hdev->discov_timeout = 0;
1063 hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
1065 mgmt_new_settings(hdev);
1067 hci_dev_unlock(hdev);
1070 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
/* Finish a mesh TX: unless silent, notify userspace with the packet
 * handle, then free the pending-TX entry.
 * NOTE(review): the "if (!silent)" guard for the event emission appears
 * to be missing from this extract.
 */
1072 static void mesh_send_complete(struct hci_dev *hdev,
1073 struct mgmt_mesh_tx *mesh_tx, bool silent)
1075 u8 handle = mesh_tx->handle;
1078 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1079 sizeof(handle), NULL);
1081 mgmt_mesh_remove(mesh_tx);
/* hci_cmd_sync callback after a mesh send window ends: stop advertising
 * and complete the pending TX entry (non-silently).
 * NOTE(review): the loop/conditional around mesh_send_complete() and
 * the return value are missing from this extract.
 */
1084 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1086 struct mgmt_mesh_tx *mesh_tx;
1088 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1089 hci_disable_advertising_sync(hdev);
1090 mesh_tx = mgmt_mesh_next(hdev, NULL);
1093 mesh_send_complete(hdev, mesh_tx, false);
1098 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1099 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback chained after mesh_send_done_sync: start the next
 * queued mesh TX, completing it immediately if queueing fails, and mark
 * the controller as mesh-sending on success.
 * NOTE(review): the NULL check on mesh_tx and the "if (err)"/"else"
 * branch structure are missing from this extract.
 */
1100 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1102 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1107 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1108 mesh_send_start_complete);
1111 mesh_send_complete(hdev, mesh_tx, false);
1113 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
/* Delayed-work handler for hdev->mesh_send_done: if a mesh send is in
 * progress, queue its teardown with mesh_next as the follow-up.
 */
1116 static void mesh_send_done(struct work_struct *work)
1118 struct hci_dev *hdev = container_of(work, struct hci_dev,
1119 mesh_send_done.work);
1121 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1124 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
/* One-time per-device mgmt initialization, idempotent via the HCI_MGMT
 * flag: wire up the delayed-work handlers and clear BONDABLE so that
 * userspace must opt in explicitly.
 */
1127 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1129 if (hci_dev_test_flag(hdev, HCI_MGMT))
1132 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1134 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1135 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1136 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1137 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1139 /* Non-mgmt controlled devices get this bit set
1140 * implicitly so that pairing works for them, however
1141 * for mgmt we require user-space to explicitly enable
1144 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1146 hci_dev_set_flag(hdev, HCI_MGMT);
/* MGMT_OP_READ_INFO handler: snapshot address, version, manufacturer,
 * settings masks, class of device and names into the reply.
 * NOTE(review): the hci_dev_lock(hdev) pairing with the unlock below is
 * missing from this extract.
 */
1149 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1150 void *data, u16 data_len)
1152 struct mgmt_rp_read_info rp;
1154 bt_dev_dbg(hdev, "sock %p", sk);
1158 memset(&rp, 0, sizeof(rp));
1160 bacpy(&rp.bdaddr, &hdev->bdaddr);
1162 rp.version = hdev->hci_ver;
1163 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1165 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1166 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1168 memcpy(rp.dev_class, hdev->dev_class, 3);
1170 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1171 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1173 hci_dev_unlock(hdev);
1175 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Append class-of-device, appearance, complete and short name EIR
 * fields to 'eir' and return the total length written. Used by both
 * read_ext_controller_info() and ext_info_changed().
 * NOTE(review): the eir_len/name_len declarations and the final return
 * are missing from this extract.
 */
1179 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1184 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1185 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1186 hdev->dev_class, 3);
1188 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1189 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1192 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1193 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1194 hdev->dev_name, name_len);
1196 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1197 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1198 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like READ_INFO but with an EIR-encoded
 * variable-length tail. Calling it once switches the socket from the
 * class-of-device/local-name events to the extended-info event.
 * NOTE(review): the stack buffer declaration sized for the reply plus
 * EIR, the hci_dev_lock(hdev), and the eir_len declaration are missing
 * from this extract.
 */
1203 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1204 void *data, u16 data_len)
1207 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1210 bt_dev_dbg(hdev, "sock %p", sk);
1212 memset(&buf, 0, sizeof(buf));
1216 bacpy(&rp->bdaddr, &hdev->bdaddr);
1218 rp->version = hdev->hci_ver;
1219 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1221 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1222 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1225 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1226 rp->eir_len = cpu_to_le16(eir_len);
1228 hci_dev_unlock(hdev);
1230 /* If this command is called at least once, then the events
1231 * for class of device and local name changes are disabled
1232 * and only the new extended controller information event
1235 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1236 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1237 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1239 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1240 sizeof(*rp) + eir_len);
/* Emit MGMT_EV_EXT_INFO_CHANGED (current EIR snapshot) to all sockets
 * that opted into extended info events, skipping @skip. ev points into
 * a stack buffer declared on a dropped line.
 */
1243 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1246 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1249 memset(buf, 0, sizeof(buf));
1251 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1252 ev->eir_len = cpu_to_le16(eir_len);
1254 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1255 sizeof(*ev) + eir_len,
1256 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Reply to @opcode with the current settings bitmask (little endian). */
1259 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1261 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1263 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Broadcast MGMT_EV_ADVERTISING_ADDED for @instance, skipping @sk. */
1267 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1269 struct mgmt_ev_advertising_added ev;
1271 ev.instance = instance;
1273 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast MGMT_EV_ADVERTISING_REMOVED for @instance, skipping @sk. */
1276 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1279 struct mgmt_ev_advertising_removed ev;
1281 ev.instance = instance;
1283 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Stop a pending advertising-instance expiry timer, if armed. */
1286 static void cancel_adv_timeout(struct hci_dev *hdev)
1288 if (hdev->adv_instance_timeout) {
1289 hdev->adv_instance_timeout = 0;
1290 cancel_delayed_work(&hdev->adv_instance_expire);
1294 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry onto the pending list
 * matching its auto_connect policy after a power cycle.
 */
1295 static void restart_le_actions(struct hci_dev *hdev)
1297 struct hci_conn_params *p;
1299 list_for_each_entry(p, &hdev->le_conn_params, list) {
1300 /* Needed for AUTO_OFF case where might not "really"
1301 * have been powered off.
1303 hci_pend_le_list_del_init(p);
1305 switch (p->auto_connect) {
1306 case HCI_AUTO_CONN_DIRECT:
1307 case HCI_AUTO_CONN_ALWAYS:
1308 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1310 case HCI_AUTO_CONN_REPORT:
1311 hci_pend_le_list_add(p, &hdev->pend_le_reports);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings, skipping
 * @skip (typically the socket whose command caused the change).
 */
1319 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1321 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1323 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1324 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Completion callback for MGMT_OP_SET_POWERED: on success restart LE
 * actions and passive scan, reply with the new settings and (for power
 * on) broadcast New Settings; on error reply with a status. Several
 * branch/brace lines were dropped by extraction.
 */
1327 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1329 struct mgmt_pending_cmd *cmd = data;
1330 struct mgmt_mode *cp;
1332 /* Make sure cmd still outstanding. */
1333 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1338 bt_dev_dbg(hdev, "err %d", err);
1343 restart_le_actions(hdev);
1344 hci_update_passive_scan(hdev);
1345 hci_dev_unlock(hdev);
1348 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1350 /* Only call new_setting for power on as power off is deferred
1351 * to hdev->power_off work which does call hci_dev_do_close.
1354 new_settings(hdev, cmd->sk);
1356 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1360 mgmt_pending_remove(cmd);
/* hci_cmd_sync work: apply the requested power state (cp->val). */
1363 static int set_powered_sync(struct hci_dev *hdev, void *data)
1365 struct mgmt_pending_cmd *cmd = data;
1366 struct mgmt_mode *cp = cmd->param;
1368 BT_DBG("%s", hdev->name);
1370 return hci_set_powered_sync(hdev, cp->val);
/* MGMT_OP_SET_POWERED handler: validate the mode byte, reject if a
 * power command is already pending, short-circuit if already in the
 * requested state, otherwise queue/submit set_powered_sync().
 */
1373 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1376 struct mgmt_mode *cp = data;
1377 struct mgmt_pending_cmd *cmd;
1380 bt_dev_dbg(hdev, "sock %p", sk);
1382 if (cp->val != 0x00 && cp->val != 0x01)
1383 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1384 MGMT_STATUS_INVALID_PARAMS);
1388 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1389 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Already in the requested state: reply with current settings only */
1394 if (!!cp->val == hdev_is_powered(hdev)) {
1395 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1399 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1405 /* Cancel potentially blocking sync operation before power off */
1406 if (cp->val == 0x00) {
1407 hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
1408 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1409 mgmt_set_powered_complete);
1411 /* Use hci_cmd_sync_submit since hdev might not be running */
1412 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1413 mgmt_set_powered_complete);
/* Queueing failed: drop the pending command (error path) */
1417 mgmt_pending_remove(cmd);
1420 hci_dev_unlock(hdev);
/* Public wrapper: broadcast New Settings to every mgmt socket. */
1424 int mgmt_new_settings(struct hci_dev *hdev)
1426 return new_settings(hdev, NULL);
1431 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer each pending command with the
 * current settings, remember the first socket in the cmd_lookup match
 * (with a reference) so the caller can skip it when broadcasting, and
 * free the pending entry.
 */
1435 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1437 struct cmd_lookup *match = data;
1439 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1441 list_del(&cmd->list);
1443 if (match->sk == NULL) {
1444 match->sk = cmd->sk;
1445 sock_hold(match->sk);
1448 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail the command with *(u8 *)data. */
1451 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1455 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1456 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own
 * cmd_complete handler; fall back to a plain status response.
 */
1459 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1461 if (cmd->cmd_complete) {
1464 cmd->cmd_complete(cmd, *status);
1465 mgmt_pending_remove(cmd);
1470 cmd_status_rsp(cmd, data);
/* cmd_complete helper: echo the command's own parameters back. */
1473 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1475 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1476 cmd->param, cmd->param_len);
/* cmd_complete helper: reply with just the leading mgmt_addr_info
 * portion of the original parameters.
 */
1479 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1481 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1482 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled.
 */
1485 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1487 if (!lmp_bredr_capable(hdev))
1488 return MGMT_STATUS_NOT_SUPPORTED;
1489 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1490 return MGMT_STATUS_REJECTED;
1492 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an mgmt status, mirroring mgmt_bredr_support. */
1495 static u8 mgmt_le_support(struct hci_dev *hdev)
1497 if (!lmp_le_capable(hdev))
1498 return MGMT_STATUS_NOT_SUPPORTED;
1499 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1500 return MGMT_STATUS_REJECTED;
1502 return MGMT_STATUS_SUCCESS;
/* Completion callback for MGMT_OP_SET_DISCOVERABLE: on error report a
 * status and drop the limited flag; on success (re)arm the discoverable
 * timeout and notify with settings + New Settings.
 */
1505 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1508 struct mgmt_pending_cmd *cmd = data;
1510 bt_dev_dbg(hdev, "err %d", err);
1512 /* Make sure cmd still outstanding. */
1513 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1519 u8 mgmt_err = mgmt_status(err);
1520 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1521 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm discov_off now that the mode change took effect */
1525 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1526 hdev->discov_timeout > 0) {
1527 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1528 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1531 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1532 new_settings(hdev, cmd->sk);
1535 mgmt_pending_remove(cmd);
1536 hci_dev_unlock(hdev);
/* hci_cmd_sync work: push the discoverable state to the controller. */
1539 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1541 BT_DBG("%s", hdev->name);
1543 return hci_update_discoverable_sync(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited (limited requires a timeout, off forbids one). Handles
 * the powered-off case purely as a flag change, a same-mode call as a
 * timeout-only update, and otherwise queues set_discoverable_sync().
 */
1546 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1549 struct mgmt_cp_set_discoverable *cp = data;
1550 struct mgmt_pending_cmd *cmd;
1554 bt_dev_dbg(hdev, "sock %p", sk);
1556 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1557 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1558 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1559 MGMT_STATUS_REJECTED);
1561 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1562 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1563 MGMT_STATUS_INVALID_PARAMS);
1565 timeout = __le16_to_cpu(cp->timeout);
1567 /* Disabling discoverable requires that no timeout is set,
1568 * and enabling limited discoverable requires a timeout.
1570 if ((cp->val == 0x00 && timeout > 0) ||
1571 (cp->val == 0x02 && timeout == 0))
1572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1573 MGMT_STATUS_INVALID_PARAMS);
1577 if (!hdev_is_powered(hdev) && timeout > 0) {
1578 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1579 MGMT_STATUS_NOT_POWERED);
/* Only one discoverable/connectable command may be in flight */
1583 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1584 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1590 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1591 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1592 MGMT_STATUS_REJECTED);
1596 if (hdev->advertising_paused) {
1597 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1602 if (!hdev_is_powered(hdev)) {
1603 bool changed = false;
1605 /* Setting limited discoverable when powered off is
1606 * not a valid operation since it requires a timeout
1607 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1609 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1610 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1614 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1619 err = new_settings(hdev, sk);
1624 /* If the current mode is the same, then just update the timeout
1625 * value with the new value. And if only the timeout gets updated,
1626 * then no need for any HCI transactions.
1628 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1629 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1630 HCI_LIMITED_DISCOVERABLE)) {
1631 cancel_delayed_work(&hdev->discov_off);
1632 hdev->discov_timeout = timeout;
1634 if (cp->val && hdev->discov_timeout > 0) {
1635 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1636 queue_delayed_work(hdev->req_workqueue,
1637 &hdev->discov_off, to);
1640 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1644 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1650 /* Cancel any potential discoverable timeout that might be
1651 * still active and store new timeout value. The arming of
1652 * the timeout happens in the complete handler.
1654 cancel_delayed_work(&hdev->discov_off);
1655 hdev->discov_timeout = timeout;
1658 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1660 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1662 /* Limited discoverable mode */
1663 if (cp->val == 0x02)
1664 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1666 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1668 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1669 mgmt_set_discoverable_complete);
1672 mgmt_pending_remove(cmd);
1675 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_SET_CONNECTABLE: status on error,
 * settings reply + New Settings broadcast on success.
 */
1679 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1682 struct mgmt_pending_cmd *cmd = data;
1684 bt_dev_dbg(hdev, "err %d", err);
1686 /* Make sure cmd still outstanding. */
1687 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1693 u8 mgmt_err = mgmt_status(err);
1694 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1698 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1699 new_settings(hdev, cmd->sk);
1703 mgmt_pending_remove(cmd);
1705 hci_dev_unlock(hdev);
/* Flag-only connectable change (used when powered off): toggle
 * HCI_CONNECTABLE (clearing also drops HCI_DISCOVERABLE), reply with
 * settings and, if anything changed, refresh scanning and broadcast.
 */
1708 static int set_connectable_update_settings(struct hci_dev *hdev,
1709 struct sock *sk, u8 val)
1711 bool changed = false;
1714 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1718 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1720 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1721 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1724 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1729 hci_update_scan(hdev);
1730 hci_update_passive_scan(hdev);
1731 return new_settings(hdev, sk);
/* hci_cmd_sync work: push the connectable state to the controller. */
1737 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1739 BT_DBG("%s", hdev->name);
1741 return hci_update_connectable_sync(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: validate, handle powered-off via
 * set_connectable_update_settings(), reject if a discoverable or
 * connectable command is pending, otherwise update flags and queue
 * set_connectable_sync().
 */
1744 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1747 struct mgmt_mode *cp = data;
1748 struct mgmt_pending_cmd *cmd;
1751 bt_dev_dbg(hdev, "sock %p", sk);
1753 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1754 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1756 MGMT_STATUS_REJECTED);
1758 if (cp->val != 0x00 && cp->val != 0x01)
1759 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1760 MGMT_STATUS_INVALID_PARAMS);
1764 if (!hdev_is_powered(hdev)) {
1765 err = set_connectable_update_settings(hdev, sk, cp->val);
1769 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1770 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1771 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1776 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1783 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Disabling connectable also tears down any discoverable state */
1785 if (hdev->discov_timeout > 0)
1786 cancel_delayed_work(&hdev->discov_off);
1788 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1789 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1790 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1793 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1794 mgmt_set_connectable_complete);
1797 mgmt_pending_remove(cmd);
1800 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle HCI_BONDABLE synchronously (no
 * HCI traffic needed), refresh discoverable state and broadcast on
 * change.
 */
1804 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1807 struct mgmt_mode *cp = data;
1811 bt_dev_dbg(hdev, "sock %p", sk);
1813 if (cp->val != 0x00 && cp->val != 0x01)
1814 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1815 MGMT_STATUS_INVALID_PARAMS);
1820 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1822 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1824 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1829 /* In limited privacy mode the change of bondable mode
1830 * may affect the local advertising address.
1832 hci_update_discoverable(hdev);
1834 err = new_settings(hdev, sk);
1838 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: BR/EDR only. Powered-off case is
 * a flag flip; otherwise, if the HCI_AUTH bit already matches, just
 * reply, else send HCI Write Authentication Enable directly (legacy
 * hci_send_cmd path, not hci_cmd_sync).
 */
1842 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1845 struct mgmt_mode *cp = data;
1846 struct mgmt_pending_cmd *cmd;
1850 bt_dev_dbg(hdev, "sock %p", sk);
1852 status = mgmt_bredr_support(hdev);
1854 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1857 if (cp->val != 0x00 && cp->val != 0x01)
1858 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1859 MGMT_STATUS_INVALID_PARAMS);
1863 if (!hdev_is_powered(hdev)) {
1864 bool changed = false;
1866 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1867 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1871 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1876 err = new_settings(hdev, sk);
1881 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1882 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1889 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1890 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1894 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1900 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1902 mgmt_pending_remove(cmd);
1907 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_SET_SSP: on error roll back the
 * HCI_SSP_ENABLED flag and fail all pending SET_SSP commands; on
 * success sync the flag, answer all pending commands with settings,
 * broadcast if changed, and refresh the EIR.
 */
1911 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1913 struct cmd_lookup match = { NULL, hdev };
1914 struct mgmt_pending_cmd *cmd = data;
1915 struct mgmt_mode *cp = cmd->param;
1916 u8 enable = cp->val;
1919 /* Make sure cmd still outstanding. */
1920 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1924 u8 mgmt_err = mgmt_status(err);
1926 if (enable && hci_dev_test_and_clear_flag(hdev,
1928 new_settings(hdev, NULL);
1931 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1937 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1939 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1942 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1945 new_settings(hdev, match.sk);
1950 hci_update_eir_sync(hdev);
/* hci_cmd_sync work for SET_SSP: optimistically set HCI_SSP_ENABLED
 * before writing the controller mode, and undo the flag if the write
 * fails.
 */
1953 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1955 struct mgmt_pending_cmd *cmd = data;
1956 struct mgmt_mode *cp = cmd->param;
1957 bool changed = false;
1961 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1963 err = hci_write_ssp_mode_sync(hdev, cp->val);
1965 if (!err && changed)
1966 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP capability.
 * Powered-off case is a flag flip; same-state calls short-circuit;
 * otherwise queue set_ssp_sync().
 */
1971 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1973 struct mgmt_mode *cp = data;
1974 struct mgmt_pending_cmd *cmd;
1978 bt_dev_dbg(hdev, "sock %p", sk);
1980 status = mgmt_bredr_support(hdev);
1982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1984 if (!lmp_ssp_capable(hdev))
1985 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1986 MGMT_STATUS_NOT_SUPPORTED);
1988 if (cp->val != 0x00 && cp->val != 0x01)
1989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1990 MGMT_STATUS_INVALID_PARAMS);
1994 if (!hdev_is_powered(hdev)) {
1998 changed = !hci_dev_test_and_set_flag(hdev,
2001 changed = hci_dev_test_and_clear_flag(hdev,
2005 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2010 err = new_settings(hdev, sk);
2015 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2016 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2021 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2022 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2026 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2030 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2034 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2035 MGMT_STATUS_FAILED);
2038 mgmt_pending_remove(cmd);
2042 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: High Speed support was removed; always
 * reject with NOT_SUPPORTED.
 */
2046 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2048 bt_dev_dbg(hdev, "sock %p", sk);
2050 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2051 MGMT_STATUS_NOT_SUPPORTED);
/* Completion callback for MGMT_OP_SET_LE: fail all pending SET_LE
 * commands on error, otherwise answer them with settings and broadcast
 * New Settings.
 */
2054 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2056 struct cmd_lookup match = { NULL, hdev };
2057 u8 status = mgmt_status(err);
2059 bt_dev_dbg(hdev, "err %d", err);
2062 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2067 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2069 new_settings(hdev, match.sk);
/* hci_cmd_sync work for SET_LE: when disabling, tear down advertising
 * instances first; write LE Host Supported, then (enable path only)
 * refresh the default advertising/scan-response data and the passive
 * scan state.
 */
2075 static int set_le_sync(struct hci_dev *hdev, void *data)
2077 struct mgmt_pending_cmd *cmd = data;
2078 struct mgmt_mode *cp = cmd->param;
2083 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2085 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2086 hci_disable_advertising_sync(hdev);
2088 if (ext_adv_capable(hdev))
2089 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2091 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2094 err = hci_write_le_host_supported_sync(hdev, val, 0);
2096 /* Make sure the controller has a good default for
2097 * advertising data. Restrict the update to when LE
2098 * has actually been enabled. During power on, the
2099 * update in powered_update_hci will take care of it.
2101 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2102 if (ext_adv_capable(hdev)) {
2105 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2107 hci_update_scan_rsp_data_sync(hdev, 0x00);
2109 hci_update_adv_data_sync(hdev, 0x00);
2110 hci_update_scan_rsp_data_sync(hdev, 0x00);
2113 hci_update_passive_scan(hdev);
/* Completion callback for MGMT_OP_SET_MESH_RECEIVER: on error fail all
 * pending commands with the status; on success remove the pending
 * entry and send an empty command-complete.
 */
2119 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2121 struct mgmt_pending_cmd *cmd = data;
2122 u8 status = mgmt_status(err);
2123 struct sock *sk = cmd->sk;
2126 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2127 cmd_status_rsp, &status);
2131 mgmt_pending_remove(cmd);
2132 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
/* hci_cmd_sync work for SET_MESH_RECEIVER: reset the mesh AD-type
 * filter, toggle HCI_MESH per cp->enable, copy the caller's AD-type
 * list when it fits, and re-evaluate passive scanning.
 */
2135 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2137 struct mgmt_pending_cmd *cmd = data;
2138 struct mgmt_cp_set_mesh *cp = cmd->param;
2139 size_t len = cmd->param_len;
2141 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2144 hci_dev_set_flag(hdev, HCI_MESH);
2146 hci_dev_clear_flag(hdev, HCI_MESH);
/* NOTE(review): len here still includes the fixed mgmt_cp_set_mesh
 * header (a subtraction line appears to have been dropped by
 * extraction) — verify against upstream before relying on this copy.
 */
2150 /* If filters don't fit, forward all adv pkts */
2151 if (len <= sizeof(hdev->mesh_ad_types))
2152 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2154 hci_update_passive_scan_sync(hdev);
/* MGMT_OP_SET_MESH_RECEIVER handler: requires LE plus the mesh
 * experimental flag; validates the enable byte and queues
 * set_mesh_sync().
 */
2158 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2160 struct mgmt_cp_set_mesh *cp = data;
2161 struct mgmt_pending_cmd *cmd;
2164 bt_dev_dbg(hdev, "sock %p", sk);
2166 if (!lmp_le_capable(hdev) ||
2167 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2168 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2169 MGMT_STATUS_NOT_SUPPORTED);
2171 if (cp->enable != 0x00 && cp->enable != 0x01)
2172 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2173 MGMT_STATUS_INVALID_PARAMS);
2177 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2181 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2185 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2186 MGMT_STATUS_FAILED);
2189 mgmt_pending_remove(cmd);
2192 hci_dev_unlock(hdev);
/* Completion callback for a queued mesh TX: on error clear the sending
 * flag and report the failed handle; on success schedule the
 * send-done work after cnt * 25 ms.
 */
2196 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2198 struct mgmt_mesh_tx *mesh_tx = data;
2199 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2200 unsigned long mesh_send_interval;
2201 u8 mgmt_err = mgmt_status(err);
2203 /* Report any errors here, but don't report completion */
2206 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2207 /* Send Complete Error Code for handle */
2208 mesh_send_complete(hdev, mesh_tx, false);
2212 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2213 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2214 mesh_send_interval);
/* hci_cmd_sync work for MESH_SEND: create a one-shot advertising
 * instance (index le_num_of_adv_sets + 1, reserved for mesh) carrying
 * the mesh payload as AD, then schedule it — preempting the current
 * instance if that slot is already advertising.
 */
2217 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2219 struct mgmt_mesh_tx *mesh_tx = data;
2220 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2221 struct adv_info *adv, *next_instance;
2222 u8 instance = hdev->le_num_of_adv_sets + 1;
2223 u16 timeout, duration;
2226 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2227 return MGMT_STATUS_BUSY;
2230 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2231 adv = hci_add_adv_instance(hdev, instance, 0,
2232 send->adv_data_len, send->adv_data,
2235 HCI_ADV_TX_POWER_NO_PREFERENCE,
2236 hdev->le_adv_min_interval,
2237 hdev->le_adv_max_interval,
2241 mesh_tx->instance = instance;
2245 if (hdev->cur_adv_instance == instance) {
2246 /* If the currently advertised instance is being changed then
2247 * cancel the current advertising and schedule the next
2248 * instance. If there is only one instance then the overridden
2249 * advertising data will be visible right away.
2251 cancel_adv_timeout(hdev);
2253 next_instance = hci_get_next_instance(hdev, instance);
2255 instance = next_instance->instance;
2258 } else if (hdev->adv_instance_timeout) {
2259 /* Immediately advertise the new instance if no other, or
2260 * let it go naturally from queue if ADV is already happening
2266 return hci_schedule_adv_instance_sync(hdev, instance, true);
/* mgmt_mesh_foreach callback: record each outstanding TX handle in the
 * read-features reply, stopping at max_handles.
 */
2271 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2273 struct mgmt_rp_mesh_read_features *rp = data;
2275 if (rp->used_handles >= rp->max_handles)
2278 rp->handles[rp->used_handles++] = mesh_tx->handle;
/* MGMT_OP_MESH_READ_FEATURES handler: report handle capacity and the
 * handles of this socket's outstanding mesh transmissions. The reply
 * is trimmed to the handles actually used.
 */
2281 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2282 void *data, u16 len)
2284 struct mgmt_rp_mesh_read_features rp;
2286 if (!lmp_le_capable(hdev) ||
2287 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2288 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2289 MGMT_STATUS_NOT_SUPPORTED);
2291 memset(&rp, 0, sizeof(rp));
2292 rp.index = cpu_to_le16(hdev->id);
2293 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2294 rp.max_handles = MESH_HANDLES_MAX;
2299 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
/* Reply length excludes the unused tail of the handles array */
2301 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2302 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2304 hci_dev_unlock(hdev);
/* hci_cmd_sync work for MESH_SEND_CANCEL: handle 0 cancels all of the
 * requester's outstanding transmissions; otherwise cancel the one
 * matching handle, but only if it belongs to the requesting socket.
 */
2308 static int send_cancel(struct hci_dev *hdev, void *data)
2310 struct mgmt_pending_cmd *cmd = data;
2311 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2312 struct mgmt_mesh_tx *mesh_tx;
2314 if (!cancel->handle) {
2316 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2319 mesh_send_complete(hdev, mesh_tx, false);
2322 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2324 if (mesh_tx && mesh_tx->sk == cmd->sk)
2325 mesh_send_complete(hdev, mesh_tx, false);
2328 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2330 mgmt_pending_free(cmd);
/* MGMT_OP_MESH_SEND_CANCEL handler: requires LE + mesh experimental
 * and LE enabled; queues send_cancel() as sync work.
 */
2335 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2336 void *data, u16 len)
2338 struct mgmt_pending_cmd *cmd;
2341 if (!lmp_le_capable(hdev) ||
2342 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2343 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2344 MGMT_STATUS_NOT_SUPPORTED);
2346 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2347 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2348 MGMT_STATUS_REJECTED);
2351 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2355 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2358 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2359 MGMT_STATUS_FAILED);
2362 mgmt_pending_free(cmd);
2365 hci_dev_unlock(hdev);
/* MGMT_OP_MESH_SEND handler: bounds-check the payload (header + up to
 * 31 AD bytes), refuse when all TX handles are in use, register the
 * transmission and queue mesh_send_sync(); replies immediately with
 * the assigned handle.
 */
2369 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2371 struct mgmt_mesh_tx *mesh_tx;
2372 struct mgmt_cp_mesh_send *send = data;
2373 struct mgmt_rp_mesh_read_features rp;
2377 if (!lmp_le_capable(hdev) ||
2378 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2379 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2380 MGMT_STATUS_NOT_SUPPORTED);
2381 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2382 len <= MGMT_MESH_SEND_SIZE ||
2383 len > (MGMT_MESH_SEND_SIZE + 31))
2384 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2385 MGMT_STATUS_REJECTED);
/* Reuse the read-features reply struct just to count used handles */
2389 memset(&rp, 0, sizeof(rp));
2390 rp.max_handles = MESH_HANDLES_MAX;
2392 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2394 if (rp.max_handles <= rp.used_handles) {
2395 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2400 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2401 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2406 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2407 mesh_send_start_complete);
2410 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2411 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2412 MGMT_STATUS_FAILED);
2416 mgmt_mesh_remove(mesh_tx);
2419 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2421 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2422 &mesh_tx->handle, 1);
2426 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: LE-only controllers cannot switch LE off
 * (enable is idempotent, disable is rejected). Powered-off or no-op
 * cases are flag flips; otherwise queue set_le_sync(). Conflicts with
 * pending SET_LE/SET_ADVERTISING commands are rejected as busy.
 */
2430 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2432 struct mgmt_mode *cp = data;
2433 struct mgmt_pending_cmd *cmd;
2437 bt_dev_dbg(hdev, "sock %p", sk);
2439 if (!lmp_le_capable(hdev))
2440 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2441 MGMT_STATUS_NOT_SUPPORTED);
2443 if (cp->val != 0x00 && cp->val != 0x01)
2444 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2445 MGMT_STATUS_INVALID_PARAMS);
2447 /* Bluetooth single mode LE only controllers or dual-mode
2448 * controllers configured as LE only devices, do not allow
2449 * switching LE off. These have either LE enabled explicitly
2450 * or BR/EDR has been previously switched off.
2452 * When trying to enable an already enabled LE, then gracefully
2453 * send a positive response. Trying to disable it however will
2454 * result into rejection.
2456 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2457 if (cp->val == 0x01)
2458 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2460 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2461 MGMT_STATUS_REJECTED);
2467 enabled = lmp_host_le_capable(hdev);
2469 if (!hdev_is_powered(hdev) || val == enabled) {
2470 bool changed = false;
2472 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2473 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE also ends any LE advertising */
2477 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2478 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2482 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2487 err = new_settings(hdev, sk);
2492 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2493 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2494 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2499 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2503 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2507 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2508 MGMT_STATUS_FAILED);
2511 mgmt_pending_remove(cmd);
2515 hci_dev_unlock(hdev);
2519 /* This is a helper function to test for pending mgmt commands that can
2520 * cause CoD or EIR HCI commands. We can only allow one such pending
2521 * mgmt command at a time since otherwise we cannot easily track what
2522 * the current values are, will be, and based on that calculate if a new
2523 * HCI command needs to be sent and if yes with what value.
2525 static bool pending_eir_or_class(struct hci_dev *hdev)
2527 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that touches CoD/EIR */
2529 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2530 switch (cmd->opcode) {
2531 case MGMT_OP_ADD_UUID:
2532 case MGMT_OP_REMOVE_UUID:
2533 case MGMT_OP_SET_DEV_CLASS:
2534 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short UUIDs.
 */
2542 static const u8 bluetooth_base_uuid[] = {
2543 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2544 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: non-base-UUID tails are full 128-bit; base
 * UUIDs are shortened based on the 32-bit value at offset 12 (return
 * paths for 16/32/128 are on lines dropped by extraction).
 */
2547 static u8 get_uuid_size(const u8 *uuid)
2551 if (memcmp(uuid, bluetooth_base_uuid, 12))
2554 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for ADD_UUID/REMOVE_UUID/SET_DEV_CLASS: reply with
 * the (possibly updated) 3-byte class of device and free the command.
 */
2561 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2563 struct mgmt_pending_cmd *cmd = data;
2565 bt_dev_dbg(hdev, "err %d", err);
2567 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2568 mgmt_status(err), hdev->dev_class, 3);
2570 mgmt_pending_free(cmd);
/* hci_cmd_sync work for ADD_UUID: refresh class of device, then EIR. */
2573 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2577 err = hci_update_class_sync(hdev);
2581 return hci_update_eir_sync(hdev);
/* MGMT_OP_ADD_UUID handler: reject while another CoD/EIR-affecting
 * command is pending, append the UUID to hdev->uuids, and submit
 * add_uuid_sync() (submit, not queue, so it works while powered down).
 */
2584 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2586 struct mgmt_cp_add_uuid *cp = data;
2587 struct mgmt_pending_cmd *cmd;
2588 struct bt_uuid *uuid;
2591 bt_dev_dbg(hdev, "sock %p", sk);
2595 if (pending_eir_or_class(hdev)) {
2596 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2601 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2607 memcpy(uuid->uuid, cp->uuid, 16);
2608 uuid->svc_hint = cp->svc_hint;
2609 uuid->size = get_uuid_size(cp->uuid);
2611 list_add_tail(&uuid->list, &hdev->uuids);
2613 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2619 /* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
2620 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2622 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2623 mgmt_class_complete);
2625 mgmt_pending_free(cmd);
2630 hci_dev_unlock(hdev);
/* Arm the service-cache timer (if not already set) so that bulk UUID
 * changes are batched; powered-off adapters skip it.
 */
2634 static bool enable_service_cache(struct hci_dev *hdev)
2636 if (!hdev_is_powered(hdev))
2639 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2640 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_cmd_sync work for REMOVE_UUID: refresh class, then EIR. */
2648 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2652 err = hci_update_class_sync(hdev);
2656 return hci_update_eir_sync(hdev);
/* MGMT_OP_REMOVE_UUID handler: an all-zero UUID clears the whole list
 * (deferring the HCI update via the service cache when possible);
 * otherwise unlink every matching entry, failing with INVALID_PARAMS
 * if none matched, then submit remove_uuid_sync().
 */
2659 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2662 struct mgmt_cp_remove_uuid *cp = data;
2663 struct mgmt_pending_cmd *cmd;
2664 struct bt_uuid *match, *tmp;
2665 static const u8 bt_uuid_any[] = {
2666 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2670 bt_dev_dbg(hdev, "sock %p", sk);
2674 if (pending_eir_or_class(hdev)) {
2675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2680 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2681 hci_uuids_clear(hdev);
2683 if (enable_service_cache(hdev)) {
2684 err = mgmt_cmd_complete(sk, hdev->id,
2685 MGMT_OP_REMOVE_UUID,
2686 0, hdev->dev_class, 3);
2695 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2696 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2699 list_del(&match->list);
2705 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2706 MGMT_STATUS_INVALID_PARAMS);
2711 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2717 /* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
2718 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2720 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2721 mgmt_class_complete);
2723 mgmt_pending_free(cmd);
2726 hci_dev_unlock(hdev);
/* hci_cmd_sync work for SET_DEV_CLASS: flush any pending service-cache
 * update (cancelling the delayed work and refreshing EIR first), then
 * write the new Class of Device.
 */
2730 static int set_class_sync(struct hci_dev *hdev, void *data)
2734 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2735 cancel_delayed_work_sync(&hdev->service_cache);
2736 err = hci_update_eir_sync(hdev);
2742 return hci_update_class_sync(hdev);
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the major/minor device
 * class, then schedule set_class_sync() (when powered) to push it to the
 * controller. BR/EDR-only; reserved bits in minor/major must be zero.
 * NOTE(review): excerpt — braces/gotos and some status lines elided.
 */
2745 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2748 struct mgmt_cp_set_dev_class *cp = data;
2749 struct mgmt_pending_cmd *cmd;
2752 bt_dev_dbg(hdev, "sock %p", sk);
2754 if (!lmp_bredr_capable(hdev))
2755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2756 MGMT_STATUS_NOT_SUPPORTED);
2760 if (pending_eir_or_class(hdev)) {
2761 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2766 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2767 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2768 MGMT_STATUS_INVALID_PARAMS);
2772 hdev->major_class = cp->major;
2773 hdev->minor_class = cp->minor;
/* Not powered: store only and reply with the cached dev_class. */
2775 if (!hdev_is_powered(hdev)) {
2776 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2777 hdev->dev_class, 3);
2781 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2787 /* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
2788 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2790 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2791 mgmt_class_complete);
2793 mgmt_pending_free(cmd);
2796 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validate the key list (count, length,
 * debug_keys flag, key types), replace the stored BR/EDR link keys, update
 * the HCI_KEEP_DEBUG_KEYS setting, and add each non-blocked, non-debug key.
 * NOTE(review): excerpt — braces and some statements elided.
 */
2800 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2803 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound that keeps struct_size() below U16_MAX. */
2804 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2805 sizeof(struct mgmt_link_key_info));
2806 u16 key_count, expected_len;
2810 bt_dev_dbg(hdev, "sock %p", sk);
2812 if (!lmp_bredr_capable(hdev))
2813 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2814 MGMT_STATUS_NOT_SUPPORTED);
2816 key_count = __le16_to_cpu(cp->key_count);
2817 if (key_count > max_key_count) {
2818 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2820 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2821 MGMT_STATUS_INVALID_PARAMS);
/* Request length must exactly match the declared key count. */
2824 expected_len = struct_size(cp, keys, key_count);
2825 if (expected_len != len) {
2826 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2828 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2829 MGMT_STATUS_INVALID_PARAMS);
2832 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2834 MGMT_STATUS_INVALID_PARAMS);
2836 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: reject the whole load if any key type is out of range. */
2839 for (i = 0; i < key_count; i++) {
2840 struct mgmt_link_key_info *key = &cp->keys[i];
2842 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2843 if (key->type > 0x08)
2844 return mgmt_cmd_status(sk, hdev->id,
2845 MGMT_OP_LOAD_LINK_KEYS,
2846 MGMT_STATUS_INVALID_PARAMS);
2851 hci_link_keys_clear(hdev);
2854 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2856 changed = hci_dev_test_and_clear_flag(hdev,
2857 HCI_KEEP_DEBUG_KEYS);
2860 new_settings(hdev, NULL);
/* Second pass: store the keys, skipping blocked and debug keys. */
2862 for (i = 0; i < key_count; i++) {
2863 struct mgmt_link_key_info *key = &cp->keys[i];
2865 if (hci_is_blocked_key(hdev,
2866 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2868 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2873 /* Always ignore debug keys and require a new pairing if
2874 * the user wants to use them.
2876 if (key->type == HCI_LK_DEBUG_COMBINATION)
2879 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2880 key->type, key->pin_len, NULL);
2883 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2885 hci_dev_unlock(hdev);
/* Emit a MGMT_EV_DEVICE_UNPAIRED event for the given address, skipping
 * delivery to @skip_sk (typically the socket that issued the unpair).
 */
2890 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2891 u8 addr_type, struct sock *skip_sk)
2893 struct mgmt_ev_device_unpaired ev;
2895 bacpy(&ev.addr.bdaddr, bdaddr);
2896 ev.addr.type = addr_type;
2898 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Completion callback for unpair_device_sync(): broadcast the unpaired
 * event (skipping the requester's socket), finish the pending command and
 * free it.
 */
2902 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2904 struct mgmt_pending_cmd *cmd = data;
2905 struct mgmt_cp_unpair_device *cp = cmd->param;
2908 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2910 cmd->cmd_complete(cmd, err);
2911 mgmt_pending_free(cmd);
/* hci_cmd_sync work for UNPAIR_DEVICE: look up the live connection for
 * the address (ACL for BR/EDR, LE otherwise) and abort it with
 * "remote user terminated".
 * NOTE(review): excerpt — the NULL-conn early return is elided here.
 */
2914 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2916 struct mgmt_pending_cmd *cmd = data;
2917 struct mgmt_cp_unpair_device *cp = cmd->param;
2918 struct hci_conn *conn;
2920 if (cp->addr.type == BDADDR_BREDR)
2921 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2924 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2925 le_addr_type(cp->addr.type));
2930 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
/* MGMT_OP_UNPAIR_DEVICE handler: delete stored keys for the address
 * (link key for BR/EDR, SMP pairing data for LE), disable auto-connect
 * parameters, and — if requested and a connection exists — queue
 * unpair_device_sync() to terminate the link.
 * NOTE(review): excerpt — braces, gotos and several statements elided.
 */
2933 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2936 struct mgmt_cp_unpair_device *cp = data;
2937 struct mgmt_rp_unpair_device rp;
2938 struct hci_conn_params *params;
2939 struct mgmt_pending_cmd *cmd;
2940 struct hci_conn *conn;
2944 memset(&rp, 0, sizeof(rp));
2945 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2946 rp.addr.type = cp->addr.type;
2948 if (!bdaddr_type_is_valid(cp->addr.type))
2949 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2950 MGMT_STATUS_INVALID_PARAMS,
2953 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2954 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2955 MGMT_STATUS_INVALID_PARAMS,
2960 if (!hdev_is_powered(hdev)) {
2961 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2962 MGMT_STATUS_NOT_POWERED, &rp,
2967 if (cp->addr.type == BDADDR_BREDR) {
2968 /* If disconnection is requested, then look up the
2969 * connection. If the remote device is connected, it
2970 * will be later used to terminate the link.
2972 * Setting it to NULL explicitly will cause no
2973 * termination of the link.
2976 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2981 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* No stored link key -> the device was never paired. */
2983 err = mgmt_cmd_complete(sk, hdev->id,
2984 MGMT_OP_UNPAIR_DEVICE,
2985 MGMT_STATUS_NOT_PAIRED, &rp,
2993 /* LE address type */
2994 addr_type = le_addr_type(cp->addr.type);
2996 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2997 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2999 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3000 MGMT_STATUS_NOT_PAIRED, &rp,
3005 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3007 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3012 /* Defer clearing up the connection parameters until closing to
3013 * give a chance of keeping them if a repairing happens.
3015 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3017 /* Disable auto-connection parameters if present */
3018 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3020 if (params->explicit_connect)
3021 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3023 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3026 /* If disconnection is not requested, then clear the connection
3027 * variable so that the link is not terminated.
3029 if (!cp->disconnect)
3033 /* If the connection variable is set, then termination of the
3034 * link is requested.
3037 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3039 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3043 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3050 cmd->cmd_complete = addr_cmd_complete;
3052 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3053 unpair_device_complete);
3055 mgmt_pending_free(cmd);
3058 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validate the address, ensure the adapter is
 * up and no disconnect is already pending, look up the live connection and
 * issue an HCI disconnect with "remote user terminated".
 * NOTE(review): excerpt — braces/gotos and some lines elided.
 */
3062 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3065 struct mgmt_cp_disconnect *cp = data;
3066 struct mgmt_rp_disconnect rp;
3067 struct mgmt_pending_cmd *cmd;
3068 struct hci_conn *conn;
3071 bt_dev_dbg(hdev, "sock %p", sk);
3073 memset(&rp, 0, sizeof(rp));
3074 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3075 rp.addr.type = cp->addr.type;
3077 if (!bdaddr_type_is_valid(cp->addr.type))
3078 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3079 MGMT_STATUS_INVALID_PARAMS,
3084 if (!test_bit(HCI_UP, &hdev->flags)) {
3085 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3086 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one DISCONNECT may be in flight at a time. */
3091 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3092 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3093 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3097 if (cp->addr.type == BDADDR_BREDR)
3098 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3101 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3102 le_addr_type(cp->addr.type));
3104 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3105 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3106 MGMT_STATUS_NOT_CONNECTED, &rp,
3111 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3117 cmd->cmd_complete = generic_cmd_complete;
3119 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3121 mgmt_pending_remove(cmd);
3124 hci_dev_unlock(hdev);
/* Map an HCI link type + address type pair to the corresponding MGMT
 * BDADDR_* constant. LE public maps to BDADDR_LE_PUBLIC; any other LE
 * address falls back to BDADDR_LE_RANDOM; non-LE links fall back to
 * BDADDR_BREDR.
 * NOTE(review): excerpt — the case label for the LE link type is elided.
 */
3128 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3130 switch (link_type) {
3133 switch (addr_type) {
3134 case ADDR_LE_DEV_PUBLIC:
3135 return BDADDR_LE_PUBLIC;
3138 /* Fallback to LE Random address type */
3139 return BDADDR_LE_RANDOM;
3143 /* Fallback to BR/EDR type */
3144 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: count MGMT-visible connections, build a
 * variable-length reply of their addresses (SCO/eSCO links are filtered
 * out during the fill pass), and send it.
 * NOTE(review): excerpt — counter resets, continue statements and the
 * kmalloc failure path are elided from this view.
 */
3148 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3151 struct mgmt_rp_get_connections *rp;
3156 bt_dev_dbg(hdev, "sock %p", sk);
3160 if (!hdev_is_powered(hdev)) {
3161 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3162 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the reply. */
3167 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3168 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3172 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in the addresses. */
3179 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3180 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3182 bacpy(&rp->addr[i].bdaddr, &c->dst);
3183 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3184 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3189 rp->conn_count = cpu_to_le16(i);
3191 /* Recalculate length in case of filtered SCO connections, etc */
3192 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3193 struct_size(rp, addr, i));
3198 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY command and send the matching HCI
 * negative reply; on HCI send failure the pending command is removed.
 */
3202 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3203 struct mgmt_cp_pin_code_neg_reply *cp)
3205 struct mgmt_pending_cmd *cmd;
3208 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3213 cmd->cmd_complete = addr_cmd_complete;
3215 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3216 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3218 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: verify power and an existing ACL
 * connection, enforce a 16-byte PIN for high-security pairings (sending a
 * negative reply otherwise), then forward the PIN via HCI_OP_PIN_CODE_REPLY.
 * NOTE(review): excerpt — braces/gotos elided.
 */
3223 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3226 struct hci_conn *conn;
3227 struct mgmt_cp_pin_code_reply *cp = data;
3228 struct hci_cp_pin_code_reply reply;
3229 struct mgmt_pending_cmd *cmd;
3232 bt_dev_dbg(hdev, "sock %p", sk);
3236 if (!hdev_is_powered(hdev)) {
3237 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3238 MGMT_STATUS_NOT_POWERED);
3242 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3244 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3245 MGMT_STATUS_NOT_CONNECTED);
/* BT_SECURITY_HIGH demands a full 16-byte PIN; otherwise reject the
 * pairing with a negative reply.
 */
3249 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3250 struct mgmt_cp_pin_code_neg_reply ncp;
3252 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3254 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3256 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3258 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3259 MGMT_STATUS_INVALID_PARAMS);
3264 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3270 cmd->cmd_complete = addr_cmd_complete;
3272 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3273 reply.pin_len = cp->pin_len;
3274 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3276 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3278 mgmt_pending_remove(cmd);
3281 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validate the requested IO capability
 * against the SMP maximum and store it on the adapter. Always completes
 * successfully once validated — no controller interaction needed.
 */
3285 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3288 struct mgmt_cp_set_io_capability *cp = data;
3290 bt_dev_dbg(hdev, "sock %p", sk);
3292 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3293 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3294 MGMT_STATUS_INVALID_PARAMS);
3298 hdev->io_capability = cp->io_capability;
3300 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3302 hci_dev_unlock(hdev);
3304 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is this
 * connection; NULL-return path elided from this excerpt.
 */
3308 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3310 struct hci_dev *hdev = conn->hdev;
3311 struct mgmt_pending_cmd *cmd;
3313 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3314 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3317 if (cmd->user_data != conn)
/* Complete a PAIR_DEVICE command: send the reply with the final status,
 * detach all pairing callbacks from the connection, drop the reference
 * taken at pairing start, and keep the connection parameters.
 */
3326 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3328 struct mgmt_rp_pair_device rp;
3329 struct hci_conn *conn = cmd->user_data;
3332 bacpy(&rp.addr.bdaddr, &conn->dst);
3333 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3335 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3336 status, &rp, sizeof(rp));
3338 /* So we don't get further callbacks for this connection */
3339 conn->connect_cfm_cb = NULL;
3340 conn->security_cfm_cb = NULL;
3341 conn->disconn_cfm_cb = NULL;
3343 hci_conn_drop(conn);
3345 /* The device is paired so there is no need to remove
3346 * its connection parameters anymore.
3348 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Three pairing completion entry points. mgmt_smp_complete() is called
 * when SMP finishes; the two *_cb functions are installed as connection
 * callbacks (BR/EDR and LE respectively) and resolve the pending
 * PAIR_DEVICE command via find_pairing().
 * NOTE(review): excerpt — some guards/braces elided (e.g. le variant
 * appears to bail early on a zero status before looking up the command).
 */
3355 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3357 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3358 struct mgmt_pending_cmd *cmd;
3360 cmd = find_pairing(conn);
3362 cmd->cmd_complete(cmd, status);
3363 mgmt_pending_remove(cmd);
3367 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3369 struct mgmt_pending_cmd *cmd;
3371 BT_DBG("status %u", status);
3373 cmd = find_pairing(conn);
3375 BT_DBG("Unable to find a pending command");
3379 cmd->cmd_complete(cmd, mgmt_status(status));
3380 mgmt_pending_remove(cmd);
3383 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3385 struct mgmt_pending_cmd *cmd;
3387 BT_DBG("status %u", status);
3392 cmd = find_pairing(conn);
3394 BT_DBG("Unable to find a pending command");
3398 cmd->cmd_complete(cmd, mgmt_status(status));
3399 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate the request, initiate an ACL
 * (BR/EDR) or LE connection with dedicated-bonding security, install the
 * appropriate pairing callbacks, and register a pending command completed
 * by pairing_complete(). If the link is already up and secure, completes
 * immediately.
 * NOTE(review): excerpt — braces/gotos, IS_ERR(conn) check and some error
 * paths elided.
 */
3402 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3405 struct mgmt_cp_pair_device *cp = data;
3406 struct mgmt_rp_pair_device rp;
3407 struct mgmt_pending_cmd *cmd;
3408 u8 sec_level, auth_type;
3409 struct hci_conn *conn;
3412 bt_dev_dbg(hdev, "sock %p", sk);
3414 memset(&rp, 0, sizeof(rp));
3415 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3416 rp.addr.type = cp->addr.type;
3418 if (!bdaddr_type_is_valid(cp->addr.type))
3419 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3420 MGMT_STATUS_INVALID_PARAMS,
3423 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3424 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3425 MGMT_STATUS_INVALID_PARAMS,
3430 if (!hdev_is_powered(hdev)) {
3431 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3432 MGMT_STATUS_NOT_POWERED, &rp,
3437 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3438 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3439 MGMT_STATUS_ALREADY_PAIRED, &rp,
3444 sec_level = BT_SECURITY_MEDIUM;
3445 auth_type = HCI_AT_DEDICATED_BONDING;
3447 if (cp->addr.type == BDADDR_BREDR) {
3448 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3449 auth_type, CONN_REASON_PAIR_DEVICE,
3450 HCI_ACL_CONN_TIMEOUT);
3452 u8 addr_type = le_addr_type(cp->addr.type);
3453 struct hci_conn_params *p;
3455 /* When pairing a new device, it is expected to remember
3456 * this device for future connections. Adding the connection
3457 * parameter information ahead of time allows tracking
3458 * of the peripheral preferred values and will speed up any
3459 * further connection establishment.
3461 * If connection parameters already exist, then they
3462 * will be kept and this function does nothing.
3464 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3466 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3467 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3469 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3470 sec_level, HCI_LE_CONN_TIMEOUT,
3471 CONN_REASON_PAIR_DEVICE);
/* Map connect errors to MGMT status codes. */
3477 if (PTR_ERR(conn) == -EBUSY)
3478 status = MGMT_STATUS_BUSY;
3479 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3480 status = MGMT_STATUS_NOT_SUPPORTED;
3481 else if (PTR_ERR(conn) == -ECONNREFUSED)
3482 status = MGMT_STATUS_REJECTED;
3484 status = MGMT_STATUS_CONNECT_FAILED;
3486 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3487 status, &rp, sizeof(rp));
/* Callbacks already installed -> another pairing is in progress. */
3491 if (conn->connect_cfm_cb) {
3492 hci_conn_drop(conn);
3493 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3494 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3498 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3501 hci_conn_drop(conn);
3505 cmd->cmd_complete = pairing_complete;
3507 /* For LE, just connecting isn't a proof that the pairing finished */
3508 if (cp->addr.type == BDADDR_BREDR) {
3509 conn->connect_cfm_cb = pairing_complete_cb;
3510 conn->security_cfm_cb = pairing_complete_cb;
3511 conn->disconn_cfm_cb = pairing_complete_cb;
3513 conn->connect_cfm_cb = le_pairing_complete_cb;
3514 conn->security_cfm_cb = le_pairing_complete_cb;
3515 conn->disconn_cfm_cb = le_pairing_complete_cb;
3518 conn->io_capability = cp->io_cap;
3519 cmd->user_data = hci_conn_get(conn);
3521 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3522 hci_conn_security(conn, sec_level, auth_type, true)) {
3523 cmd->cmd_complete(cmd, 0);
3524 mgmt_pending_remove(cmd);
3530 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: cancel the pending PAIR_DEVICE
 * command for the given address, remove any stored keys / ongoing SMP
 * pairing, and abort the link if it was created for the pairing.
 * NOTE(review): excerpt — braces/gotos elided.
 */
3534 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3537 struct mgmt_addr_info *addr = data;
3538 struct mgmt_pending_cmd *cmd;
3539 struct hci_conn *conn;
3542 bt_dev_dbg(hdev, "sock %p", sk);
3546 if (!hdev_is_powered(hdev)) {
3547 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3548 MGMT_STATUS_NOT_POWERED);
3552 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3554 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3555 MGMT_STATUS_INVALID_PARAMS);
3559 conn = cmd->user_data;
/* The address must match the pairing actually in progress. */
3561 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3562 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3563 MGMT_STATUS_INVALID_PARAMS);
3567 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3568 mgmt_pending_remove(cmd);
3570 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3571 addr, sizeof(*addr));
3573 /* Since user doesn't want to proceed with the connection, abort any
3574 * ongoing pairing and then terminate the link if it was created
3575 * because of the pair device action.
3577 if (addr->type == BDADDR_BREDR)
3578 hci_remove_link_key(hdev, &addr->bdaddr);
3580 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3581 le_addr_type(addr->type));
3583 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3584 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3587 hci_dev_unlock(hdev);
/* Common helper behind the user confirm/passkey (neg-)reply opcodes:
 * validates power and an existing connection, answers LE pairing through
 * SMP directly, and forwards BR/EDR replies to the controller via the
 * given HCI opcode (passkey replies carry the passkey payload).
 * NOTE(review): excerpt — braces/gotos elided.
 */
3591 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3592 struct mgmt_addr_info *addr, u16 mgmt_op,
3593 u16 hci_op, __le32 passkey)
3595 struct mgmt_pending_cmd *cmd;
3596 struct hci_conn *conn;
3601 if (!hdev_is_powered(hdev)) {
3602 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3603 MGMT_STATUS_NOT_POWERED, addr,
3608 if (addr->type == BDADDR_BREDR)
3609 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3611 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3612 le_addr_type(addr->type));
3615 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3616 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing replies are handled entirely by SMP. */
3621 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3622 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3624 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3625 MGMT_STATUS_SUCCESS, addr,
3628 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3629 MGMT_STATUS_FAILED, addr,
3635 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3641 cmd->cmd_complete = addr_cmd_complete;
3643 /* Continue with pairing via HCI */
3644 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3645 struct hci_cp_user_passkey_reply cp;
3647 bacpy(&cp.bdaddr, &addr->bdaddr);
3648 cp.passkey = passkey;
3649 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3651 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3655 mgmt_pending_remove(cmd);
3658 hci_dev_unlock(hdev);
/* Thin MGMT opcode handlers that delegate to user_pairing_resp() with the
 * matching MGMT/HCI opcode pair. Only user_confirm_reply() validates its
 * payload length; user_passkey_reply() is the only one passing a passkey.
 */
3662 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3663 void *data, u16 len)
3665 struct mgmt_cp_pin_code_neg_reply *cp = data;
3667 bt_dev_dbg(hdev, "sock %p", sk);
3669 return user_pairing_resp(sk, hdev, &cp->addr,
3670 MGMT_OP_PIN_CODE_NEG_REPLY,
3671 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3674 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3677 struct mgmt_cp_user_confirm_reply *cp = data;
3679 bt_dev_dbg(hdev, "sock %p", sk);
3681 if (len != sizeof(*cp))
3682 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3683 MGMT_STATUS_INVALID_PARAMS);
3685 return user_pairing_resp(sk, hdev, &cp->addr,
3686 MGMT_OP_USER_CONFIRM_REPLY,
3687 HCI_OP_USER_CONFIRM_REPLY, 0);
3690 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3691 void *data, u16 len)
3693 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3695 bt_dev_dbg(hdev, "sock %p", sk);
3697 return user_pairing_resp(sk, hdev, &cp->addr,
3698 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3699 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3702 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3705 struct mgmt_cp_user_passkey_reply *cp = data;
3707 bt_dev_dbg(hdev, "sock %p", sk);
3709 return user_pairing_resp(sk, hdev, &cp->addr,
3710 MGMT_OP_USER_PASSKEY_REPLY,
3711 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3714 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3715 void *data, u16 len)
3717 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3719 bt_dev_dbg(hdev, "sock %p", sk);
3721 return user_pairing_resp(sk, hdev, &cp->addr,
3722 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3723 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the current advertising instance carries any of the given flags,
 * cancel the advertising timeout and reschedule the next instance so the
 * changed data (name/appearance) takes effect. name_changed_sync() is the
 * hci_cmd_sync wrapper for the local-name case.
 * NOTE(review): excerpt — NULL checks / early returns elided.
 */
3726 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3728 struct adv_info *adv_instance;
3730 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3734 /* stop if current instance doesn't need to be changed */
3735 if (!(adv_instance->flags & flags))
3738 cancel_adv_timeout(hdev);
3740 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3744 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3749 static int name_changed_sync(struct hci_dev *hdev, void *data)
3751 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
/* Completion callback for set_name_sync(): reply to the SET_LOCAL_NAME
 * request with status or the stored name, and — when LE advertising is
 * active — queue name_changed_sync() to refresh advertising data.
 */
3754 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3756 struct mgmt_pending_cmd *cmd = data;
3757 struct mgmt_cp_set_local_name *cp = cmd->param;
3758 u8 status = mgmt_status(err);
3760 bt_dev_dbg(hdev, "err %d", err);
/* Bail if this is no longer the tracked pending command. */
3762 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3766 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3769 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3772 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3773 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3776 mgmt_pending_remove(cmd);
/* hci_cmd_sync work for SET_LOCAL_NAME: push the name and EIR on BR/EDR
 * capable adapters, and refresh the scan response data when legacy LE
 * advertising is enabled.
 */
3779 static int set_name_sync(struct hci_dev *hdev, void *data)
3781 if (lmp_bredr_capable(hdev)) {
3782 hci_update_name_sync(hdev);
3783 hci_update_eir_sync(hdev);
3786 /* The name is stored in the scan response data and so
3787 * no need to update the advertising data here.
3789 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3790 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_LOCAL_NAME handler: short-circuit when names are unchanged;
 * when unpowered, store the names and broadcast LOCAL_NAME_CHANGED and
 * ext-info events; otherwise queue set_name_sync() and store dev_name on
 * successful queuing.
 * NOTE(review): excerpt — braces/gotos elided.
 */
3795 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3798 struct mgmt_cp_set_local_name *cp = data;
3799 struct mgmt_pending_cmd *cmd;
3802 bt_dev_dbg(hdev, "sock %p", sk);
3806 /* If the old values are the same as the new ones just return a
3807 * direct command complete event.
3809 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3810 !memcmp(hdev->short_name, cp->short_name,
3811 sizeof(hdev->short_name))) {
3812 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name never requires controller interaction; store it now. */
3817 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3819 if (!hdev_is_powered(hdev)) {
3820 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3822 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3827 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3828 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3829 ext_info_changed(hdev, sk);
3834 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3838 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3842 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3843 MGMT_STATUS_FAILED);
3846 mgmt_pending_remove(cmd);
3851 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3854 hci_dev_unlock(hdev);
/* appearance_changed_sync() expires advertising instances carrying the
 * APPEARANCE flag; set_appearance() handles MGMT_OP_SET_APPEARANCE by
 * storing the LE appearance value, refreshing advertising if active, and
 * broadcasting ext-info-changed — LE-capable adapters only.
 */
3858 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3860 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3863 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3866 struct mgmt_cp_set_appearance *cp = data;
3870 bt_dev_dbg(hdev, "sock %p", sk);
3872 if (!lmp_le_capable(hdev))
3873 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3874 MGMT_STATUS_NOT_SUPPORTED);
3876 appearance = le16_to_cpu(cp->appearance);
3880 if (hdev->appearance != appearance) {
3881 hdev->appearance = appearance;
3883 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3884 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3887 ext_info_changed(hdev, sk);
3890 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3893 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, selected and
 * configurable PHYs. mgmt_phy_configuration_changed() broadcasts the
 * corresponding change event, skipping the given socket.
 */
3898 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3899 void *data, u16 len)
3901 struct mgmt_rp_get_phy_configuration rp;
3903 bt_dev_dbg(hdev, "sock %p", sk);
3907 memset(&rp, 0, sizeof(rp));
3909 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3910 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3911 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3913 hci_dev_unlock(hdev);
3915 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3919 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3921 struct mgmt_ev_phy_configuration_changed ev;
3923 memset(&ev, 0, sizeof(ev));
3925 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3927 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion callback for set_default_phy_sync(): derive the final status
 * from err / the returned skb's first status byte, reply to the pending
 * SET_PHY_CONFIGURATION command, broadcast the change event on success,
 * and release the skb and pending command.
 */
3931 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3933 struct mgmt_pending_cmd *cmd = data;
3934 struct sk_buff *skb = cmd->skb;
3935 u8 status = mgmt_status(err);
3937 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3942 status = MGMT_STATUS_FAILED;
3943 else if (IS_ERR(skb))
3944 status = mgmt_status(PTR_ERR(skb));
/* First byte of the HCI command-complete payload is the status. */
3946 status = mgmt_status(skb->data[0]);
3949 bt_dev_dbg(hdev, "status %d", status);
3952 mgmt_cmd_status(cmd->sk, hdev->id,
3953 MGMT_OP_SET_PHY_CONFIGURATION, status);
3955 mgmt_cmd_complete(cmd->sk, hdev->id,
3956 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3959 mgmt_phy_configuration_changed(hdev, cmd->sk);
3962 if (skb && !IS_ERR(skb))
3965 mgmt_pending_remove(cmd);
/* hci_cmd_sync work: translate the MGMT-selected LE PHY mask into an
 * HCI_OP_LE_SET_DEFAULT_PHY command. all_phys bits mark "no preference"
 * for TX (0x01) / RX (0x02) when no PHY in that direction was selected.
 */
3968 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3970 struct mgmt_pending_cmd *cmd = data;
3971 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3972 struct hci_cp_le_set_default_phy cp_phy;
3973 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3975 memset(&cp_phy, 0, sizeof(cp_phy));
3977 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3978 cp_phy.all_phys |= 0x01;
3980 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3981 cp_phy.all_phys |= 0x02;
3983 if (selected_phys & MGMT_PHY_LE_1M_TX)
3984 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3986 if (selected_phys & MGMT_PHY_LE_2M_TX)
3987 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3989 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3990 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3992 if (selected_phys & MGMT_PHY_LE_1M_RX)
3993 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3995 if (selected_phys & MGMT_PHY_LE_2M_RX)
3996 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3998 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3999 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
/* Response skb is kept on the cmd for the completion callback. */
4001 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4002 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
/* MGMT_OP_SET_PHY_CONFIGURATION handler: validate the selected PHYs
 * against supported/configurable masks, translate BR/EDR PHY selections
 * into ACL packet-type bits on hdev->pkt_type, and queue
 * set_default_phy_sync() when the LE selection changed. Note the packet
 * type bits for 2M/3M are inverted ("may not use") relative to basic-rate
 * bits, hence the &= ~ vs |= asymmetry below.
 * NOTE(review): excerpt — braces/gotos and some status lines elided.
 */
4007 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4008 void *data, u16 len)
4010 struct mgmt_cp_set_phy_configuration *cp = data;
4011 struct mgmt_pending_cmd *cmd;
4012 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4013 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4014 bool changed = false;
4017 bt_dev_dbg(hdev, "sock %p", sk);
4019 configurable_phys = get_configurable_phys(hdev);
4020 supported_phys = get_supported_phys(hdev);
4021 selected_phys = __le32_to_cpu(cp->selected_phys);
4023 if (selected_phys & ~supported_phys)
4024 return mgmt_cmd_status(sk, hdev->id,
4025 MGMT_OP_SET_PHY_CONFIGURATION,
4026 MGMT_STATUS_INVALID_PARAMS);
/* PHYs that cannot be configured must always remain selected. */
4028 unconfigure_phys = supported_phys & ~configurable_phys;
4030 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4031 return mgmt_cmd_status(sk, hdev->id,
4032 MGMT_OP_SET_PHY_CONFIGURATION,
4033 MGMT_STATUS_INVALID_PARAMS);
4035 if (selected_phys == get_selected_phys(hdev))
4036 return mgmt_cmd_complete(sk, hdev->id,
4037 MGMT_OP_SET_PHY_CONFIGURATION,
4042 if (!hdev_is_powered(hdev)) {
4043 err = mgmt_cmd_status(sk, hdev->id,
4044 MGMT_OP_SET_PHY_CONFIGURATION,
4045 MGMT_STATUS_REJECTED);
4049 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4050 err = mgmt_cmd_status(sk, hdev->id,
4051 MGMT_OP_SET_PHY_CONFIGURATION,
4056 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4057 pkt_type |= (HCI_DH3 | HCI_DM3);
4059 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4061 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4062 pkt_type |= (HCI_DH5 | HCI_DM5);
4064 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4066 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4067 pkt_type &= ~HCI_2DH1;
4069 pkt_type |= HCI_2DH1;
4071 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4072 pkt_type &= ~HCI_2DH3;
4074 pkt_type |= HCI_2DH3;
4076 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4077 pkt_type &= ~HCI_2DH5;
4079 pkt_type |= HCI_2DH5;
4081 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4082 pkt_type &= ~HCI_3DH1;
4084 pkt_type |= HCI_3DH1;
4086 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4087 pkt_type &= ~HCI_3DH3;
4089 pkt_type |= HCI_3DH3;
4091 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4092 pkt_type &= ~HCI_3DH5;
4094 pkt_type |= HCI_3DH5;
4096 if (pkt_type != hdev->pkt_type) {
4097 hdev->pkt_type = pkt_type;
/* LE selection unchanged: only BR/EDR packet types changed, so
 * reply immediately (broadcast only if something changed).
 */
4101 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4102 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4104 mgmt_phy_configuration_changed(hdev, sk);
4106 err = mgmt_cmd_complete(sk, hdev->id,
4107 MGMT_OP_SET_PHY_CONFIGURATION,
4113 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4118 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4119 set_default_phy_complete);
4122 err = mgmt_cmd_status(sk, hdev->id,
4123 MGMT_OP_SET_PHY_CONFIGURATION,
4124 MGMT_STATUS_FAILED);
4127 mgmt_pending_remove(cmd);
4131 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: validate count/length, replace the
 * adapter's blocked-key list with the supplied entries (RCU list), and
 * reply with the resulting MGMT status.
 * NOTE(review): excerpt — the kzalloc-failure break and braces elided.
 */
4136 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4139 int err = MGMT_STATUS_SUCCESS;
4140 struct mgmt_cp_set_blocked_keys *keys = data;
/* Upper bound that keeps struct_size() below U16_MAX. */
4141 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4142 sizeof(struct mgmt_blocked_key_info));
4143 u16 key_count, expected_len;
4146 bt_dev_dbg(hdev, "sock %p", sk);
4148 key_count = __le16_to_cpu(keys->key_count);
4149 if (key_count > max_key_count) {
4150 bt_dev_err(hdev, "too big key_count value %u", key_count);
4151 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4152 MGMT_STATUS_INVALID_PARAMS);
4155 expected_len = struct_size(keys, keys, key_count);
4156 if (expected_len != len) {
4157 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4159 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4160 MGMT_STATUS_INVALID_PARAMS);
/* Replace (not merge) the existing blocked-key list. */
4165 hci_blocked_keys_clear(hdev);
4167 for (i = 0; i < key_count; ++i) {
4168 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4171 err = MGMT_STATUS_NO_RESOURCES;
4175 b->type = keys->keys[i].type;
4176 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4177 list_add_rcu(&b->list, &hdev->blocked_keys);
4179 hci_dev_unlock(hdev);
4181 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the
 * HCI_WIDEBAND_SPEECH_ENABLED flag.  Requires the controller quirk
 * HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, accepts only 0x00/0x01, and
 * rejects a change while the controller is powered.
 */
4185 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4186 	void *data, u16 len)
4188 	struct mgmt_mode *cp = data;
4190 	bool changed = false;
4192 	bt_dev_dbg(hdev, "sock %p", sk);
4194 	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4195 	return mgmt_cmd_status(sk, hdev->id,
4196 	MGMT_OP_SET_WIDEBAND_SPEECH,
4197 	MGMT_STATUS_NOT_SUPPORTED);
4199 	if (cp->val != 0x00 && cp->val != 0x01)
4200 	return mgmt_cmd_status(sk, hdev->id,
4201 	MGMT_OP_SET_WIDEBAND_SPEECH,
4202 	MGMT_STATUS_INVALID_PARAMS);
/* Flipping the setting while powered is rejected */
4206 	if (hdev_is_powered(hdev) &&
4207 	!!cp->val != hci_dev_test_flag(hdev,
4208 	HCI_WIDEBAND_SPEECH_ENABLED)) {
4209 	err = mgmt_cmd_status(sk, hdev->id,
4210 	MGMT_OP_SET_WIDEBAND_SPEECH,
4211 	MGMT_STATUS_REJECTED);
/* test_and_set/clear report whether the flag actually changed */
4216 	changed = !hci_dev_test_and_set_flag(hdev,
4217 	HCI_WIDEBAND_SPEECH_ENABLED);
4219 	changed = hci_dev_test_and_clear_flag(hdev,
4220 	HCI_WIDEBAND_SPEECH_ENABLED);
4222 	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
/* Broadcast New Settings only when something changed */
4227 	err = new_settings(hdev, sk);
4230 	hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: build an EIR-encoded capability
 * list (security flags, max encryption key sizes, LE TX power range)
 * derived from the controller's supported HCI commands, and return it
 * to userspace.
 */
4234 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4235 	void *data, u16 data_len)
4238 	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4241 	u8 tx_power_range[2];
4243 	bt_dev_dbg(hdev, "sock %p", sk);
4245 	memset(&buf, 0, sizeof(buf));
4249 	/* When the Read Simple Pairing Options command is supported, then
4250 	 * the remote public key validation is supported.
4252 	 * Alternatively, when Microsoft extensions are available, they can
4253 	 * indicate support for public key validation as well.
	 */
4255 	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4256 	flags |= 0x01; /* Remote public key validation (BR/EDR) */
4258 	flags |= 0x02; /* Remote public key validation (LE) */
4260 	/* When the Read Encryption Key Size command is supported, then the
4261 	 * encryption key size is enforced.
	 */
4263 	if (hdev->commands[20] & 0x10)
4264 	flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4266 	flags |= 0x08; /* Encryption key size enforcement (LE) */
4268 	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4271 	/* When the Read Simple Pairing Options command is supported, then
4272 	 * also max encryption key size information is provided.
	 */
4274 	if (hdev->commands[41] & 0x08)
4275 	cap_len = eir_append_le16(rp->cap, cap_len,
4276 	MGMT_CAP_MAX_ENC_KEY_SIZE,
4277 	hdev->max_enc_key_size);
4279 	cap_len = eir_append_le16(rp->cap, cap_len,
4280 	MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4281 	SMP_MAX_ENC_KEY_SIZE);
4283 	/* Append the min/max LE tx power parameters if we were able to fetch
4284 	 * it from the controller
	 */
4286 	if (hdev->commands[38] & 0x80) {
4287 	memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4288 	memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4289 	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4293 	rp->cap_len = cpu_to_le16(cap_len);
4295 	hci_dev_unlock(hdev);
4297 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4298 	rp, sizeof(*rp) + cap_len);
/* UUIDs identifying each experimental feature exposed through
 * MGMT_OP_SET_EXP_FEATURE / MGMT_OP_READ_EXP_FEATURES_INFO.  Each array
 * stores the 128-bit UUID in little-endian byte order (reverse of the
 * textual form given in the comment above it).
 */
4301 #ifdef CONFIG_BT_FEATURE_DEBUG
4302 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4303 static const u8 debug_uuid[16] = {
4304 	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4305 	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4309 /* 330859bc-7506-492d-9370-9a6f0614037f */
4310 static const u8 quality_report_uuid[16] = {
4311 	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4312 	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4315 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4316 static const u8 offload_codecs_uuid[16] = {
4317 	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4318 	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4321 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4322 static const u8 le_simultaneous_roles_uuid[16] = {
4323 	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4324 	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4327 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4328 static const u8 rpa_resolution_uuid[16] = {
4329 	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4330 	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4333 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4334 static const u8 iso_socket_uuid[16] = {
4335 	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4336 	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4339 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4340 static const u8 mgmt_mesh_uuid[16] = {
4341 	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4342 	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: report each experimental
 * feature the kernel/controller supports together with its current
 * flags (BIT(0) = enabled; rpa_resolution also sets BIT(1)).  May be
 * called with hdev == NULL for the non-controller index; each hdev-
 * dependent feature is therefore guarded by a NULL check.
 */
4345 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4346 	void *data, u16 data_len)
4348 	struct mgmt_rp_read_exp_features_info *rp;
4354 	bt_dev_dbg(hdev, "sock %p", sk);
4356 	/* Enough space for 7 features */
4357 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4358 	rp = kzalloc(len, GFP_KERNEL);
4362 #ifdef CONFIG_BT_FEATURE_DEBUG
4364 	flags = bt_dbg_get() ? BIT(0) : 0;
4366 	memcpy(rp->features[idx].uuid, debug_uuid, 16);
4367 	rp->features[idx].flags = cpu_to_le32(flags);
4372 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4373 	if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4378 	memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4379 	rp->features[idx].flags = cpu_to_le32(flags);
4383 	if (hdev && ll_privacy_capable(hdev)) {
4384 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4385 	flags = BIT(0) | BIT(1);
4389 	memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4390 	rp->features[idx].flags = cpu_to_le32(flags);
/* Quality report via AOSP extension or a driver-provided hook */
4394 	if (hdev && (aosp_has_quality_report(hdev) ||
4395 	hdev->set_quality_report)) {
4396 	if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4401 	memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4402 	rp->features[idx].flags = cpu_to_le32(flags);
4406 	if (hdev && hdev->get_data_path_id) {
4407 	if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4412 	memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4413 	rp->features[idx].flags = cpu_to_le32(flags);
4417 	if (IS_ENABLED(CONFIG_BT_LE)) {
4418 	flags = iso_enabled() ? BIT(0) : 0;
4419 	memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4420 	rp->features[idx].flags = cpu_to_le32(flags);
4424 	if (hdev && lmp_le_capable(hdev)) {
4425 	if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4430 	memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4431 	rp->features[idx].flags = cpu_to_le32(flags);
4435 	rp->feature_count = cpu_to_le16(idx);
4437 	/* After reading the experimental features information, enable
4438 	 * the events to update client on any future change.
	 */
4440 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4442 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4443 	MGMT_OP_READ_EXP_FEATURES_INFO,
4444 	0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the LL privacy (RPA
 * resolution) feature and mirror the new state into hdev->conn_flags:
 * HCI_CONN_FLAG_DEVICE_PRIVACY is advertised as a supported connection
 * flag only while the feature is enabled and the controller is privacy-
 * mode capable.  @skip is the socket that triggered the change.
 */
4450 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4453 	struct mgmt_ev_exp_feature_changed ev;
4455 	memset(&ev, 0, sizeof(ev));
4456 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
/* BIT(1) signals "supported settings changed" alongside the state bit */
4457 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
	/* NOTE(review): conn_flags is updated without obvious locking here —
	 * confirm whether this needs to be atomic with other conn_flags users.
	 */
4460 	if (enabled && privacy_mode_capable(hdev))
4461 	hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4463 	hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4465 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4467 	HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Emit a generic Experimental Feature Changed event for @uuid with
 * BIT(0) reflecting the new enabled state, delivered only to sockets
 * that opted in via HCI_MGMT_EXP_FEATURE_EVENTS, excluding @skip.
 */
4471 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4472 	bool enabled, struct sock *skip)
4474 	struct mgmt_ev_exp_feature_changed ev;
4476 	memset(&ev, 0, sizeof(ev));
4477 	memcpy(ev.uuid, uuid, 16);
4478 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4480 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4482 	HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Build one entry of the exp_features[] dispatch table below: pairs a
 * feature UUID with its set_func handler.
 */
4485 #define EXP_FEAT(_uuid, _set_func) \
4488 	.set_func = _set_func, \
4491 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID disables experimental features in bulk:
 * the debug feature (when built in) and, on an unpowered controller
 * that uses LL privacy, the HCI_ENABLE_LL_PRIVACY flag.  Always replies
 * with a zeroed UUID and zero flags.
 */
4492 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4493 	struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4495 	struct mgmt_rp_set_exp_feature rp;
4497 	memset(rp.uuid, 0, 16);
4498 	rp.flags = cpu_to_le32(0);
4500 #ifdef CONFIG_BT_FEATURE_DEBUG
4502 	bool changed = bt_dbg_get();
4507 	exp_feature_changed(NULL, ZERO_KEY, false, sk);
/* LL privacy can only be cleared while the controller is powered off */
4511 	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4514 	changed = hci_dev_test_and_clear_flag(hdev,
4515 	HCI_ENABLE_LL_PRIVACY);
4517 	exp_feature_changed(hdev, rpa_resolution_uuid, false,
4521 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4523 	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4524 	MGMT_OP_SET_EXP_FEATURE, 0,
4528 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Experimental-feature handler for the debug UUID: toggles the global
 * bt_dbg state.  Must be sent on the non-controller index and carries a
 * single boolean octet.
 */
4529 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4530 	struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4532 	struct mgmt_rp_set_exp_feature rp;
4537 	/* Command requires to use the non-controller index */
4539 	return mgmt_cmd_status(sk, hdev->id,
4540 	MGMT_OP_SET_EXP_FEATURE,
4541 	MGMT_STATUS_INVALID_INDEX);
4543 	/* Parameters are limited to a single octet */
4544 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4545 	return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4546 	MGMT_OP_SET_EXP_FEATURE,
4547 	MGMT_STATUS_INVALID_PARAMS);
4549 	/* Only boolean on/off is supported */
4550 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4551 	return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4552 	MGMT_OP_SET_EXP_FEATURE,
4553 	MGMT_STATUS_INVALID_PARAMS);
4555 	val = !!cp->param[0];
/* Report a change only when the requested state differs */
4556 	changed = val ? !bt_dbg_get() : bt_dbg_get();
4559 	memcpy(rp.uuid, debug_uuid, 16);
4560 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4562 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4564 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4565 	MGMT_OP_SET_EXP_FEATURE, 0,
4569 	exp_feature_changed(hdev, debug_uuid, val, sk);
/* Experimental-feature handler for the mesh UUID: toggles
 * HCI_MESH_EXPERIMENTAL on a specific controller; disabling also clears
 * HCI_MESH.  Requires a controller index and a single boolean octet.
 */
4575 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4576 	struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4578 	struct mgmt_rp_set_exp_feature rp;
4582 	/* Command requires to use the controller index */
4584 	return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4585 	MGMT_OP_SET_EXP_FEATURE,
4586 	MGMT_STATUS_INVALID_INDEX);
4588 	/* Parameters are limited to a single octet */
4589 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4590 	return mgmt_cmd_status(sk, hdev->id,
4591 	MGMT_OP_SET_EXP_FEATURE,
4592 	MGMT_STATUS_INVALID_PARAMS);
4594 	/* Only boolean on/off is supported */
4595 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4596 	return mgmt_cmd_status(sk, hdev->id,
4597 	MGMT_OP_SET_EXP_FEATURE,
4598 	MGMT_STATUS_INVALID_PARAMS);
4600 	val = !!cp->param[0];
4603 	changed = !hci_dev_test_and_set_flag(hdev,
4604 	HCI_MESH_EXPERIMENTAL);
/* Disabling the experimental feature also turns mesh itself off */
4606 	hci_dev_clear_flag(hdev, HCI_MESH);
4607 	changed = hci_dev_test_and_clear_flag(hdev,
4608 	HCI_MESH_EXPERIMENTAL);
4611 	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4612 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4614 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4616 	err = mgmt_cmd_complete(sk, hdev->id,
4617 	MGMT_OP_SET_EXP_FEATURE, 0,
4621 	exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
/* Experimental-feature handler for the RPA resolution (LL privacy)
 * UUID: toggles HCI_ENABLE_LL_PRIVACY.  Only allowed while the
 * controller is powered off; enabling also clears HCI_ADVERTISING.
 * The reply/event flags carry BIT(1) to signal that the supported
 * settings changed along with the state.
 */
4626 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4627 	struct mgmt_cp_set_exp_feature *cp,
4630 	struct mgmt_rp_set_exp_feature rp;
4635 	/* Command requires to use the controller index */
4637 	return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4638 	MGMT_OP_SET_EXP_FEATURE,
4639 	MGMT_STATUS_INVALID_INDEX);
4641 	/* Changes can only be made when controller is powered down */
4642 	if (hdev_is_powered(hdev))
4643 	return mgmt_cmd_status(sk, hdev->id,
4644 	MGMT_OP_SET_EXP_FEATURE,
4645 	MGMT_STATUS_REJECTED);
4647 	/* Parameters are limited to a single octet */
4648 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4649 	return mgmt_cmd_status(sk, hdev->id,
4650 	MGMT_OP_SET_EXP_FEATURE,
4651 	MGMT_STATUS_INVALID_PARAMS);
4653 	/* Only boolean on/off is supported */
4654 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4655 	return mgmt_cmd_status(sk, hdev->id,
4656 	MGMT_OP_SET_EXP_FEATURE,
4657 	MGMT_STATUS_INVALID_PARAMS);
4659 	val = !!cp->param[0];
4662 	changed = !hci_dev_test_and_set_flag(hdev,
4663 	HCI_ENABLE_LL_PRIVACY);
4664 	hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4666 	/* Enable LL privacy + supported settings changed */
4667 	flags = BIT(0) | BIT(1);
4669 	changed = hci_dev_test_and_clear_flag(hdev,
4670 	HCI_ENABLE_LL_PRIVACY);
4672 	/* Disable LL privacy + supported settings changed */
4676 	memcpy(rp.uuid, rpa_resolution_uuid, 16);
4677 	rp.flags = cpu_to_le32(flags);
4679 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4681 	err = mgmt_cmd_complete(sk, hdev->id,
4682 	MGMT_OP_SET_EXP_FEATURE, 0,
4686 	exp_ll_privacy_feature_changed(val, hdev, sk);
/* Experimental-feature handler for the quality report UUID: enables or
 * disables controller quality reporting via a driver hook
 * (hdev->set_quality_report) or the AOSP vendor extension, then tracks
 * the state in HCI_QUALITY_REPORT.  Runs under hci_req_sync_lock since
 * the enable path issues HCI traffic.
 */
4691 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4692 	struct mgmt_cp_set_exp_feature *cp,
4695 	struct mgmt_rp_set_exp_feature rp;
4699 	/* Command requires to use a valid controller index */
4701 	return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4702 	MGMT_OP_SET_EXP_FEATURE,
4703 	MGMT_STATUS_INVALID_INDEX);
4705 	/* Parameters are limited to a single octet */
4706 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4707 	return mgmt_cmd_status(sk, hdev->id,
4708 	MGMT_OP_SET_EXP_FEATURE,
4709 	MGMT_STATUS_INVALID_PARAMS);
4711 	/* Only boolean on/off is supported */
4712 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4713 	return mgmt_cmd_status(sk, hdev->id,
4714 	MGMT_OP_SET_EXP_FEATURE,
4715 	MGMT_STATUS_INVALID_PARAMS);
4717 	hci_req_sync_lock(hdev);
4719 	val = !!cp->param[0];
4720 	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4722 	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4723 	err = mgmt_cmd_status(sk, hdev->id,
4724 	MGMT_OP_SET_EXP_FEATURE,
4725 	MGMT_STATUS_NOT_SUPPORTED);
4726 	goto unlock_quality_report;
/* Prefer the driver-provided hook over the AOSP extension */
4730 	if (hdev->set_quality_report)
4731 	err = hdev->set_quality_report(hdev, val);
4733 	err = aosp_set_quality_report(hdev, val);
4736 	err = mgmt_cmd_status(sk, hdev->id,
4737 	MGMT_OP_SET_EXP_FEATURE,
4738 	MGMT_STATUS_FAILED);
4739 	goto unlock_quality_report;
4743 	hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4745 	hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4748 	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4750 	memcpy(rp.uuid, quality_report_uuid, 16);
4751 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4752 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4754 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4758 	exp_feature_changed(hdev, quality_report_uuid, val, sk);
4760 unlock_quality_report:
4761 	hci_req_sync_unlock(hdev);
/* Experimental-feature handler for the offload codecs UUID: toggles
 * HCI_OFFLOAD_CODECS_ENABLED.  Requires the driver to provide a
 * get_data_path_id hook, a controller index and a single boolean octet.
 */
4765 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4766 	struct mgmt_cp_set_exp_feature *cp,
4771 	struct mgmt_rp_set_exp_feature rp;
4773 	/* Command requires to use a valid controller index */
4775 	return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4776 	MGMT_OP_SET_EXP_FEATURE,
4777 	MGMT_STATUS_INVALID_INDEX);
4779 	/* Parameters are limited to a single octet */
4780 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4781 	return mgmt_cmd_status(sk, hdev->id,
4782 	MGMT_OP_SET_EXP_FEATURE,
4783 	MGMT_STATUS_INVALID_PARAMS);
4785 	/* Only boolean on/off is supported */
4786 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4787 	return mgmt_cmd_status(sk, hdev->id,
4788 	MGMT_OP_SET_EXP_FEATURE,
4789 	MGMT_STATUS_INVALID_PARAMS);
4791 	val = !!cp->param[0];
4792 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
/* Codec offload is only meaningful when the driver exposes data paths */
4794 	if (!hdev->get_data_path_id) {
4795 	return mgmt_cmd_status(sk, hdev->id,
4796 	MGMT_OP_SET_EXP_FEATURE,
4797 	MGMT_STATUS_NOT_SUPPORTED);
4802 	hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4804 	hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4807 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4810 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4811 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4812 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4813 	err = mgmt_cmd_complete(sk, hdev->id,
4814 	MGMT_OP_SET_EXP_FEATURE, 0,
4818 	exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
/* Experimental-feature handler for the LE simultaneous roles UUID:
 * toggles HCI_LE_SIMULTANEOUS_ROLES when the controller's LE states
 * support acting as central and peripheral at once.
 */
4823 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4824 	struct mgmt_cp_set_exp_feature *cp,
4829 	struct mgmt_rp_set_exp_feature rp;
4831 	/* Command requires to use a valid controller index */
4833 	return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4834 	MGMT_OP_SET_EXP_FEATURE,
4835 	MGMT_STATUS_INVALID_INDEX);
4837 	/* Parameters are limited to a single octet */
4838 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4839 	return mgmt_cmd_status(sk, hdev->id,
4840 	MGMT_OP_SET_EXP_FEATURE,
4841 	MGMT_STATUS_INVALID_PARAMS);
4843 	/* Only boolean on/off is supported */
4844 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4845 	return mgmt_cmd_status(sk, hdev->id,
4846 	MGMT_OP_SET_EXP_FEATURE,
4847 	MGMT_STATUS_INVALID_PARAMS);
4849 	val = !!cp->param[0];
4850 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4852 	if (!hci_dev_le_state_simultaneous(hdev)) {
4853 	return mgmt_cmd_status(sk, hdev->id,
4854 	MGMT_OP_SET_EXP_FEATURE,
4855 	MGMT_STATUS_NOT_SUPPORTED);
4860 	hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4862 	hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4865 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4868 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4869 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4870 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4871 	err = mgmt_cmd_complete(sk, hdev->id,
4872 	MGMT_OP_SET_EXP_FEATURE, 0,
4876 	exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
/* Experimental-feature handler for the ISO socket UUID: toggles the
 * global ISO socket support.  Must be sent on the non-controller index
 * with a single boolean octet.
 */
4882 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4883 	struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4885 	struct mgmt_rp_set_exp_feature rp;
4886 	bool val, changed = false;
4889 	/* Command requires to use the non-controller index */
4891 	return mgmt_cmd_status(sk, hdev->id,
4892 	MGMT_OP_SET_EXP_FEATURE,
4893 	MGMT_STATUS_INVALID_INDEX);
4895 	/* Parameters are limited to a single octet */
4896 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4897 	return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4898 	MGMT_OP_SET_EXP_FEATURE,
4899 	MGMT_STATUS_INVALID_PARAMS);
4901 	/* Only boolean on/off is supported */
4902 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4903 	return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4904 	MGMT_OP_SET_EXP_FEATURE,
4905 	MGMT_STATUS_INVALID_PARAMS);
4907 	val = cp->param[0] ? true : false;
4916 	memcpy(rp.uuid, iso_socket_uuid, 16);
4917 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4919 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4921 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4922 	MGMT_OP_SET_EXP_FEATURE, 0,
4926 	exp_feature_changed(hdev, iso_socket_uuid, val, sk);
/* Dispatch table mapping each experimental-feature UUID to its handler.
 * set_exp_feature() walks this table; the NULL sentinel terminates it.
 */
4932 static const struct mgmt_exp_feature {
4934 	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4935 	struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4936 } exp_features[] = {
4937 	EXP_FEAT(ZERO_KEY, set_zero_key_func),
4938 #ifdef CONFIG_BT_FEATURE_DEBUG
4939 	EXP_FEAT(debug_uuid, set_debug_func),
4941 	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4942 	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4943 	EXP_FEAT(quality_report_uuid, set_quality_report_func),
4944 	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4945 	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4947 	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4950 	/* end with a null feature */
4951 	EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE entry point: look up the UUID in the
 * exp_features[] table and delegate to the matching handler; unknown
 * UUIDs get MGMT_STATUS_NOT_SUPPORTED.  hdev may be NULL for the
 * non-controller index.
 */
4954 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4955 	void *data, u16 data_len)
4957 	struct mgmt_cp_set_exp_feature *cp = data;
4960 	bt_dev_dbg(hdev, "sock %p", sk);
4962 	for (i = 0; exp_features[i].uuid; i++) {
4963 	if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4964 	return exp_features[i].set_func(sk, hdev, cp, data_len);
4967 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4968 	MGMT_OP_SET_EXP_FEATURE,
4969 	MGMT_STATUS_NOT_SUPPORTED);
/* Compute the supported device flags for a specific connection-
 * parameters entry, starting from hdev->conn_flags and masking out
 * REMOTE_WAKEUP for RPA-using devices when LL privacy is off.
 */
4972 static u32 get_params_flags(struct hci_dev *hdev,
4973 	struct hci_conn_params *params)
4975 	u32 flags = hdev->conn_flags;
4977 	/* Devices using RPAs can only be programmed in the acceptlist if
4978 	 * LL Privacy has been enable otherwise they cannot mark
4979 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
4981 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
4982 	hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
4983 	flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
/* MGMT_OP_GET_DEVICE_FLAGS handler: report supported and current flags
 * for one device — BR/EDR devices are looked up in the accept list,
 * LE devices in the connection-parameters list (where supported flags
 * are refined per-device via get_params_flags()).
 */
4988 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4991 	struct mgmt_cp_get_device_flags *cp = data;
4992 	struct mgmt_rp_get_device_flags rp;
4993 	struct bdaddr_list_with_flags *br_params;
4994 	struct hci_conn_params *params;
4995 	u32 supported_flags;
4996 	u32 current_flags = 0;
/* Default: unknown device stays INVALID_PARAMS */
4997 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4999 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5000 	&cp->addr.bdaddr, cp->addr.type);
5004 	supported_flags = hdev->conn_flags;
5006 	memset(&rp, 0, sizeof(rp));
5008 	if (cp->addr.type == BDADDR_BREDR) {
5009 	br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5015 	current_flags = br_params->flags;
5017 	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5018 	le_addr_type(cp->addr.type));
5022 	supported_flags = get_params_flags(hdev, params);
5023 	current_flags = params->flags;
5026 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5027 	rp.addr.type = cp->addr.type;
5028 	rp.supported_flags = cpu_to_le32(supported_flags);
5029 	rp.current_flags = cpu_to_le32(current_flags);
5031 	status = MGMT_STATUS_SUCCESS;
5034 	hci_dev_unlock(hdev);
5036 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast a Device Flags Changed event for @bdaddr with the new
 * supported/current flag values, skipping the originating socket @sk.
 */
5040 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5041 	bdaddr_t *bdaddr, u8 bdaddr_type,
5042 	u32 supported_flags, u32 current_flags)
5044 	struct mgmt_ev_device_flags_changed ev;
5046 	bacpy(&ev.addr.bdaddr, bdaddr);
5047 	ev.addr.type = bdaddr_type;
5048 	ev.supported_flags = cpu_to_le32(supported_flags);
5049 	ev.current_flags = cpu_to_le32(current_flags);
5051 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: validate the requested flags
 * against what the controller/device supports, store them on the
 * accept-list entry (BR/EDR) or conn-params entry (LE), trigger a
 * passive-scan update when DEVICE_PRIVACY is involved, and broadcast
 * Device Flags Changed on success.
 */
5054 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5057 	struct mgmt_cp_set_device_flags *cp = data;
5058 	struct bdaddr_list_with_flags *br_params;
5059 	struct hci_conn_params *params;
5060 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5061 	u32 supported_flags;
5062 	u32 current_flags = __le32_to_cpu(cp->current_flags);
5064 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5065 	&cp->addr.bdaddr, cp->addr.type, current_flags);
	/* NOTE(review): hdev->conn_flags is read before hci_dev_lock() is
	 * taken — confirm whether conn_flags can change concurrently and the
	 * lock should be acquired earlier.
	 */
5068 	supported_flags = hdev->conn_flags;
/* Reject any requested flag outside the supported set */
5070 	if ((supported_flags | current_flags) != supported_flags) {
5071 	bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5072 	current_flags, supported_flags);
5078 	if (cp->addr.type == BDADDR_BREDR) {
5079 	br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5084 	br_params->flags = current_flags;
5085 	status = MGMT_STATUS_SUCCESS;
5087 	bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5088 	&cp->addr.bdaddr, cp->addr.type);
5094 	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5095 	le_addr_type(cp->addr.type));
5097 	bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5098 	&cp->addr.bdaddr, le_addr_type(cp->addr.type));
/* LE entries get a per-device supported mask; re-validate against it */
5102 	supported_flags = get_params_flags(hdev, params);
5104 	if ((supported_flags | current_flags) != supported_flags) {
5105 	bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5106 	current_flags, supported_flags);
5110 	WRITE_ONCE(params->flags, current_flags);
5111 	status = MGMT_STATUS_SUCCESS;
5113 	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 */
5116 	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5117 	hci_update_passive_scan(hdev);
5120 	hci_dev_unlock(hdev);
5123 	if (status == MGMT_STATUS_SUCCESS)
5124 	device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5125 	supported_flags, current_flags);
5127 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5128 	&cp->addr, sizeof(cp->addr));
/* Broadcast an Advertisement Monitor Added event carrying the new
 * monitor handle, skipping the originating socket @sk.
 */
5131 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5134 	struct mgmt_ev_adv_monitor_added ev;
5136 	ev.monitor_handle = cpu_to_le16(handle);
5138 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast an Advertisement Monitor Removed event for @handle.  When a
 * Remove Adv Monitor command targeting a specific handle is pending,
 * its originating socket is skipped so that requester only gets the
 * command reply, not the event.
 */
5141 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5143 	struct mgmt_ev_adv_monitor_removed ev;
5144 	struct mgmt_pending_cmd *cmd;
5145 	struct sock *sk_skip = NULL;
5146 	struct mgmt_cp_remove_adv_monitor *cp;
5148 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5152 	if (cp->monitor_handle)
5156 	ev.monitor_handle = cpu_to_le16(handle);
5158 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: collect the supported
 * monitor features (MSFT OR-patterns when available), the handle limits
 * and the handles of all registered monitors, and return them.  The
 * handle list is snapshotted under hci_dev lock, then the reply is
 * built after unlocking.
 */
5161 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5162 	void *data, u16 len)
5164 	struct adv_monitor *monitor = NULL;
5165 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5168 	__u32 supported = 0;
5170 	__u16 num_handles = 0;
5171 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5173 	BT_DBG("request for %s", hdev->name);
5177 	if (msft_monitor_supported(hdev))
5178 	supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5180 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5181 	handles[num_handles++] = monitor->handle;
5183 	hci_dev_unlock(hdev);
5185 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5186 	rp = kmalloc(rp_size, GFP_KERNEL);
5190 	/* All supported features are currently enabled */
5191 	enabled = supported;
5193 	rp->supported_features = cpu_to_le32(supported);
5194 	rp->enabled_features = cpu_to_le32(enabled);
5195 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5196 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5197 	rp->num_handles = cpu_to_le16(num_handles);
5199 	memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5201 	err = mgmt_cmd_complete(sk, hdev->id,
5202 	MGMT_OP_READ_ADV_MONITOR_FEATURES,
5203 	MGMT_STATUS_SUCCESS, rp, rp_size);
/* hci_cmd_sync completion for Add Adv Patterns Monitor: on success,
 * announce the monitor, bump the monitor count, mark the monitor
 * registered and refresh passive scanning; in all cases reply to the
 * pending command and drop it.
 */
5210 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5211 	void *data, int status)
5213 	struct mgmt_rp_add_adv_patterns_monitor rp;
5214 	struct mgmt_pending_cmd *cmd = data;
5215 	struct adv_monitor *monitor = cmd->user_data;
5219 	rp.monitor_handle = cpu_to_le16(monitor->handle);
5222 	mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5223 	hdev->adv_monitors_cnt++;
5224 	if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5225 	monitor->state = ADV_MONITOR_STATE_REGISTERED;
5226 	hci_update_passive_scan(hdev);
5229 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5230 	mgmt_status(status), &rp, sizeof(rp));
5231 	mgmt_pending_remove(cmd);
5233 	hci_dev_unlock(hdev);
5234 	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5235 	rp.monitor_handle, status);
/* hci_cmd_sync work function: register the monitor stashed in the
 * pending command's user_data with the controller.
 */
5238 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5240 	struct mgmt_pending_cmd *cmd = data;
5241 	struct adv_monitor *monitor = cmd->user_data;
5243 	return hci_add_adv_monitor(hdev, monitor);
/* Common tail for both Add Adv Patterns Monitor variants: rejects when
 * a conflicting monitor/LE command is already pending, queues the
 * registration as sync work under opcode @op, and on any failure frees
 * the monitor @m and replies with @status.
 */
5246 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5247 	struct adv_monitor *m, u8 status,
5248 	void *data, u16 len, u16 op)
5250 	struct mgmt_pending_cmd *cmd;
/* Only one monitor-related operation may be in flight at a time */
5258 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5259 	pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5260 	pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5261 	pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5262 	status = MGMT_STATUS_BUSY;
5266 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
5268 	status = MGMT_STATUS_NO_RESOURCES;
5273 	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5274 	mgmt_add_adv_patterns_monitor_complete);
5277 	status = MGMT_STATUS_NO_RESOURCES;
5279 	status = MGMT_STATUS_FAILED;
5284 	hci_dev_unlock(hdev);
/* Error path: the monitor is not handed off, so free it here */
5289 	hci_free_adv_monitor(hdev, m);
5290 	hci_dev_unlock(hdev);
5291 	return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy RSSI threshold parameters from the command into monitor @m, or
 * install permissive defaults when @rssi is NULL (the non-RSSI command
 * variant).
 */
5294 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5295 	struct mgmt_adv_rssi_thresholds *rssi)
5298 	m->rssi.low_threshold = rssi->low_threshold;
5299 	m->rssi.low_threshold_timeout =
5300 	__le16_to_cpu(rssi->low_threshold_timeout);
5301 	m->rssi.high_threshold = rssi->high_threshold;
5302 	m->rssi.high_threshold_timeout =
5303 	__le16_to_cpu(rssi->high_threshold_timeout);
5304 	m->rssi.sampling_period = rssi->sampling_period;
5306 	/* Default values. These numbers are the least constricting
5307 	 * parameters for MSFT API to work, so it behaves as if there
5308 	 * are no rssi parameter to consider. May need to be changed
5309 	 * if other API are to be supported.
	 */
5311 	m->rssi.low_threshold = -127;
5312 	m->rssi.low_threshold_timeout = 60;
5313 	m->rssi.high_threshold = -127;
5314 	m->rssi.high_threshold_timeout = 0;
5315 	m->rssi.sampling_period = 0;
/* Validate and copy @pattern_count advertisement patterns into monitor
 * @m's pattern list.  Each pattern's offset/length must stay within
 * HCI_MAX_EXT_AD_LENGTH.  Returns an MGMT_STATUS_* code.
 */
5319 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5320 	struct mgmt_adv_pattern *patterns)
5322 	u8 offset = 0, length = 0;
5323 	struct adv_pattern *p = NULL;
5326 	for (i = 0; i < pattern_count; i++) {
5327 	offset = patterns[i].offset;
5328 	length = patterns[i].length;
5329 	if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5330 	length > HCI_MAX_EXT_AD_LENGTH ||
5331 	(offset + length) > HCI_MAX_EXT_AD_LENGTH)
5332 	return MGMT_STATUS_INVALID_PARAMS;
5334 	p = kmalloc(sizeof(*p), GFP_KERNEL);
5336 	return MGMT_STATUS_NO_RESOURCES;
5338 	p->ad_type = patterns[i].ad_type;
5339 	p->offset = patterns[i].offset;
5340 	p->length = patterns[i].length;
5341 	memcpy(p->value, patterns[i].value, p->length);
5343 	INIT_LIST_HEAD(&p->list);
5344 	list_add(&p->list, &m->patterns);
5347 	return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds):
 * validate payload size against pattern_count, allocate the monitor
 * with default RSSI parameters, parse the patterns, then hand off to
 * __add_adv_patterns_monitor() which also handles the error statuses
 * accumulated here.
 */
5350 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5351 	void *data, u16 len)
5353 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5354 	struct adv_monitor *m = NULL;
5355 	u8 status = MGMT_STATUS_SUCCESS;
5356 	size_t expected_size = sizeof(*cp);
5358 	BT_DBG("request for %s", hdev->name);
/* Must carry at least one pattern beyond the fixed header */
5360 	if (len <= sizeof(*cp)) {
5361 	status = MGMT_STATUS_INVALID_PARAMS;
5365 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5366 	if (len != expected_size) {
5367 	status = MGMT_STATUS_INVALID_PARAMS;
5371 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5373 	status = MGMT_STATUS_NO_RESOURCES;
5377 	INIT_LIST_HEAD(&m->patterns);
/* NULL rssi selects the permissive defaults */
5379 	parse_adv_monitor_rssi(m, NULL);
5380 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5383 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5384 	MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: identical to
 * add_adv_patterns_monitor() except the command carries explicit RSSI
 * thresholds that are copied into the monitor.
 */
5387 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5388 	void *data, u16 len)
5390 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5391 	struct adv_monitor *m = NULL;
5392 	u8 status = MGMT_STATUS_SUCCESS;
5393 	size_t expected_size = sizeof(*cp);
5395 	BT_DBG("request for %s", hdev->name);
5397 	if (len <= sizeof(*cp)) {
5398 	status = MGMT_STATUS_INVALID_PARAMS;
5402 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5403 	if (len != expected_size) {
5404 	status = MGMT_STATUS_INVALID_PARAMS;
5408 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5410 	status = MGMT_STATUS_NO_RESOURCES;
5414 	INIT_LIST_HEAD(&m->patterns);
5416 	parse_adv_monitor_rssi(m, &cp->rssi);
5417 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5420 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5421 	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* hci_cmd_sync completion for Remove Adv Monitor: echo the requested
 * handle back, refresh passive scanning on success, reply to the
 * pending command and drop it.
 */
5424 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5425 	void *data, int status)
5427 	struct mgmt_rp_remove_adv_monitor rp;
5428 	struct mgmt_pending_cmd *cmd = data;
5429 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5433 	rp.monitor_handle = cp->monitor_handle;
5436 	hci_update_passive_scan(hdev);
5438 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5439 	mgmt_status(status), &rp, sizeof(rp));
5440 	mgmt_pending_remove(cmd);
5442 	hci_dev_unlock(hdev);
5443 	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5444 	rp.monitor_handle, status);
/* hci_cmd_sync work for Remove Advertising Monitor: handle 0 removes all
 * monitors, otherwise the single monitor with the given handle is removed.
 * (The conditional between the two returns is elided in this dump; the
 * all-vs-single split follows from the two calls below.)
 */
5447 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5449 struct mgmt_pending_cmd *cmd = data;
5450 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5451 u16 handle = __le16_to_cpu(cp->monitor_handle);
5454 return hci_remove_all_adv_monitor(hdev);
5456 return hci_remove_single_adv_monitor(hdev, handle);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: rejects the request while any
 * conflicting monitor/LE operation is pending, then queues
 * mgmt_remove_adv_monitor_sync() with completion callback.
 */
5459 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5460 void *data, u16 len)
5462 struct mgmt_pending_cmd *cmd;
/* Busy while SET_LE or any adv-monitor add/remove is still in flight. */
5467 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5468 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5469 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5470 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5471 status = MGMT_STATUS_BUSY;
5475 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5477 status = MGMT_STATUS_NO_RESOURCES;
5481 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5482 mgmt_remove_adv_monitor_complete);
/* Queuing failed: drop the pending entry and map err to an mgmt status. */
5485 mgmt_pending_remove(cmd);
5488 status = MGMT_STATUS_NO_RESOURCES;
5490 status = MGMT_STATUS_FAILED;
5495 hci_dev_unlock(hdev);
5500 hci_dev_unlock(hdev);
5501 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* Completion for Read Local OOB Data: translates the controller reply
 * (legacy P-192-only or Secure-Connections extended P-192+P-256 form) into
 * an mgmt_rp_read_local_oob_data response, or reports the failure status.
 */
5505 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5507 struct mgmt_rp_read_local_oob_data mgmt_rp;
5508 size_t rp_size = sizeof(mgmt_rp);
5509 struct mgmt_pending_cmd *cmd = data;
5510 struct sk_buff *skb = cmd->skb;
5511 u8 status = mgmt_status(err);
/* Derive status: sync err, ERR_PTR skb, or the first status byte of skb. */
5515 status = MGMT_STATUS_FAILED;
5516 else if (IS_ERR(skb))
5517 status = mgmt_status(PTR_ERR(skb));
5519 status = mgmt_status(skb->data[0]);
5522 bt_dev_dbg(hdev, "status %d", status);
5525 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5529 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
/* Legacy (non-SC) controllers only return the P-192 hash/randomizer. */
5531 if (!bredr_sc_enabled(hdev)) {
5532 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5534 if (skb->len < sizeof(*rp)) {
5535 mgmt_cmd_status(cmd->sk, hdev->id,
5536 MGMT_OP_READ_LOCAL_OOB_DATA,
5537 MGMT_STATUS_FAILED);
5541 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5542 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Trim the unused P-256 fields from the reply size. */
5544 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5546 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5548 if (skb->len < sizeof(*rp)) {
5549 mgmt_cmd_status(cmd->sk, hdev->id,
5550 MGMT_OP_READ_LOCAL_OOB_DATA,
5551 MGMT_STATUS_FAILED);
5555 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5556 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5558 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5559 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5562 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5563 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
/* Free the reply skb only when it is a real buffer, not an ERR_PTR. */
5566 if (skb && !IS_ERR(skb))
5569 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Read Local OOB Data: issues the extended variant
 * when BR/EDR Secure Connections is enabled, storing the controller reply
 * skb on the pending command for the completion handler.
 */
5572 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5574 struct mgmt_pending_cmd *cmd = data;
5576 if (bredr_sc_enabled(hdev))
5577 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5579 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5581 if (IS_ERR(cmd->skb))
5582 return PTR_ERR(cmd->skb);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller, then queues read_local_oob_data_sync() to fetch the local
 * OOB hash/randomizer values.
 */
5587 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5588 void *data, u16 data_len)
5590 struct mgmt_pending_cmd *cmd;
5593 bt_dev_dbg(hdev, "sock %p", sk);
5597 if (!hdev_is_powered(hdev)) {
5598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5599 MGMT_STATUS_NOT_POWERED);
5603 if (!lmp_ssp_capable(hdev)) {
5604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5605 MGMT_STATUS_NOT_SUPPORTED);
/* mgmt_pending_new (not _add): not looked up via pending_find later. */
5609 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5613 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5614 read_local_oob_data_complete);
5617 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5618 MGMT_STATUS_FAILED);
5621 mgmt_pending_free(cmd);
5625 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: stores remote OOB pairing data.
 * Accepts two payload sizes — the legacy form carrying only P-192
 * hash/randomizer (BR/EDR only) and the extended form carrying both P-192
 * and P-256 values. All-zero key material disables the corresponding
 * curve's OOB data rather than storing zero keys.
 */
5629 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5630 void *data, u16 len)
5632 struct mgmt_addr_info *addr = data;
5635 bt_dev_dbg(hdev, "sock %p", sk);
5637 if (!bdaddr_type_is_valid(addr->type))
5638 return mgmt_cmd_complete(sk, hdev->id,
5639 MGMT_OP_ADD_REMOTE_OOB_DATA,
5640 MGMT_STATUS_INVALID_PARAMS,
5641 addr, sizeof(*addr));
/* Legacy payload: P-192 values only, valid for BR/EDR addresses. */
5645 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5646 struct mgmt_cp_add_remote_oob_data *cp = data;
5649 if (cp->addr.type != BDADDR_BREDR) {
5650 err = mgmt_cmd_complete(sk, hdev->id,
5651 MGMT_OP_ADD_REMOTE_OOB_DATA,
5652 MGMT_STATUS_INVALID_PARAMS,
5653 &cp->addr, sizeof(cp->addr));
5657 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5658 cp->addr.type, cp->hash,
5659 cp->rand, NULL, NULL);
5661 status = MGMT_STATUS_FAILED;
5663 status = MGMT_STATUS_SUCCESS;
5665 err = mgmt_cmd_complete(sk, hdev->id,
5666 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5667 &cp->addr, sizeof(cp->addr));
5668 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5669 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5670 u8 *rand192, *hash192, *rand256, *hash256;
5673 if (bdaddr_type_is_le(cp->addr.type)) {
5674 /* Enforce zero-valued 192-bit parameters as
5675 * long as legacy SMP OOB isn't implemented.
5677 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5678 memcmp(cp->hash192, ZERO_KEY, 16)) {
5679 err = mgmt_cmd_complete(sk, hdev->id,
5680 MGMT_OP_ADD_REMOTE_OOB_DATA,
5681 MGMT_STATUS_INVALID_PARAMS,
5682 addr, sizeof(*addr));
5689 /* In case one of the P-192 values is set to zero,
5690 * then just disable OOB data for P-192.
5692 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5693 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5697 rand192 = cp->rand192;
5698 hash192 = cp->hash192;
5702 /* In case one of the P-256 values is set to zero, then just
5703 * disable OOB data for P-256.
5705 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5706 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5710 rand256 = cp->rand256;
5711 hash256 = cp->hash256;
5714 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5715 cp->addr.type, hash192, rand192,
5718 status = MGMT_STATUS_FAILED;
5720 status = MGMT_STATUS_SUCCESS;
5722 err = mgmt_cmd_complete(sk, hdev->id,
5723 MGMT_OP_ADD_REMOTE_OOB_DATA,
5724 status, &cp->addr, sizeof(cp->addr));
/* Any other payload length is a malformed command. */
5726 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5728 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5729 MGMT_STATUS_INVALID_PARAMS);
5733 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: removes stored remote OOB data
 * for one BR/EDR address, or clears all entries when BDADDR_ANY is given.
 */
5737 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5738 void *data, u16 len)
5740 struct mgmt_cp_remove_remote_oob_data *cp = data;
5744 bt_dev_dbg(hdev, "sock %p", sk);
5746 if (cp->addr.type != BDADDR_BREDR)
5747 return mgmt_cmd_complete(sk, hdev->id,
5748 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5749 MGMT_STATUS_INVALID_PARAMS,
5750 &cp->addr, sizeof(cp->addr));
/* Wildcard address clears the entire remote OOB data store. */
5754 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5755 hci_remote_oob_data_clear(hdev);
5756 status = MGMT_STATUS_SUCCESS;
5760 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5762 status = MGMT_STATUS_INVALID_PARAMS;
5764 status = MGMT_STATUS_SUCCESS;
5767 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5768 status, &cp->addr, sizeof(cp->addr));
5770 hci_dev_unlock(hdev);
/* Notifies whichever start-discovery variant (normal, service, limited)
 * is pending that discovery setup finished with the given HCI status.
 */
5774 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5776 struct mgmt_pending_cmd *cmd;
5778 bt_dev_dbg(hdev, "status %u", status);
/* Try the three discovery opcodes in turn; at most one can be pending. */
5782 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5784 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5787 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5790 cmd->cmd_complete(cmd, mgmt_status(status));
5791 mgmt_pending_remove(cmd);
5794 hci_dev_unlock(hdev);
/* Checks whether a requested discovery type is usable on this controller,
 * writing the mgmt failure status (or INVALID_PARAMS for unknown types)
 * through *mgmt_status. Returns true only for a supported type.
 */
5797 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5798 uint8_t *mgmt_status)
5801 case DISCOV_TYPE_LE:
5802 *mgmt_status = mgmt_le_support(hdev);
/* Interleaved discovery needs LE; BR/EDR support check is elided here
 * in this dump — confirm against the in-tree switch body. */
5806 case DISCOV_TYPE_INTERLEAVED:
5807 *mgmt_status = mgmt_le_support(hdev);
5811 case DISCOV_TYPE_BREDR:
5812 *mgmt_status = mgmt_bredr_support(hdev);
5817 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* hci_cmd_sync completion for the start-discovery work: replies to the
 * issuing socket and moves discovery state to FINDING on success or back
 * to STOPPED on error.
 */
5824 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5826 struct mgmt_pending_cmd *cmd = data;
/* Bail if this pending cmd was already cancelled/replaced meanwhile. */
5828 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5829 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5830 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5833 bt_dev_dbg(hdev, "err %d", err);
5835 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5837 mgmt_pending_remove(cmd);
5839 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
/* Thin hci_cmd_sync wrapper: runs the actual discovery start sequence. */
5843 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5845 return hci_start_discovery_sync(hdev);
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validates power/state/type, resets the
 * discovery filter, records the requested type, and queues the discovery
 * start work. @op selects which opcode is being serviced.
 */
5848 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5849 u16 op, void *data, u16 len)
5851 struct mgmt_cp_start_discovery *cp = data;
5852 struct mgmt_pending_cmd *cmd;
5856 bt_dev_dbg(hdev, "sock %p", sk);
5860 if (!hdev_is_powered(hdev)) {
5861 err = mgmt_cmd_complete(sk, hdev->id, op,
5862 MGMT_STATUS_NOT_POWERED,
5863 &cp->type, sizeof(cp->type));
/* Busy while discovery is already running or periodic inquiry is on. */
5867 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5868 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5869 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5870 &cp->type, sizeof(cp->type));
5874 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5875 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5876 &cp->type, sizeof(cp->type));
5880 /* Can't start discovery when it is paused */
5881 if (hdev->discovery_paused) {
5882 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5883 &cp->type, sizeof(cp->type));
5887 /* Clear the discovery filter first to free any previously
5888 * allocated memory for the UUID list.
5890 hci_discovery_filter_clear(hdev);
5892 hdev->discovery.type = cp->type;
5893 hdev->discovery.report_invalid_rssi = false;
/* Limited discovery restricts results to limited-discoverable devices. */
5894 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5895 hdev->discovery.limited = true;
5897 hdev->discovery.limited = false;
5899 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5905 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5906 start_discovery_complete);
5908 mgmt_pending_remove(cmd);
5912 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5915 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY entry point — delegates to the shared helper. */
5919 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5920 void *data, u16 len)
5922 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY entry point — shared helper with the
 * limited-discovery opcode. */
5926 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5927 void *data, u16 len)
5929 return start_discovery_internal(sk, hdev,
5930 MGMT_OP_START_LIMITED_DISCOVERY,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering by RSSI and an optional list of 128-bit service UUIDs
 * appended to the command. Validates the UUID count against both the
 * payload length and the theoretical maximum, copies the UUID list, and
 * queues the common discovery-start work.
 */
5934 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5935 void *data, u16 len)
5937 struct mgmt_cp_start_service_discovery *cp = data;
5938 struct mgmt_pending_cmd *cmd;
/* Upper bound on uuid_count such that sizeof(*cp) + count*16 fits in u16. */
5939 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5940 u16 uuid_count, expected_len;
5944 bt_dev_dbg(hdev, "sock %p", sk);
5948 if (!hdev_is_powered(hdev)) {
5949 err = mgmt_cmd_complete(sk, hdev->id,
5950 MGMT_OP_START_SERVICE_DISCOVERY,
5951 MGMT_STATUS_NOT_POWERED,
5952 &cp->type, sizeof(cp->type));
5956 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5957 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5958 err = mgmt_cmd_complete(sk, hdev->id,
5959 MGMT_OP_START_SERVICE_DISCOVERY,
5960 MGMT_STATUS_BUSY, &cp->type,
5965 if (hdev->discovery_paused) {
5966 err = mgmt_cmd_complete(sk, hdev->id,
5967 MGMT_OP_START_SERVICE_DISCOVERY,
5968 MGMT_STATUS_BUSY, &cp->type,
5973 uuid_count = __le16_to_cpu(cp->uuid_count);
5974 if (uuid_count > max_uuid_count) {
5975 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5977 err = mgmt_cmd_complete(sk, hdev->id,
5978 MGMT_OP_START_SERVICE_DISCOVERY,
5979 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Payload length must exactly cover the declared UUID list. */
5984 expected_len = sizeof(*cp) + uuid_count * 16;
5985 if (expected_len != len) {
5986 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5988 err = mgmt_cmd_complete(sk, hdev->id,
5989 MGMT_OP_START_SERVICE_DISCOVERY,
5990 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5995 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5996 err = mgmt_cmd_complete(sk, hdev->id,
5997 MGMT_OP_START_SERVICE_DISCOVERY,
5998 status, &cp->type, sizeof(cp->type));
6002 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6009 /* Clear the discovery filter first to free any previously
6010 * allocated memory for the UUID list.
6012 hci_discovery_filter_clear(hdev);
6014 hdev->discovery.result_filtering = true;
6015 hdev->discovery.type = cp->type;
6016 hdev->discovery.rssi = cp->rssi;
6017 hdev->discovery.uuid_count = uuid_count;
6019 if (uuid_count > 0) {
6020 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6022 if (!hdev->discovery.uuids) {
6023 err = mgmt_cmd_complete(sk, hdev->id,
6024 MGMT_OP_START_SERVICE_DISCOVERY,
6026 &cp->type, sizeof(cp->type));
6027 mgmt_pending_remove(cmd);
6032 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6033 start_discovery_complete);
6035 mgmt_pending_remove(cmd);
6039 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6042 hci_dev_unlock(hdev);
/* Notifies a pending Stop Discovery command of its final HCI status and
 * removes the pending entry.
 */
6046 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6048 struct mgmt_pending_cmd *cmd;
6050 bt_dev_dbg(hdev, "status %u", status);
6054 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6056 cmd->cmd_complete(cmd, mgmt_status(status));
6057 mgmt_pending_remove(cmd);
6060 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for stop-discovery work: replies to the caller
 * and marks discovery STOPPED.
 */
6063 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6065 struct mgmt_pending_cmd *cmd = data;
/* Skip if this pending cmd was already cancelled/replaced. */
6067 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6070 bt_dev_dbg(hdev, "err %d", err);
6072 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6074 mgmt_pending_remove(cmd);
6077 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Thin hci_cmd_sync wrapper: runs the actual discovery stop sequence. */
6080 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6082 return hci_stop_discovery_sync(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: rejects when no discovery is active or
 * the requested type doesn't match the running one, then queues the stop
 * work and moves state to STOPPING.
 */
6085 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6088 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6089 struct mgmt_pending_cmd *cmd;
6092 bt_dev_dbg(hdev, "sock %p", sk);
6096 if (!hci_discovery_active(hdev)) {
6097 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6098 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6099 sizeof(mgmt_cp->type));
/* The stop request must name the same discovery type that was started. */
6103 if (hdev->discovery.type != mgmt_cp->type) {
6104 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6105 MGMT_STATUS_INVALID_PARAMS,
6106 &mgmt_cp->type, sizeof(mgmt_cp->type));
6110 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6116 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6117 stop_discovery_complete);
6119 mgmt_pending_remove(cmd);
6123 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6126 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, userspace confirms
 * whether the name of a discovered device is already known; if not, the
 * entry is flagged NAME_NEEDED so name resolution is scheduled.
 */
6130 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6133 struct mgmt_cp_confirm_name *cp = data;
6134 struct inquiry_entry *e;
6137 bt_dev_dbg(hdev, "sock %p", sk);
6141 if (!hci_discovery_active(hdev)) {
6142 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6143 MGMT_STATUS_FAILED, &cp->addr,
/* Address must refer to a cache entry whose name state is unresolved. */
6148 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6150 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6151 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6156 if (cp->name_known) {
6157 e->name_state = NAME_KNOWN;
6160 e->name_state = NAME_NEEDED;
6161 hci_inquiry_cache_update_resolve(hdev, e);
6164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6165 &cp->addr, sizeof(cp->addr));
6168 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: adds the address to the reject list and,
 * on success, emits MGMT_EV_DEVICE_BLOCKED to other mgmt sockets.
 */
6172 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6175 struct mgmt_cp_block_device *cp = data;
6179 bt_dev_dbg(hdev, "sock %p", sk);
6181 if (!bdaddr_type_is_valid(cp->addr.type))
6182 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6183 MGMT_STATUS_INVALID_PARAMS,
6184 &cp->addr, sizeof(cp->addr));
6188 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6191 status = MGMT_STATUS_FAILED;
/* Event is broadcast to everyone except the socket that issued the cmd
 * (skip-socket argument is on an elided line). */
6195 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6197 status = MGMT_STATUS_SUCCESS;
6200 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6201 &cp->addr, sizeof(cp->addr));
6203 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: removes the address from the reject
 * list and, on success, emits MGMT_EV_DEVICE_UNBLOCKED. A miss in the
 * list is reported as INVALID_PARAMS.
 */
6208 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6211 struct mgmt_cp_unblock_device *cp = data;
6215 bt_dev_dbg(hdev, "sock %p", sk);
6217 if (!bdaddr_type_is_valid(cp->addr.type))
6218 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6219 MGMT_STATUS_INVALID_PARAMS,
6220 &cp->addr, sizeof(cp->addr));
6224 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6227 status = MGMT_STATUS_INVALID_PARAMS;
6231 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6233 status = MGMT_STATUS_SUCCESS;
6236 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6237 &cp->addr, sizeof(cp->addr));
6239 hci_dev_unlock(hdev);
/* hci_cmd_sync work for Set Device ID: regenerates the EIR data so the
 * new DID record is advertised. */
6244 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6246 return hci_update_eir_sync(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: stores the Device ID (DID) source,
 * vendor, product and version on hdev and schedules an EIR update.
 */
6249 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6252 struct mgmt_cp_set_device_id *cp = data;
6256 bt_dev_dbg(hdev, "sock %p", sk);
6258 source = __le16_to_cpu(cp->source);
/* Valid sources: 0x0000 disabled, 0x0001 Bluetooth SIG, 0x0002 USB IF. */
6260 if (source > 0x0002)
6261 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6262 MGMT_STATUS_INVALID_PARAMS);
6266 hdev->devid_source = source;
6267 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6268 hdev->devid_product = __le16_to_cpu(cp->product);
6269 hdev->devid_version = __le16_to_cpu(cp->version);
6271 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Fire-and-forget: no pending cmd, no completion callback needed. */
6274 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6276 hci_dev_unlock(hdev);
/* Logs the outcome of re-enabling an advertising instance. */
6281 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6284 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6286 bt_dev_dbg(hdev, "status %d", err);
/* Completion for Set Advertising: on failure, fails all pending
 * SET_ADVERTISING commands; on success, syncs HCI_ADVERTISING with the
 * controller's HCI_LE_ADV state, answers the pending commands, emits New
 * Settings, and — if Set Advertising was just turned off while instance
 * advertising exists — reschedules multi-instance advertising.
 */
6289 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6291 struct cmd_lookup match = { NULL, hdev };
6293 struct adv_info *adv_instance;
6294 u8 status = mgmt_status(err);
6297 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6298 cmd_status_rsp, &status);
6302 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6303 hci_dev_set_flag(hdev, HCI_ADVERTISING)
6305 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6307 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6310 new_settings(hdev, match.sk);
6315 /* If "Set Advertising" was just disabled and instance advertising was
6316 * set up earlier, then re-enable multi-instance advertising.
6318 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6319 list_empty(&hdev->adv_instances))
6322 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one. */
6324 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6325 struct adv_info, list);
6329 instance = adv_instance->instance;
6332 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6334 enable_advertising_instance(hdev, err);
/* hci_cmd_sync work for Set Advertising: applies the connectable flag,
 * cancels any instance timeout, switches to instance 0 and (re)starts or
 * stops advertising depending on the requested value. Uses extended
 * advertising commands when the controller supports them.
 */
6337 static int set_adv_sync(struct hci_dev *hdev, void *data)
6339 struct mgmt_pending_cmd *cmd = data;
6340 struct mgmt_mode *cp = cmd->param;
/* val 0x02 means "advertising on + connectable". */
6343 if (cp->val == 0x02)
6344 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6346 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6348 cancel_adv_timeout(hdev);
6351 /* Switch to instance "0" for the Set Advertising setting.
6352 * We cannot use update_[adv|scan_rsp]_data() here as the
6353 * HCI_ADVERTISING flag is not yet set.
6355 hdev->cur_adv_instance = 0x00;
6357 if (ext_adv_capable(hdev)) {
6358 hci_start_ext_adv_sync(hdev, 0x00);
6360 hci_update_adv_data_sync(hdev, 0x00);
6361 hci_update_scan_rsp_data_sync(hdev, 0x00);
6362 hci_enable_advertising_sync(hdev);
6365 hci_disable_advertising_sync(hdev);
/* MGMT_OP_SET_ADVERTISING handler. val: 0x00 off, 0x01 on,
 * 0x02 on+connectable. When no HCI traffic is needed (powered off, no
 * change, mesh mode, LE connections present, or active LE scan) the flag
 * is toggled directly and a settings response is sent; otherwise the
 * change is queued through set_adv_sync().
 */
6373 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6374 struct mgmt_mode *cp = data;
6375 struct mgmt_pending_cmd *cmd;
6379 bt_dev_dbg(hdev, "sock %p", sk);
6381 status = mgmt_le_support(hdev);
6383 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6386 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6387 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6388 MGMT_STATUS_INVALID_PARAMS);
6390 if (hdev->advertising_paused)
6391 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6398 /* The following conditions are ones which mean that we should
6399 * not do any HCI communication but directly send a mgmt
6400 * response to user space (after toggling the flag if
6403 if (!hdev_is_powered(hdev) ||
6404 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6405 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6406 hci_dev_test_flag(hdev, HCI_MESH) ||
6407 hci_conn_num(hdev, LE_LINK) > 0 ||
6408 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6409 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6413 hdev->cur_adv_instance = 0x00;
6414 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6415 if (cp->val == 0x02)
6416 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6418 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6420 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6421 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6424 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Only broadcast New Settings if a flag actually changed. */
6429 err = new_settings(hdev, sk);
6434 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6435 pending_find(MGMT_OP_SET_LE, hdev)) {
6436 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6441 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6445 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6446 set_advertising_complete);
6449 mgmt_pending_remove(cmd);
6452 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: configures the LE static random
 * address. Only allowed while powered off; a non-ANY address must not be
 * BDADDR_NONE and must have the two most significant bits set, per the
 * static-address format.
 */
6456 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6457 void *data, u16 len)
6459 struct mgmt_cp_set_static_address *cp = data;
6462 bt_dev_dbg(hdev, "sock %p", sk);
6464 if (!lmp_le_capable(hdev))
6465 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6466 MGMT_STATUS_NOT_SUPPORTED);
6468 if (hdev_is_powered(hdev))
6469 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6470 MGMT_STATUS_REJECTED);
6472 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6473 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6474 return mgmt_cmd_status(sk, hdev->id,
6475 MGMT_OP_SET_STATIC_ADDRESS,
6476 MGMT_STATUS_INVALID_PARAMS);
6478 /* Two most significant bits shall be set */
6479 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6480 return mgmt_cmd_status(sk, hdev->id,
6481 MGMT_OP_SET_STATIC_ADDRESS,
6482 MGMT_STATUS_INVALID_PARAMS);
6487 bacpy(&hdev->static_addr, &cp->bdaddr);
6489 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6493 err = new_settings(hdev, sk);
6496 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: sets LE scan interval and window
 * (each 0x0004-0x4000, window <= interval) and restarts background
 * scanning if it is currently running so the new values take effect.
 */
6500 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6501 void *data, u16 len)
6503 struct mgmt_cp_set_scan_params *cp = data;
6504 __u16 interval, window;
6507 bt_dev_dbg(hdev, "sock %p", sk);
6509 if (!lmp_le_capable(hdev))
6510 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6511 MGMT_STATUS_NOT_SUPPORTED);
6513 interval = __le16_to_cpu(cp->interval);
6515 if (interval < 0x0004 || interval > 0x4000)
6516 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6517 MGMT_STATUS_INVALID_PARAMS);
6519 window = __le16_to_cpu(cp->window);
6521 if (window < 0x0004 || window > 0x4000)
6522 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6523 MGMT_STATUS_INVALID_PARAMS);
/* Scan window can never exceed the scan interval. */
6525 if (window > interval)
6526 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6527 MGMT_STATUS_INVALID_PARAMS);
6531 hdev->le_scan_interval = interval;
6532 hdev->le_scan_window = window;
6534 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6537 /* If background scan is running, restart it so new parameters are
6540 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6541 hdev->discovery.state == DISCOVERY_STOPPED)
6542 hci_update_passive_scan(hdev);
6544 hci_dev_unlock(hdev);
/* Completion for Set Fast Connectable: on failure sends an error status;
 * on success syncs HCI_FAST_CONNECTABLE with the requested value, answers
 * the command, and broadcasts New Settings.
 */
6549 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6551 struct mgmt_pending_cmd *cmd = data;
6553 bt_dev_dbg(hdev, "err %d", err);
6556 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6559 struct mgmt_mode *cp = cmd->param;
6562 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6564 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6566 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6567 new_settings(hdev, cmd->sk);
6570 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Set Fast Connectable: writes the requested mode
 * (page scan parameters) to the controller. */
6573 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6575 struct mgmt_pending_cmd *cmd = data;
6576 struct mgmt_mode *cp = cmd->param;
6578 return hci_write_fast_connectable_sync(hdev, cp->val);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: needs BR/EDR enabled and a
 * >= 1.2 controller. No-ops when the flag already matches; toggles the
 * flag directly while powered off; otherwise queues the page-scan write.
 */
6581 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6582 void *data, u16 len)
6584 struct mgmt_mode *cp = data;
6585 struct mgmt_pending_cmd *cmd;
6588 bt_dev_dbg(hdev, "sock %p", sk);
6590 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6591 hdev->hci_ver < BLUETOOTH_VER_1_2)
6592 return mgmt_cmd_status(sk, hdev->id,
6593 MGMT_OP_SET_FAST_CONNECTABLE,
6594 MGMT_STATUS_NOT_SUPPORTED);
6596 if (cp->val != 0x00 && cp->val != 0x01)
6597 return mgmt_cmd_status(sk, hdev->id,
6598 MGMT_OP_SET_FAST_CONNECTABLE,
6599 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: just acknowledge. */
6603 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6604 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6608 if (!hdev_is_powered(hdev)) {
6609 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6610 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6611 new_settings(hdev, sk);
6615 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6620 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6621 fast_connectable_complete);
6624 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6625 MGMT_STATUS_FAILED);
6628 mgmt_pending_free(cmd);
6632 hci_dev_unlock(hdev);
/* Completion for Set BR/EDR: on failure, rolls back the optimistically
 * set HCI_BREDR_ENABLED flag and reports the error; on success, answers
 * the command and broadcasts New Settings.
 */
6637 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6639 struct mgmt_pending_cmd *cmd = data;
6641 bt_dev_dbg(hdev, "err %d", err);
6644 u8 mgmt_err = mgmt_status(err);
6646 /* We need to restore the flag if related HCI commands
6649 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6651 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6653 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6654 new_settings(hdev, cmd->sk);
6657 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Set BR/EDR enable: turns off fast connectable,
 * refreshes page/inquiry scan, and updates the advertising data flags
 * (scan response data is unaffected, see comment below).
 */
6660 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6664 status = hci_write_fast_connectable_sync(hdev, false);
6667 status = hci_update_scan_sync(hdev);
6669 /* Since only the advertising data flags will change, there
6670 * is no need to update the scan response data.
6673 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_BREDR handler: enables/disables BR/EDR on a dual-mode
 * controller. Disabling while powered is rejected, as is re-enabling
 * when a static address or Secure Connections is in use (see the long
 * comment below). The HCI_BREDR_ENABLED flag is set optimistically
 * before queueing set_bredr_sync(); set_bredr_complete() rolls it back
 * on failure.
 */
6678 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6680 struct mgmt_mode *cp = data;
6681 struct mgmt_pending_cmd *cmd;
6684 bt_dev_dbg(hdev, "sock %p", sk);
6686 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6687 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6688 MGMT_STATUS_NOT_SUPPORTED);
6690 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6691 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6692 MGMT_STATUS_REJECTED);
6694 if (cp->val != 0x00 && cp->val != 0x01)
6695 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6696 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: just acknowledge. */
6700 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6701 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: toggle flags directly; disabling also clears the
 * BR/EDR-dependent settings below. */
6705 if (!hdev_is_powered(hdev)) {
6707 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6708 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6709 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6710 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6713 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6715 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6719 err = new_settings(hdev, sk);
6723 /* Reject disabling when powered on */
6725 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6726 MGMT_STATUS_REJECTED);
6729 /* When configuring a dual-mode controller to operate
6730 * with LE only and using a static address, then switching
6731 * BR/EDR back on is not allowed.
6733 * Dual-mode controllers shall operate with the public
6734 * address as its identity address for BR/EDR and LE. So
6735 * reject the attempt to create an invalid configuration.
6737 * The same restrictions applies when secure connections
6738 * has been enabled. For BR/EDR this is a controller feature
6739 * while for LE it is a host stack feature. This means that
6740 * switching BR/EDR back on when secure connections has been
6741 * enabled is not a supported transaction.
6743 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6744 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6745 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6746 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6747 MGMT_STATUS_REJECTED);
6752 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6756 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6757 set_bredr_complete);
6760 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6761 MGMT_STATUS_FAILED);
6763 mgmt_pending_free(cmd);
6768 /* We need to flip the bit already here so that
6769 * hci_req_update_adv_data generates the correct flags.
6771 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6774 hci_dev_unlock(hdev);
/* Completion for Set Secure Connections: on failure reports the error;
 * on success maps the requested value (0x00/0x01/0x02, the dispatch
 * lines are elided in this dump) onto the HCI_SC_ENABLED/HCI_SC_ONLY
 * flag pair, answers the command, and broadcasts New Settings.
 */
6778 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6780 struct mgmt_pending_cmd *cmd = data;
6781 struct mgmt_mode *cp;
6783 bt_dev_dbg(hdev, "err %d", err);
6786 u8 mgmt_err = mgmt_status(err);
6788 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
/* 0x00: SC off. */
6796 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6797 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* 0x01: SC enabled. */
6800 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6801 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* 0x02: SC-only mode. */
6804 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6805 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6809 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6810 new_settings(hdev, cmd->sk);
6813 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Set Secure Connections: pre-sets HCI_SC_ENABLED
 * so the support write is not skipped as a no-op, then writes the
 * requested SC support value to the controller.
 */
6816 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6818 struct mgmt_pending_cmd *cmd = data;
6819 struct mgmt_mode *cp = cmd->param;
6822 /* Force write of val */
6823 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6825 return hci_write_sc_support_sync(hdev, val);
/* MGMT_OP_SET_SECURE_CONN handler. val: 0x00 off, 0x01 enabled,
 * 0x02 SC-only. Requires controller SC support or LE enabled; while
 * powered off or on an LE-only configuration the flags are toggled
 * directly, otherwise the SC support write is queued to the controller.
 */
6828 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6829 void *data, u16 len)
6831 struct mgmt_mode *cp = data;
6832 struct mgmt_pending_cmd *cmd;
6836 bt_dev_dbg(hdev, "sock %p", sk);
6838 if (!lmp_sc_capable(hdev) &&
6839 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6840 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6841 MGMT_STATUS_NOT_SUPPORTED);
/* On BR/EDR, SC is layered on SSP — reject if SSP is disabled. */
6843 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6844 lmp_sc_capable(hdev) &&
6845 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6847 MGMT_STATUS_REJECTED);
6849 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6851 MGMT_STATUS_INVALID_PARAMS);
/* No controller write needed: just toggle the host-side flags. */
6855 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6856 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6860 changed = !hci_dev_test_and_set_flag(hdev,
6862 if (cp->val == 0x02)
6863 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6865 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6867 changed = hci_dev_test_and_clear_flag(hdev,
6869 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6872 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6877 err = new_settings(hdev, sk);
/* Already in the requested SC/SC-only state: just acknowledge. */
6884 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6885 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6886 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6890 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6894 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6895 set_secure_conn_complete);
6898 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6899 MGMT_STATUS_FAILED);
6901 mgmt_pending_free(cmd);
6905 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 * cp->val: 0x00 = discard debug keys, 0x01 = keep them, 0x02 = keep and
 * also actively use the SSP debug key mode on the controller. Tracks two
 * flags (HCI_KEEP_DEBUG_KEYS / HCI_USE_DEBUG_KEYS) and, when the "use"
 * state changed while powered with SSP on, sends
 * HCI_OP_WRITE_SSP_DEBUG_MODE to the controller.
 */
6909 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6910 void *data, u16 len)
6912 struct mgmt_mode *cp = data;
6913 bool changed, use_changed;
6916 bt_dev_dbg(hdev, "sock %p", sk);
6918 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6919 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6920 MGMT_STATUS_INVALID_PARAMS);
6925 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6927 changed = hci_dev_test_and_clear_flag(hdev,
6928 HCI_KEEP_DEBUG_KEYS);
6930 if (cp->val == 0x02)
6931 use_changed = !hci_dev_test_and_set_flag(hdev,
6932 HCI_USE_DEBUG_KEYS);
6934 use_changed = hci_dev_test_and_clear_flag(hdev,
6935 HCI_USE_DEBUG_KEYS);
/* Only touch the controller when the debug mode actually toggled. */
6937 if (hdev_is_powered(hdev) && use_changed &&
6938 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6939 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6940 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6941 sizeof(mode), &mode);
6944 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6949 err = new_settings(hdev, sk);
6952 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY.
 * cp->privacy: 0x00 = off, 0x01 = privacy on, 0x02 = limited privacy.
 * Only allowed while powered off (REJECTED otherwise). Stores/clears the
 * local IRK, marks the RPA as expired so a fresh one is generated, and
 * updates the HCI_PRIVACY / HCI_LIMITED_PRIVACY flags.
 */
6956 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6959 struct mgmt_cp_set_privacy *cp = cp_data;
6963 bt_dev_dbg(hdev, "sock %p", sk);
6965 if (!lmp_le_capable(hdev))
6966 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6967 MGMT_STATUS_NOT_SUPPORTED);
6969 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6970 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6971 MGMT_STATUS_INVALID_PARAMS);
6973 if (hdev_is_powered(hdev))
6974 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6975 MGMT_STATUS_REJECTED);
6979 /* If user space supports this command it is also expected to
6980 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6982 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6985 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6986 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force regeneration of the resolvable private address. */
6987 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6988 hci_adv_instances_set_rpa_expired(hdev, true);
6989 if (cp->privacy == 0x02)
6990 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6992 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Privacy disabled: wipe the stored IRK and related flags. */
6994 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6995 memset(hdev->irk, 0, sizeof(hdev->irk));
6996 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6997 hci_adv_instances_set_rpa_expired(hdev, false);
6998 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7001 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7006 err = new_settings(hdev, sk);
7009 hci_dev_unlock(hdev);
/* Validate the address type of an IRK entry from MGMT_OP_LOAD_IRKS.
 * Public LE addresses are always valid; random LE addresses must be
 * static (top two bits of the MSB set, per Core Spec Vol 6 Part B 1.3).
 * NOTE(review): return statements are elided in this extract.
 */
7013 static bool irk_is_valid(struct mgmt_irk_info *irk)
7015 switch (irk->addr.type) {
7016 case BDADDR_LE_PUBLIC:
7019 case BDADDR_LE_RANDOM:
7020 /* Two most significant bits shall be set */
7021 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS.
 * Replaces the SMP IRK store wholesale: validates the count against the
 * declared payload length, checks each entry, clears the existing IRK
 * list and re-adds every non-blocked key. Finally flags the device as
 * doing RPA resolving since userspace evidently manages IRKs.
 */
7029 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7032 struct mgmt_cp_load_irks *cp = cp_data;
/* Cap derived from the u16 message-length field to avoid overflow. */
7033 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7034 sizeof(struct mgmt_irk_info));
7035 u16 irk_count, expected_len;
7038 bt_dev_dbg(hdev, "sock %p", sk);
7040 if (!lmp_le_capable(hdev))
7041 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7042 MGMT_STATUS_NOT_SUPPORTED);
7044 irk_count = __le16_to_cpu(cp->irk_count);
7045 if (irk_count > max_irk_count) {
7046 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7048 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7049 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared entry count. */
7052 expected_len = struct_size(cp, irks, irk_count);
7053 if (expected_len != len) {
7054 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7056 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7057 MGMT_STATUS_INVALID_PARAMS);
7060 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate every entry before mutating any state. */
7062 for (i = 0; i < irk_count; i++) {
7063 struct mgmt_irk_info *key = &cp->irks[i];
7065 if (!irk_is_valid(key))
7066 return mgmt_cmd_status(sk, hdev->id,
7068 MGMT_STATUS_INVALID_PARAMS);
7073 hci_smp_irks_clear(hdev);
7075 for (i = 0; i < irk_count; i++) {
7076 struct mgmt_irk_info *irk = &cp->irks[i];
7077 u8 addr_type = le_addr_type(irk->addr.type);
/* Administratively blocked keys are skipped, not treated as errors. */
7079 if (hci_is_blocked_key(hdev,
7080 HCI_BLOCKED_KEY_TYPE_IRK,
7082 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7087 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7088 if (irk->addr.type == BDADDR_BREDR)
7089 addr_type = BDADDR_BREDR;
7091 hci_add_irk(hdev, &irk->addr.bdaddr,
7092 addr_type, irk->val,
7096 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7098 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7100 hci_dev_unlock(hdev);
/* Validate an LTK entry from MGMT_OP_LOAD_LONG_TERM_KEYS: the initiator
 * field must be a strict boolean, and random LE addresses must be static
 * (top two bits of the MSB set).
 * NOTE(review): return statements are elided in this extract.
 */
7105 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7107 if (key->initiator != 0x00 && key->initiator != 0x01)
7110 switch (key->addr.type) {
7111 case BDADDR_LE_PUBLIC:
7114 case BDADDR_LE_RANDOM:
7115 /* Two most significant bits shall be set */
7116 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 * Same validate-clear-reload pattern as load_irks(): check count and
 * length, validate every entry up front, clear the SMP LTK store, then
 * map each MGMT key type to the kernel SMP type/authenticated pair and
 * add it (skipping administratively blocked keys).
 */
7124 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7125 void *cp_data, u16 len)
7127 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Cap derived from the u16 message-length field to avoid overflow. */
7128 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7129 sizeof(struct mgmt_ltk_info));
7130 u16 key_count, expected_len;
7133 bt_dev_dbg(hdev, "sock %p", sk);
7135 if (!lmp_le_capable(hdev))
7136 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7137 MGMT_STATUS_NOT_SUPPORTED);
7139 key_count = __le16_to_cpu(cp->key_count);
7140 if (key_count > max_key_count) {
7141 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7143 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7144 MGMT_STATUS_INVALID_PARAMS);
7147 expected_len = struct_size(cp, keys, key_count);
7148 if (expected_len != len) {
7149 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7151 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7152 MGMT_STATUS_INVALID_PARAMS);
7155 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate every entry before mutating any state. */
7157 for (i = 0; i < key_count; i++) {
7158 struct mgmt_ltk_info *key = &cp->keys[i];
7160 if (!ltk_is_valid(key))
7161 return mgmt_cmd_status(sk, hdev->id,
7162 MGMT_OP_LOAD_LONG_TERM_KEYS,
7163 MGMT_STATUS_INVALID_PARAMS);
7168 hci_smp_ltks_clear(hdev);
7170 for (i = 0; i < key_count; i++) {
7171 struct mgmt_ltk_info *key = &cp->keys[i];
7172 u8 type, authenticated;
7173 u8 addr_type = le_addr_type(key->addr.type);
7175 if (hci_is_blocked_key(hdev,
7176 HCI_BLOCKED_KEY_TYPE_LTK,
7178 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map the MGMT key type onto the SMP key type and auth level. For
 * legacy (non-P256) keys the role decides initiator vs responder.
 */
7183 switch (key->type) {
7184 case MGMT_LTK_UNAUTHENTICATED:
7185 authenticated = 0x00;
7186 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7188 case MGMT_LTK_AUTHENTICATED:
7189 authenticated = 0x01;
7190 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7192 case MGMT_LTK_P256_UNAUTH:
7193 authenticated = 0x00;
7194 type = SMP_LTK_P256;
7196 case MGMT_LTK_P256_AUTH:
7197 authenticated = 0x01;
7198 type = SMP_LTK_P256;
7200 case MGMT_LTK_P256_DEBUG:
7201 authenticated = 0x00;
7202 type = SMP_LTK_P256_DEBUG;
7208 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7209 if (key->addr.type == BDADDR_BREDR)
7210 addr_type = BDADDR_BREDR;
7212 hci_add_ltk(hdev, &key->addr.bdaddr,
7213 addr_type, type, authenticated,
7214 key->val, key->enc_size, key->ediv, key->rand);
7217 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7220 hci_dev_unlock(hdev);
/* Completion callback for the queued Get Connection Information work.
 * On success, copies the refreshed RSSI/TX-power values cached on the
 * hci_conn (stashed in cmd->user_data by get_conn_info_sync) into the
 * reply; on failure, reports the "invalid" sentinel values instead.
 */
7225 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7227 struct mgmt_pending_cmd *cmd = data;
7228 struct hci_conn *conn = cmd->user_data;
7229 struct mgmt_cp_get_conn_info *cp = cmd->param;
7230 struct mgmt_rp_get_conn_info rp;
7233 bt_dev_dbg(hdev, "err %d", err);
7235 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7237 status = mgmt_status(err);
7238 if (status == MGMT_STATUS_SUCCESS) {
7239 rp.rssi = conn->rssi;
7240 rp.tx_power = conn->tx_power;
7241 rp.max_tx_power = conn->max_tx_power;
7243 rp.rssi = HCI_RSSI_INVALID;
7244 rp.tx_power = HCI_TX_POWER_INVALID;
7245 rp.max_tx_power = HCI_TX_POWER_INVALID;
7248 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7251 mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT_OP_GET_CONN_INFO: re-resolves the connection
 * (it may have dropped since the command was queued), then reads RSSI and,
 * where still unknown, current and max TX power from the controller.
 * Returns a MGMT status / negative error consumed by the completion
 * callback.
 */
7254 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7256 struct mgmt_pending_cmd *cmd = data;
7257 struct mgmt_cp_get_conn_info *cp = cmd->param;
7258 struct hci_conn *conn;
7262 /* Make sure we are still connected */
7263 if (cp->addr.type == BDADDR_BREDR)
7264 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7267 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7269 if (!conn || conn->state != BT_CONNECTED)
7270 return MGMT_STATUS_NOT_CONNECTED;
/* Completion callback reads the values back via cmd->user_data. */
7272 cmd->user_data = conn;
7273 handle = cpu_to_le16(conn->handle);
7275 /* Refresh RSSI each time */
7276 err = hci_read_rssi_sync(hdev, handle);
7278 /* For LE links TX power does not change thus we don't need to
7279 * query for it once value is known.
7281 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7282 conn->tx_power == HCI_TX_POWER_INVALID))
7283 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7285 /* Max TX power needs to be read only once per connection */
7286 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7287 err = hci_read_tx_power_sync(hdev, handle, 0x01);
/* Handler for MGMT_OP_GET_CONN_INFO.
 * Answers from the values cached on the hci_conn when they are recent
 * enough; otherwise queues get_conn_info_sync() to refresh them from the
 * controller. The cache validity window is randomized between
 * conn_info_min_age and conn_info_max_age so clients cannot lock onto
 * the refresh cadence.
 */
7292 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7295 struct mgmt_cp_get_conn_info *cp = data;
7296 struct mgmt_rp_get_conn_info rp;
7297 struct hci_conn *conn;
7298 unsigned long conn_info_age;
7301 bt_dev_dbg(hdev, "sock %p", sk);
7303 memset(&rp, 0, sizeof(rp));
7304 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7305 rp.addr.type = cp->addr.type;
7307 if (!bdaddr_type_is_valid(cp->addr.type))
7308 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7309 MGMT_STATUS_INVALID_PARAMS,
7314 if (!hdev_is_powered(hdev)) {
7315 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7316 MGMT_STATUS_NOT_POWERED, &rp,
7321 if (cp->addr.type == BDADDR_BREDR)
7322 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7325 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7327 if (!conn || conn->state != BT_CONNECTED) {
7328 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7329 MGMT_STATUS_NOT_CONNECTED, &rp,
7334 /* To avoid client trying to guess when to poll again for information we
7335 * calculate conn info age as random value between min/max set in hdev.
7337 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7338 hdev->conn_info_max_age - 1);
7340 /* Query controller to refresh cached values if they are too old or were
7343 if (time_after(jiffies, conn->conn_info_timestamp +
7344 msecs_to_jiffies(conn_info_age)) ||
7345 !conn->conn_info_timestamp) {
7346 struct mgmt_pending_cmd *cmd;
7348 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7353 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7354 cmd, get_conn_info_complete);
7358 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7359 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7362 mgmt_pending_free(cmd);
7367 conn->conn_info_timestamp = jiffies;
7369 /* Cache is valid, just reply with values cached in hci_conn */
7370 rp.rssi = conn->rssi;
7371 rp.tx_power = conn->tx_power;
7372 rp.max_tx_power = conn->max_tx_power;
7374 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7375 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7379 hci_dev_unlock(hdev);
/* Completion callback for the queued Get Clock Information work.
 * Builds the reply from hdev->clock (local clock) plus the piconet clock
 * and accuracy cached on the hci_conn by the read, then frees the pending
 * command. Only visible statements are documented; guard conditions
 * around the conn fields are elided in this extract.
 */
7383 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7385 struct mgmt_pending_cmd *cmd = data;
7386 struct mgmt_cp_get_clock_info *cp = cmd->param;
7387 struct mgmt_rp_get_clock_info rp;
7388 struct hci_conn *conn = cmd->user_data;
7389 u8 status = mgmt_status(err);
7391 bt_dev_dbg(hdev, "err %d", err);
7393 memset(&rp, 0, sizeof(rp));
7394 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7395 rp.addr.type = cp->addr.type;
7400 rp.local_clock = cpu_to_le32(hdev->clock);
7403 rp.piconet_clock = cpu_to_le32(conn->clock);
7404 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7408 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7411 mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT_OP_GET_CLOCK_INFO: first reads the local
 * clock (handle 0, which = 0 via the zeroed hci_cp), then — if the
 * target connection still exists — reads the piconet clock for that
 * connection handle.
 */
7414 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7416 struct mgmt_pending_cmd *cmd = data;
7417 struct mgmt_cp_get_clock_info *cp = cmd->param;
7418 struct hci_cp_read_clock hci_cp;
7419 struct hci_conn *conn;
/* Zeroed cp = local clock read (handle 0x0000, which 0x00). */
7421 memset(&hci_cp, 0, sizeof(hci_cp));
7422 hci_read_clock_sync(hdev, &hci_cp);
7424 /* Make sure connection still exists */
7425 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7426 if (!conn || conn->state != BT_CONNECTED)
7427 return MGMT_STATUS_NOT_CONNECTED;
/* Completion callback reads the values back via cmd->user_data. */
7429 cmd->user_data = conn;
7430 hci_cp.handle = cpu_to_le16(conn->handle);
7431 hci_cp.which = 0x01; /* Piconet clock */
7433 return hci_read_clock_sync(hdev, &hci_cp);
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 * BR/EDR only. Validates power state and (when a specific peer address
 * is given rather than BDADDR_ANY) that the connection exists, then
 * queues get_clock_info_sync() with get_clock_info_complete() as the
 * completion callback.
 */
7436 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7439 struct mgmt_cp_get_clock_info *cp = data;
7440 struct mgmt_rp_get_clock_info rp;
7441 struct mgmt_pending_cmd *cmd;
7442 struct hci_conn *conn;
7445 bt_dev_dbg(hdev, "sock %p", sk);
7447 memset(&rp, 0, sizeof(rp));
7448 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7449 rp.addr.type = cp->addr.type;
7451 if (cp->addr.type != BDADDR_BREDR)
7452 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7453 MGMT_STATUS_INVALID_PARAMS,
7458 if (!hdev_is_powered(hdev)) {
7459 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7460 MGMT_STATUS_NOT_POWERED, &rp,
/* BDADDR_ANY means "local clock only"; otherwise require a live ACL. */
7465 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7466 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7468 if (!conn || conn->state != BT_CONNECTED) {
7469 err = mgmt_cmd_complete(sk, hdev->id,
7470 MGMT_OP_GET_CLOCK_INFO,
7471 MGMT_STATUS_NOT_CONNECTED,
7479 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7483 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7484 get_clock_info_complete);
7487 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7488 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7491 mgmt_pending_free(cmd);
7496 hci_dev_unlock(hdev);
/* Check whether an LE connection to the given address/type exists and is
 * fully established (BT_CONNECTED).
 * NOTE(review): return statements are elided in this extract.
 */
7500 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7502 struct hci_conn *conn;
7504 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7508 if (conn->dst_type != type)
7511 if (conn->state != BT_CONNECTED)
/* Create-or-update the connection parameters for an LE peer and place
 * them on the correct pending list (pend_le_conns / pend_le_reports)
 * for the requested auto_connect policy. Caller must hold hdev->lock.
 * NOTE(review): break statements and the error return for a failed
 * hci_conn_params_add() are elided in this extract.
 */
7517 /* This function requires the caller holds hdev->lock */
7518 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7519 u8 addr_type, u8 auto_connect)
7521 struct hci_conn_params *params;
7523 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do if the policy is unchanged. */
7527 if (params->auto_connect == auto_connect)
/* Remove from whichever pending list the params currently sit on. */
7530 hci_pend_le_list_del_init(params);
7532 switch (auto_connect) {
7533 case HCI_AUTO_CONN_DISABLED:
7534 case HCI_AUTO_CONN_LINK_LOSS:
7535 /* If auto connect is being disabled when we're trying to
7536 * connect to device, keep connecting.
7538 if (params->explicit_connect)
7539 hci_pend_le_list_add(params, &hdev->pend_le_conns)
7541 case HCI_AUTO_CONN_REPORT:
7542 if (params->explicit_connect)
7543 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7545 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7547 case HCI_AUTO_CONN_DIRECT:
7548 case HCI_AUTO_CONN_ALWAYS:
/* Already-connected devices need no pending connection entry. */
7549 if (!is_connected(hdev, addr, addr_type))
7550 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7554 params->auto_connect = auto_connect;
7556 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7557 addr, addr_type, auto_connect);
/* Emit the MGMT_EV_DEVICE_ADDED event to management sockets, skipping
 * the originating socket sk.
 */
7562 static void device_added(struct sock *sk, struct hci_dev *hdev,
7563 bdaddr_t *bdaddr, u8 type, u8 action)
7565 struct mgmt_ev_device_added ev;
7567 bacpy(&ev.addr.bdaddr, bdaddr);
7568 ev.addr.type = type;
7571 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync work queued after Add Device: refresh LE passive scanning
 * so the new accept-list/params entry takes effect.
 */
7574 static int add_device_sync(struct hci_dev *hdev, void *data)
7576 return hci_update_passive_scan_sync(hdev);
/* Handler for MGMT_OP_ADD_DEVICE.
 * action: 0x00 = background scan & report, 0x01 = allow incoming
 * connection (BR/EDR) / direct connect (LE), 0x02 = auto-connect always.
 * BR/EDR entries go on hdev->accept_list; LE entries become
 * hci_conn_params with the mapped auto_connect policy. Finishes by
 * emitting Device Added and Device Flags Changed events.
 */
7579 static int add_device(struct sock *sk, struct hci_dev *hdev,
7580 void *data, u16 len)
7582 struct mgmt_cp_add_device *cp = data;
7583 u8 auto_conn, addr_type;
7584 struct hci_conn_params *params;
7586 u32 current_flags = 0;
7587 u32 supported_flags;
7589 bt_dev_dbg(hdev, "sock %p", sk);
7591 if (!bdaddr_type_is_valid(cp->addr.type) ||
7592 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7593 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7594 MGMT_STATUS_INVALID_PARAMS,
7595 &cp->addr, sizeof(cp->addr));
7597 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7598 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7599 MGMT_STATUS_INVALID_PARAMS,
7600 &cp->addr, sizeof(cp->addr));
7604 if (cp->addr.type == BDADDR_BREDR) {
7605 /* Only incoming connections action is supported for now */
7606 if (cp->action != 0x01) {
7607 err = mgmt_cmd_complete(sk, hdev->id,
7609 MGMT_STATUS_INVALID_PARAMS,
7610 &cp->addr, sizeof(cp->addr));
7614 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
/* Re-evaluate page scan now that the accept list changed. */
7620 hci_update_scan(hdev);
/* LE path from here on. */
7625 addr_type = le_addr_type(cp->addr.type);
7627 if (cp->action == 0x02)
7628 auto_conn = HCI_AUTO_CONN_ALWAYS;
7629 else if (cp->action == 0x01)
7630 auto_conn = HCI_AUTO_CONN_DIRECT;
7632 auto_conn = HCI_AUTO_CONN_REPORT;
7634 /* Kernel internally uses conn_params with resolvable private
7635 * address, but Add Device allows only identity addresses.
7636 * Make sure it is enforced before calling
7637 * hci_conn_params_lookup.
7639 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7640 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7641 MGMT_STATUS_INVALID_PARAMS,
7642 &cp->addr, sizeof(cp->addr));
7646 /* If the connection parameters don't exist for this device,
7647 * they will be created and configured with defaults.
7649 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7651 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7652 MGMT_STATUS_FAILED, &cp->addr,
7656 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7659 current_flags = params->flags;
7662 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7667 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7668 supported_flags = hdev->conn_flags;
7669 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7670 supported_flags, current_flags);
7672 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7673 MGMT_STATUS_SUCCESS, &cp->addr,
7677 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event to management sockets, skipping
 * the originating socket sk.
 */
7681 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7682 bdaddr_t *bdaddr, u8 type)
7684 struct mgmt_ev_device_removed ev;
7686 bacpy(&ev.addr.bdaddr, bdaddr);
7687 ev.addr.type = type;
7689 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync work queued after Remove Device: refresh LE passive
 * scanning so the removed entry stops being targeted.
 */
7692 static int remove_device_sync(struct hci_dev *hdev, void *data)
7694 return hci_update_passive_scan_sync(hdev);
/* Handler for MGMT_OP_REMOVE_DEVICE.
 * A specific address removes one entry (BR/EDR accept-list entry or LE
 * conn_params); BDADDR_ANY with type 0 removes every accept-list entry
 * and every non-disabled LE connection parameter. Emits Device Removed
 * events and queues a passive-scan refresh.
 */
7697 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7698 void *data, u16 len)
7700 struct mgmt_cp_remove_device *cp = data;
7703 bt_dev_dbg(hdev, "sock %p", sk);
7707 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7708 struct hci_conn_params *params;
7711 if (!bdaddr_type_is_valid(cp->addr.type)) {
7712 err = mgmt_cmd_complete(sk, hdev->id,
7713 MGMT_OP_REMOVE_DEVICE,
7714 MGMT_STATUS_INVALID_PARAMS,
7715 &cp->addr, sizeof(cp->addr));
7719 if (cp->addr.type == BDADDR_BREDR) {
7720 err = hci_bdaddr_list_del(&hdev->accept_list,
7724 err = mgmt_cmd_complete(sk, hdev->id,
7725 MGMT_OP_REMOVE_DEVICE,
7726 MGMT_STATUS_INVALID_PARAMS,
/* Re-evaluate page scan now that the accept list changed. */
7732 hci_update_scan(hdev);
7734 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path for a specific peer. */
7739 addr_type = le_addr_type(cp->addr.type);
7741 /* Kernel internally uses conn_params with resolvable private
7742 * address, but Remove Device allows only identity addresses.
7743 * Make sure it is enforced before calling
7744 * hci_conn_params_lookup.
7746 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7747 err = mgmt_cmd_complete(sk, hdev->id,
7748 MGMT_OP_REMOVE_DEVICE,
7749 MGMT_STATUS_INVALID_PARAMS,
7750 &cp->addr, sizeof(cp->addr));
7754 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7757 err = mgmt_cmd_complete(sk, hdev->id,
7758 MGMT_OP_REMOVE_DEVICE,
7759 MGMT_STATUS_INVALID_PARAMS,
7760 &cp->addr, sizeof(cp->addr));
/* Entries userspace never added (disabled/explicit) are not removable. */
7764 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7765 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7766 err = mgmt_cmd_complete(sk, hdev->id,
7767 MGMT_OP_REMOVE_DEVICE,
7768 MGMT_STATUS_INVALID_PARAMS,
7769 &cp->addr, sizeof(cp->addr));
7773 hci_conn_params_free(params);
7775 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe everything. */
7777 struct hci_conn_params *p, *tmp;
7778 struct bdaddr_list *b, *btmp;
7780 if (cp->addr.type) {
7781 err = mgmt_cmd_complete(sk, hdev->id,
7782 MGMT_OP_REMOVE_DEVICE,
7783 MGMT_STATUS_INVALID_PARAMS,
7784 &cp->addr, sizeof(cp->addr));
7788 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7789 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7794 hci_update_scan(hdev);
7796 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7797 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7799 device_removed(sk, hdev, &p->addr, p->addr_type);
/* In-flight explicit connects survive as EXPLICIT entries. */
7800 if (p->explicit_connect) {
7801 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7804 hci_conn_params_free(p);
7807 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7810 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7813 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7814 MGMT_STATUS_SUCCESS, &cp->addr,
7817 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM.
 * Clears disabled LE connection parameter entries and loads the supplied
 * list. Invalid individual entries (bad address type or out-of-range
 * interval/latency/timeout) are logged and skipped rather than failing
 * the whole command.
 */
7821 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7824 struct mgmt_cp_load_conn_param *cp = data;
/* Cap derived from the u16 message-length field to avoid overflow. */
7825 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7826 sizeof(struct mgmt_conn_param));
7827 u16 param_count, expected_len;
7830 if (!lmp_le_capable(hdev))
7831 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7832 MGMT_STATUS_NOT_SUPPORTED);
7834 param_count = __le16_to_cpu(cp->param_count);
7835 if (param_count > max_param_count) {
7836 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7839 MGMT_STATUS_INVALID_PARAMS);
7842 expected_len = struct_size(cp, params, param_count);
7843 if (expected_len != len) {
7844 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7847 MGMT_STATUS_INVALID_PARAMS);
7850 bt_dev_dbg(hdev, "param_count %u", param_count);
7854 hci_conn_params_clear_disabled(hdev);
7856 for (i = 0; i < param_count; i++) {
7857 struct mgmt_conn_param *param = &cp->params[i];
7858 struct hci_conn_params *hci_param;
7859 u16 min, max, latency, timeout;
7862 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7865 if (param->addr.type == BDADDR_LE_PUBLIC) {
7866 addr_type = ADDR_LE_DEV_PUBLIC;
7867 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7868 addr_type = ADDR_LE_DEV_RANDOM;
7870 bt_dev_err(hdev, "ignoring invalid connection parameters");
7874 min = le16_to_cpu(param->min_interval);
7875 max = le16_to_cpu(param->max_interval);
7876 latency = le16_to_cpu(param->latency);
7877 timeout = le16_to_cpu(param->timeout);
7879 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7880 min, max, latency, timeout);
7882 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7883 bt_dev_err(hdev, "ignoring invalid connection parameters");
7887 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7890 bt_dev_err(hdev, "failed to add connection parameters");
7894 hci_param->conn_min_interval = min;
7895 hci_param->conn_max_interval = max;
7896 hci_param->conn_latency = latency;
7897 hci_param->supervision_timeout = timeout;
7900 hci_dev_unlock(hdev);
7902 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG.
 * Only valid while powered off and only for controllers with the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. Toggles HCI_EXT_CONFIGURED and, when
 * that flips the overall configured state, moves the controller between
 * the configured and unconfigured mgmt index lists (re-announcing it via
 * mgmt_index_removed/added).
 */
7906 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7907 void *data, u16 len)
7909 struct mgmt_cp_set_external_config *cp = data;
7913 bt_dev_dbg(hdev, "sock %p", sk);
7915 if (hdev_is_powered(hdev))
7916 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7917 MGMT_STATUS_REJECTED);
7919 if (cp->config != 0x00 && cp->config != 0x01)
7920 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7921 MGMT_STATUS_INVALID_PARAMS);
7923 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7924 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7925 MGMT_STATUS_NOT_SUPPORTED);
7930 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7932 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7934 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7941 err = new_options(hdev, sk);
/* Flag change flipped the configured state: switch index lists. */
7943 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7944 mgmt_index_removed(hdev);
7946 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7947 hci_dev_set_flag(hdev, HCI_CONFIG);
7948 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7950 queue_work(hdev->req_workqueue, &hdev->power_on);
7952 set_bit(HCI_RAW, &hdev->flags);
7953 mgmt_index_added(hdev);
7958 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.
 * Only valid while powered off and when the driver provides a
 * set_bdaddr hook. Stores the address; if that completes the
 * controller's configuration, re-announces the index and powers it up
 * briefly (HCI_CONFIG/HCI_AUTO_OFF) so the address is programmed.
 */
7962 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7963 void *data, u16 len)
7965 struct mgmt_cp_set_public_address *cp = data;
7969 bt_dev_dbg(hdev, "sock %p", sk);
7971 if (hdev_is_powered(hdev))
7972 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7973 MGMT_STATUS_REJECTED);
7975 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7976 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7977 MGMT_STATUS_INVALID_PARAMS);
7979 if (!hdev->set_bdaddr)
7980 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7981 MGMT_STATUS_NOT_SUPPORTED);
7985 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7986 bacpy(&hdev->public_addr, &cp->bdaddr);
7988 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7995 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7996 err = new_options(hdev, sk);
/* Address completed the configuration: promote to configured index. */
7998 if (is_configured(hdev)) {
7999 mgmt_index_removed(hdev);
8001 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8003 hci_dev_set_flag(hdev, HCI_CONFIG);
8004 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8006 queue_work(hdev->req_workqueue, &hdev->power_on);
8010 hci_dev_unlock(hdev);
/* Completion callback for the BR/EDR local OOB data read queued by
 * read_local_ssp_oob_req(). Parses the controller reply (legacy P-192
 * only, or extended P-192+P-256 when BR/EDR SC is enabled), builds the
 * EIR-encoded Read Local OOB Extended Data reply, and also broadcasts it
 * as MGMT_EV_LOCAL_OOB_DATA_UPDATED to other interested sockets.
 * NOTE(review): several branch/assignment lines (h192/r192/h256/r256
 * setup, error gotos) are elided in this extract.
 */
8014 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8017 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8018 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8019 u8 *h192, *r192, *h256, *r256;
8020 struct mgmt_pending_cmd *cmd = data;
8021 struct sk_buff *skb = cmd->skb;
8022 u8 status = mgmt_status(err);
/* Bail if this command is no longer the pending one. */
8025 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8030 status = MGMT_STATUS_FAILED;
8031 else if (IS_ERR(skb))
8032 status = mgmt_status(PTR_ERR(skb));
8034 status = mgmt_status(skb->data[0]);
8037 bt_dev_dbg(hdev, "status %u", status);
8039 mgmt_cp = cmd->param;
8042 status = mgmt_status(status);
8049 } else if (!bredr_sc_enabled(hdev)) {
/* Legacy reply: P-192 hash+randomizer only. */
8050 struct hci_rp_read_local_oob_data *rp;
8052 if (skb->len != sizeof(*rp)) {
8053 status = MGMT_STATUS_FAILED;
8056 status = MGMT_STATUS_SUCCESS;
8057 rp = (void *)skb->data;
8059 eir_len = 5 + 18 + 18;
/* Extended reply: P-192 and P-256 values. */
8066 struct hci_rp_read_local_oob_ext_data *rp;
8068 if (skb->len != sizeof(*rp)) {
8069 status = MGMT_STATUS_FAILED;
8072 status = MGMT_STATUS_SUCCESS;
8073 rp = (void *)skb->data;
/* SC-only mode omits the legacy P-192 values. */
8075 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8076 eir_len = 5 + 18 + 18;
8080 eir_len = 5 + 18 + 18 + 18 + 18;
8090 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8097 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8098 hdev->dev_class, 3);
8101 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8102 EIR_SSP_HASH_C192, h192, 16);
8103 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8104 EIR_SSP_RAND_R192, r192, 16);
8108 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8109 EIR_SSP_HASH_C256, h256, 16);
8110 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8111 EIR_SSP_RAND_R256, r256, 16);
8115 mgmt_rp->type = mgmt_cp->type;
8116 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8118 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8119 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8120 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8121 if (err < 0 || status)
/* Broadcast to sockets that opted into OOB data events. */
8124 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8126 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8127 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8128 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8130 if (skb && !IS_ERR(skb))
8134 mgmt_pending_remove(cmd);
/* Queue an asynchronous local OOB data read for the BR/EDR case of
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA; read_local_oob_ext_data_complete()
 * finishes the reply. The pending command is removed on queueing
 * failure.
 */
8137 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8138 struct mgmt_cp_read_local_oob_ext_data *cp)
8140 struct mgmt_pending_cmd *cmd;
8143 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8148 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8149 read_local_oob_ext_data_complete);
8152 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 * cp->type selects the transport: BIT(BDADDR_BREDR) defers to the
 * asynchronous controller read via read_local_ssp_oob_req(); the LE
 * public|random combination is answered synchronously with SMP-generated
 * OOB material (address, role, optional SC confirm/random, flags) packed
 * as EIR data. The reply is also broadcast as
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED.
 */
8159 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8160 void *data, u16 data_len)
8162 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8163 struct mgmt_rp_read_local_oob_ext_data *rp;
8166 u8 status, flags, role, addr[7], hash[16], rand[16];
8169 bt_dev_dbg(hdev, "sock %p", sk);
/* First pass: size the EIR payload and pre-compute the status. */
8171 if (hdev_is_powered(hdev)) {
8173 case BIT(BDADDR_BREDR):
8174 status = mgmt_bredr_support(hdev);
8180 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8181 status = mgmt_le_support(hdev);
8185 eir_len = 9 + 3 + 18 + 18 + 3;
8188 status = MGMT_STATUS_INVALID_PARAMS;
8193 status = MGMT_STATUS_NOT_POWERED;
8197 rp_len = sizeof(*rp) + eir_len;
8198 rp = kmalloc(rp_len, GFP_ATOMIC);
8202 if (!status && !lmp_ssp_capable(hdev)) {
8203 status = MGMT_STATUS_NOT_SUPPORTED;
/* Second pass: fill in the per-transport payload. */
8214 case BIT(BDADDR_BREDR):
8215 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* Asynchronous path; reply comes from the completion callback. */
8216 err = read_local_ssp_oob_req(hdev, sk, cp);
8217 hci_dev_unlock(hdev);
8221 status = MGMT_STATUS_FAILED;
8224 eir_len = eir_append_data(rp->eir, eir_len,
8226 hdev->dev_class, 3);
8229 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8230 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8231 smp_generate_oob(hdev, hash, rand) < 0) {
8232 hci_dev_unlock(hdev);
8233 status = MGMT_STATUS_FAILED;
8237 /* This should return the active RPA, but since the RPA
8238 * is only programmed on demand, it is really hard to fill
8239 * this in at the moment. For now disallow retrieving
8240 * local out-of-band data when privacy is in use.
8242 * Returning the identity address will not help here since
8243 * pairing happens before the identity resolving key is
8244 * known and thus the connection establishment happens
8245 * based on the RPA and not the identity address.
8247 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8248 hci_dev_unlock(hdev);
8249 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address per the same rules used for
 * advertising address selection.
 */
8253 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8254 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8255 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8256 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8257 memcpy(addr, &hdev->static_addr, 6);
8260 memcpy(addr, &hdev->bdaddr, 6);
8264 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8265 addr, sizeof(addr));
8267 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8272 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8273 &role, sizeof(role));
8275 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8276 eir_len = eir_append_data(rp->eir, eir_len,
8278 hash, sizeof(hash));
8280 eir_len = eir_append_data(rp->eir, eir_len,
8282 rand, sizeof(rand));
8285 flags = mgmt_get_adv_discov_flags(hdev);
8287 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8288 flags |= LE_AD_NO_BREDR;
8290 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8291 &flags, sizeof(flags));
8295 hci_dev_unlock(hdev);
8297 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8299 status = MGMT_STATUS_SUCCESS;
8302 rp->type = cp->type;
8303 rp->eir_len = cpu_to_le16(eir_len);
8305 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8306 status, rp, sizeof(*rp) + eir_len);
8307 if (err < 0 || status)
8310 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8311 rp, sizeof(*rp) + eir_len,
8312 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT advertising flags this controller supports.
 * The first group of flags is advertised unconditionally; TX_POWER and the
 * secondary-PHY (SEC_*) flags depend on controller capabilities.
 * NOTE(review): the extraction has elided lines here (e.g. the flags
 * initializer and the final return) — confirm against the upstream source.
 */
8320 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8324 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8325 flags |= MGMT_ADV_FLAG_DISCOV;
8326 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8327 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8328 flags |= MGMT_ADV_FLAG_APPEARANCE;
8329 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8330 flags |= MGMT_ADV_PARAM_DURATION;
8331 flags |= MGMT_ADV_PARAM_TIMEOUT;
8332 flags |= MGMT_ADV_PARAM_INTERVALS;
8333 flags |= MGMT_ADV_PARAM_TX_POWER;
8334 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8336 /* In extended adv TX_POWER returned from Set Adv Param
8337 * will be always valid.
/* TX power control is offered if the controller reported a valid adv TX
 * power, or always with extended advertising. */
8339 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8340 flags |= MGMT_ADV_FLAG_TX_POWER;
8342 if (ext_adv_capable(hdev)) {
8343 flags |= MGMT_ADV_FLAG_SEC_1M;
8344 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8345 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
/* 2M and Coded PHY flags require the corresponding LE PHY support. */
8347 if (le_2m_capable(hdev))
8348 flags |= MGMT_ADV_FLAG_SEC_2M;
8350 if (le_coded_capable(hdev))
8351 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, data-length limits, max/current instance counts and the list of
 * active instance IDs. Rejected on controllers without LE support. The
 * response is variable length: one byte per advertising instance.
 * NOTE(review): lines elided by extraction (rp_len/instance/err
 * declarations, NULL check after kmalloc, kfree) — confirm upstream.
 */
8357 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8358 void *data, u16 data_len)
8360 struct mgmt_rp_read_adv_features *rp;
8363 struct adv_info *adv_instance;
8364 u32 supported_flags;
8367 bt_dev_dbg(hdev, "sock %p", sk);
8369 if (!lmp_le_capable(hdev))
8370 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8371 MGMT_STATUS_REJECTED);
/* One trailing byte per instance for the instance-ID list. */
8375 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8376 rp = kmalloc(rp_len, GFP_ATOMIC);
8378 hci_dev_unlock(hdev);
8382 supported_flags = get_supported_adv_flags(hdev);
8384 rp->supported_flags = cpu_to_le32(supported_flags);
8385 rp->max_adv_data_len = max_adv_len(hdev);
8386 rp->max_scan_rsp_len = max_adv_len(hdev);
8387 rp->max_instances = hdev->le_num_of_adv_sets;
8388 rp->num_instances = hdev->adv_instance_cnt;
8390 instance = rp->instance;
8391 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8392 /* Only instances 1-le_num_of_adv_sets are externally visible */
8393 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8394 *instance = adv_instance->instance;
8397 rp->num_instances--;
8402 hci_dev_unlock(hdev);
8404 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8405 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Return the number of bytes the local name occupies when encoded as an
 * EIR field, by encoding it into a scratch buffer at offset 0.
 */
8412 static u8 calculate_name_len(struct hci_dev *hdev)
8414 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8416 return eir_append_local_name(hdev, buf, 0);
/* Compute how many bytes of caller-supplied TLV data fit in an advertising
 * (or scan response) PDU once the kernel-managed fields implied by
 * @adv_flags (flags field, TX power, local name, appearance) are
 * subtracted from the controller maximum.
 * NOTE(review): several subtraction lines and the return appear elided by
 * the extraction — confirm the exact byte counts against upstream.
 */
8419 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8422 u8 max_len = max_adv_len(hdev);
8425 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8426 MGMT_ADV_FLAG_LIMITED_DISCOV |
8427 MGMT_ADV_FLAG_MANAGED_FLAGS))
8430 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8433 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8434 max_len -= calculate_name_len(hdev);
8436 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the EIR Flags field for these adv flags,
 * i.e. userspace must not supply its own EIR_FLAGS TLV. */
8443 static bool flags_managed(u32 adv_flags)
8445 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8446 MGMT_ADV_FLAG_LIMITED_DISCOV |
8447 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel manages the EIR TX-power field for these adv flags. */
8450 static bool tx_power_managed(u32 adv_flags)
8452 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel manages the local-name EIR field for these adv flags. */
8455 static bool name_managed(u32 adv_flags)
8457 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel manages the appearance EIR field for these adv flags. */
8460 static bool appearance_managed(u32 adv_flags)
8462 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate userspace-supplied advertising/scan-response TLV data:
 * it must fit within tlv_data_max_len() for the given flags, be correctly
 * length-framed, and must not contain EIR fields the kernel manages
 * itself (flags, TX power, local name, appearance) per the *_managed()
 * predicates above.
 * NOTE(review): extraction has elided lines (len check vs max_len,
 * cur_len assignment, return statements) — confirm upstream.
 */
8465 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8466 u8 len, bool is_adv_data)
8471 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8476 /* Make sure that the data is correctly formatted. */
8477 for (i = 0; i < len; i += (cur_len + 1)) {
/* EIR_FLAGS is only allowed in adv data, and only when the kernel
 * is not managing the flags field itself. */
8483 if (data[i + 1] == EIR_FLAGS &&
8484 (!is_adv_data || flags_managed(adv_flags)))
8487 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8490 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8493 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8496 if (data[i + 1] == EIR_APPEARANCE &&
8497 appearance_managed(adv_flags))
8500 /* If the current field length would exceed the total data
8501 * length, then it's invalid.
8503 if (i + cur_len >= len)
/* Check that requested advertising flags are a subset of what the
 * controller supports and that at most one secondary-PHY (SEC_*) flag is
 * set: (phy_flags & -phy_flags) isolates the lowest set bit, so the XOR
 * is non-zero exactly when more than one SEC bit is set.
 */
8510 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8512 u32 supported_flags, phy_flags;
8514 /* The current implementation only supports a subset of the specified
8515 * flags. Also need to check mutual exclusiveness of sec flags.
8517 supported_flags = get_supported_adv_flags(hdev);
8518 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8519 if (adv_flags & ~supported_flags ||
8520 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* True while a Set LE command is pending; advertising changes must wait. */
8526 static bool adv_busy(struct hci_dev *hdev)
8528 return pending_find(MGMT_OP_SET_LE, hdev);
/* Common completion cleanup for Add Advertising: on error, walk the
 * instance list, clear pending state and remove/announce removal of the
 * failed instance, cancelling the rotation timer if it was the current
 * one.
 * NOTE(review): extraction has elided lines (err check, pending/instance
 * match tests, hci_dev_lock) — confirm upstream.
 */
8531 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8534 struct adv_info *adv, *n;
8536 bt_dev_dbg(hdev, "err %d", err);
8540 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8547 adv->pending = false;
8551 instance = adv->instance;
8553 if (hdev->cur_adv_instance == instance)
8554 cancel_adv_timeout(hdev);
8556 hci_remove_adv_instance(hdev, instance);
8557 mgmt_advertising_removed(sk, hdev, instance);
8560 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_ADD_ADVERTISING: reply to the
 * requesting socket with status (on error) or the instance number (on
 * success), run the common error cleanup, then free the pending command.
 */
8563 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8565 struct mgmt_pending_cmd *cmd = data;
8566 struct mgmt_cp_add_advertising *cp = cmd->param;
8567 struct mgmt_rp_add_advertising rp;
8569 memset(&rp, 0, sizeof(rp));
8571 rp.instance = cp->instance;
8574 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8577 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8578 mgmt_status(err), &rp, sizeof(rp));
8580 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8582 mgmt_pending_free(cmd);
/* hci_cmd_sync work: schedule the requested advertising instance,
 * forcing it to become the active one (force == true). */
8585 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8587 struct mgmt_pending_cmd *cmd = data;
8588 struct mgmt_cp_add_advertising *cp = cmd->param;
8590 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* Handler for MGMT_OP_ADD_ADVERTISING: validate the request (LE support,
 * instance range, payload length, flags, TLV contents), register/update
 * the instance, and either reply immediately (nothing to program into the
 * controller) or queue the work that programs and schedules it.
 * NOTE(review): extraction elided lines (status/err/flags declarations,
 * hci_dev_lock, IS_ERR(adv) test, some goto labels/braces) — confirm
 * upstream before relying on exact control flow.
 */
8593 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8594 void *data, u16 data_len)
8596 struct mgmt_cp_add_advertising *cp = data;
8597 struct mgmt_rp_add_advertising rp;
8600 u16 timeout, duration;
8601 unsigned int prev_instance_cnt;
8602 u8 schedule_instance = 0;
8603 struct adv_info *adv, *next_instance;
8605 struct mgmt_pending_cmd *cmd;
8607 bt_dev_dbg(hdev, "sock %p", sk);
8609 status = mgmt_le_support(hdev);
8611 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Instance IDs are 1-based and bounded by the controller's adv sets. */
8614 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8615 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8616 MGMT_STATUS_INVALID_PARAMS);
/* Total payload must match header + adv data + scan rsp data exactly. */
8618 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8619 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8620 MGMT_STATUS_INVALID_PARAMS);
8622 flags = __le32_to_cpu(cp->flags);
8623 timeout = __le16_to_cpu(cp->timeout);
8624 duration = __le16_to_cpu(cp->duration);
8626 if (!requested_adv_flags_are_valid(hdev, flags))
8627 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8628 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be honoured while powered off. */
8632 if (timeout && !hdev_is_powered(hdev)) {
8633 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8634 MGMT_STATUS_REJECTED);
8638 if (adv_busy(hdev)) {
8639 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8644 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8645 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8646 cp->scan_rsp_len, false)) {
8647 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8648 MGMT_STATUS_INVALID_PARAMS);
8652 prev_instance_cnt = hdev->adv_instance_cnt;
8654 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8655 cp->adv_data_len, cp->data,
8657 cp->data + cp->adv_data_len,
8659 HCI_ADV_TX_POWER_NO_PREFERENCE,
8660 hdev->le_adv_min_interval,
8661 hdev->le_adv_max_interval, 0);
8663 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8664 MGMT_STATUS_FAILED);
8668 /* Only trigger an advertising added event if a new instance was
8671 if (hdev->adv_instance_cnt > prev_instance_cnt)
8672 mgmt_advertising_added(sk, hdev, cp->instance);
8674 if (hdev->cur_adv_instance == cp->instance) {
8675 /* If the currently advertised instance is being changed then
8676 * cancel the current advertising and schedule the next
8677 * instance. If there is only one instance then the overridden
8678 * advertising data will be visible right away.
8680 cancel_adv_timeout(hdev);
8682 next_instance = hci_get_next_instance(hdev, cp->instance);
8684 schedule_instance = next_instance->instance;
8685 } else if (!hdev->adv_instance_timeout) {
8686 /* Immediately advertise the new instance if no other
8687 * instance is currently being advertised.
8689 schedule_instance = cp->instance;
8692 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8693 * there is no instance to be advertised then we have no HCI
8694 * communication to make. Simply return.
8696 if (!hdev_is_powered(hdev) ||
8697 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8698 !schedule_instance) {
8699 rp.instance = cp->instance;
8700 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8701 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8705 /* We're good to go, update advertising data, parameters, and start
8708 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
/* The pending command carries the instance actually being scheduled,
 * which may differ from the one the caller named. */
8715 cp->instance = schedule_instance;
8717 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8718 add_advertising_complete);
8720 mgmt_pending_free(cmd);
8723 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_ADD_EXT_ADV_PARAMS: on success
 * reply with the selected TX power and the space remaining for adv/scan
 * response data given the requested flags; on failure remove the
 * just-created instance (announcing removal if it was live) and reply
 * with an error status.
 * NOTE(review): extraction elided lines (hci_dev_lock, !adv test, err
 * branching braces) — confirm upstream.
 */
8728 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8731 struct mgmt_pending_cmd *cmd = data;
8732 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8733 struct mgmt_rp_add_ext_adv_params rp;
8734 struct adv_info *adv;
8737 BT_DBG("%s", hdev->name);
8741 adv = hci_find_adv_instance(hdev, cp->instance);
8745 rp.instance = cp->instance;
8746 rp.tx_power = adv->tx_power;
8748 /* While we're at it, inform userspace of the available space for this
8749 * advertisement, given the flags that will be used.
8751 flags = __le32_to_cpu(cp->flags);
8752 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8753 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8756 /* If this advertisement was previously advertising and we
8757 * failed to update it, we signal that it has been removed and
8758 * delete its structure
8761 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8763 hci_remove_adv_instance(hdev, cp->instance);
8765 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8768 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8769 mgmt_status(err), &rp, sizeof(rp));
8774 mgmt_pending_free(cmd);
8776 hci_dev_unlock(hdev);
/* hci_cmd_sync work: program the extended advertising parameters for the
 * requested instance into the controller. */
8779 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8781 struct mgmt_pending_cmd *cmd = data;
8782 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8784 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: first half of the split
 * Add Advertising interface. Validates the request, creates an instance
 * with parameters only (no data yet), then either queues the HCI work
 * (extended advertising) or replies immediately with defaults (legacy).
 * Unlike the legacy command, this one requires the adapter to be powered.
 * NOTE(review): extraction elided lines (tx_power/status/err declarations,
 * hci_dev_lock, IS_ERR(adv) test, cmd NULL check, else branch braces) —
 * confirm upstream.
 */
8787 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8788 void *data, u16 data_len)
8790 struct mgmt_cp_add_ext_adv_params *cp = data;
8791 struct mgmt_rp_add_ext_adv_params rp;
8792 struct mgmt_pending_cmd *cmd = NULL;
8793 struct adv_info *adv;
8794 u32 flags, min_interval, max_interval;
8795 u16 timeout, duration;
8800 BT_DBG("%s", hdev->name);
8802 status = mgmt_le_support(hdev);
8804 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8807 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8808 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8809 MGMT_STATUS_INVALID_PARAMS);
8811 /* The purpose of breaking add_advertising into two separate MGMT calls
8812 * for params and data is to allow more parameters to be added to this
8813 * structure in the future. For this reason, we verify that we have the
8814 * bare minimum structure we know of when the interface was defined. Any
8815 * extra parameters we don't know about will be ignored in this request.
8817 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8818 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8819 MGMT_STATUS_INVALID_PARAMS);
8821 flags = __le32_to_cpu(cp->flags);
8823 if (!requested_adv_flags_are_valid(hdev, flags))
8824 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8825 MGMT_STATUS_INVALID_PARAMS);
8829 /* In new interface, we require that we are powered to register */
8830 if (!hdev_is_powered(hdev)) {
8831 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8832 MGMT_STATUS_REJECTED);
8836 if (adv_busy(hdev)) {
8837 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8842 /* Parse defined parameters from request, use defaults otherwise */
8843 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8844 __le16_to_cpu(cp->timeout) : 0;
8846 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8847 __le16_to_cpu(cp->duration) :
8848 hdev->def_multi_adv_rotation_duration;
8850 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8851 __le32_to_cpu(cp->min_interval) :
8852 hdev->le_adv_min_interval;
8854 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8855 __le32_to_cpu(cp->max_interval) :
8856 hdev->le_adv_max_interval;
8858 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8860 HCI_ADV_TX_POWER_NO_PREFERENCE;
8862 /* Create advertising instance with no advertising or response data */
8863 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8864 timeout, duration, tx_power, min_interval,
8868 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8869 MGMT_STATUS_FAILED);
8873 /* Submit request for advertising params if ext adv available */
8874 if (ext_adv_capable(hdev)) {
8875 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
/* Allocation failure: roll back the instance created above. */
8879 hci_remove_adv_instance(hdev, cp->instance);
8883 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8884 add_ext_adv_params_complete);
8886 mgmt_pending_free(cmd);
/* Legacy advertising: nothing to program yet, answer right away. */
8888 rp.instance = cp->instance;
8889 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8890 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8891 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8892 err = mgmt_cmd_complete(sk, hdev->id,
8893 MGMT_OP_ADD_EXT_ADV_PARAMS,
8894 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8898 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_ADD_EXT_ADV_DATA: run the common
 * error cleanup first, then reply with status or the instance number and
 * free the pending command.
 */
8903 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8905 struct mgmt_pending_cmd *cmd = data;
8906 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8907 struct mgmt_rp_add_advertising rp;
8909 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8911 memset(&rp, 0, sizeof(rp));
8913 rp.instance = cp->instance;
8916 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8919 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8920 mgmt_status(err), &rp, sizeof(rp));
8922 mgmt_pending_free(cmd);
/* hci_cmd_sync work: push the new adv data and scan response data to the
 * controller. With extended advertising the updates are issued directly
 * and the set is (re-)enabled; otherwise fall back to scheduling the
 * instance through the software rotation path.
 * NOTE(review): error-check lines between the update calls appear elided
 * by the extraction — confirm upstream.
 */
8925 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8927 struct mgmt_pending_cmd *cmd = data;
8928 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8931 if (ext_adv_capable(hdev)) {
8932 err = hci_update_adv_data_sync(hdev, cp->instance);
8936 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8940 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8943 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: second half of the split
 * interface. Attaches adv/scan-response data to an instance previously
 * created by Add Ext Adv Params, validates it against the instance's
 * flags, and either replies immediately (nothing to program) or queues
 * the HCI update. On failure paths reached after the instance lookup the
 * new instance is torn down again (clear_new_instance label).
 * NOTE(review): extraction elided lines (err declaration, hci_dev_lock,
 * cmd NULL check, unlock label, some braces) — confirm upstream.
 */
8946 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8949 struct mgmt_cp_add_ext_adv_data *cp = data;
8950 struct mgmt_rp_add_ext_adv_data rp;
8951 u8 schedule_instance = 0;
8952 struct adv_info *next_instance;
8953 struct adv_info *adv_instance;
8955 struct mgmt_pending_cmd *cmd;
8957 BT_DBG("%s", hdev->name);
8961 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8963 if (!adv_instance) {
8964 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8965 MGMT_STATUS_INVALID_PARAMS);
8969 /* In new interface, we require that we are powered to register */
8970 if (!hdev_is_powered(hdev)) {
8971 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8972 MGMT_STATUS_REJECTED);
8973 goto clear_new_instance;
8976 if (adv_busy(hdev)) {
8977 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8979 goto clear_new_instance;
8982 /* Validate new data */
8983 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8984 cp->adv_data_len, true) ||
8985 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8986 cp->adv_data_len, cp->scan_rsp_len, false)) {
8987 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8988 MGMT_STATUS_INVALID_PARAMS);
8989 goto clear_new_instance;
8992 /* Set the data in the advertising instance */
8993 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8994 cp->data, cp->scan_rsp_len,
8995 cp->data + cp->adv_data_len);
8997 /* If using software rotation, determine next instance to use */
8998 if (hdev->cur_adv_instance == cp->instance) {
8999 /* If the currently advertised instance is being changed
9000 * then cancel the current advertising and schedule the
9001 * next instance. If there is only one instance then the
9002 * overridden advertising data will be visible right
9005 cancel_adv_timeout(hdev);
9007 next_instance = hci_get_next_instance(hdev, cp->instance);
9009 schedule_instance = next_instance->instance;
9010 } else if (!hdev->adv_instance_timeout) {
9011 /* Immediately advertise the new instance if no other
9012 * instance is currently being advertised.
9014 schedule_instance = cp->instance;
9017 /* If the HCI_ADVERTISING flag is set or there is no instance to
9018 * be advertised then we have no HCI communication to make.
9021 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9022 if (adv_instance->pending) {
9023 mgmt_advertising_added(sk, hdev, cp->instance);
9024 adv_instance->pending = false;
9026 rp.instance = cp->instance;
9027 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9028 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9032 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9036 goto clear_new_instance;
9039 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9040 add_ext_adv_data_complete);
9042 mgmt_pending_free(cmd);
9043 goto clear_new_instance;
9046 /* We were successful in updating data, so trigger advertising_added
9047 * event if this is an instance that wasn't previously advertising. If
9048 * a failure occurs in the requests we initiated, we will remove the
9049 * instance again in add_advertising_complete
9051 if (adv_instance->pending)
9052 mgmt_advertising_added(sk, hdev, cp->instance)
9057 hci_remove_adv_instance(hdev, cp->instance);
9060 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_REMOVE_ADVERTISING: reply to the
 * requester with status (on error) or the removed instance number, then
 * free the pending command.
 */
9065 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9068 struct mgmt_pending_cmd *cmd = data;
9069 struct mgmt_cp_remove_advertising *cp = cmd->param;
9070 struct mgmt_rp_remove_advertising rp;
9072 bt_dev_dbg(hdev, "err %d", err);
9074 memset(&rp, 0, sizeof(rp));
9075 rp.instance = cp->instance;
9078 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9081 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9082 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9084 mgmt_pending_free(cmd);
/* hci_cmd_sync work: remove the requested advertising instance(s) and,
 * when no instances remain, disable advertising altogether.
 * NOTE(review): the error check between the two calls appears elided by
 * the extraction — confirm upstream.
 */
9087 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9089 struct mgmt_pending_cmd *cmd = data;
9090 struct mgmt_cp_remove_advertising *cp = cmd->param;
9093 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9097 if (list_empty(&hdev->adv_instances))
9098 err = hci_disable_advertising_sync(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING: instance 0 means "remove all",
 * otherwise the named instance must exist. Rejected while a Set LE
 * command is pending or when there is nothing to remove; the actual
 * removal runs via hci_cmd_sync.
 */
9103 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9104 void *data, u16 data_len)
9106 struct mgmt_cp_remove_advertising *cp = data;
9107 struct mgmt_pending_cmd *cmd;
9110 bt_dev_dbg(hdev, "sock %p", sk);
9114 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9115 err = mgmt_cmd_status(sk, hdev->id,
9116 MGMT_OP_REMOVE_ADVERTISING,
9117 MGMT_STATUS_INVALID_PARAMS);
9121 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9122 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9127 if (list_empty(&hdev->adv_instances)) {
9128 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9129 MGMT_STATUS_INVALID_PARAMS);
9133 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9140 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9141 remove_advertising_complete);
9143 mgmt_pending_free(cmd);
9146 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: report, for a given instance ID
 * and flag set, how many bytes of adv data and scan response data remain
 * once the kernel-managed fields implied by the flags are accounted for.
 * Pure query — no state is modified.
 */
9151 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9152 void *data, u16 data_len)
9154 struct mgmt_cp_get_adv_size_info *cp = data;
9155 struct mgmt_rp_get_adv_size_info rp;
9156 u32 flags, supported_flags;
9158 bt_dev_dbg(hdev, "sock %p", sk);
9160 if (!lmp_le_capable(hdev))
9161 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9162 MGMT_STATUS_REJECTED);
9164 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9165 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9166 MGMT_STATUS_INVALID_PARAMS);
9168 flags = __le32_to_cpu(cp->flags);
9170 /* The current implementation only supports a subset of the specified
9173 supported_flags = get_supported_adv_flags(hdev);
9174 if (flags & ~supported_flags)
9175 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9176 MGMT_STATUS_INVALID_PARAMS);
9178 rp.instance = cp->instance;
9179 rp.flags = cp->flags;
9180 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9181 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9183 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9184 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* MGMT command dispatch table, indexed by opcode (entry 0 is unused).
 * Each entry pairs a handler with the command's minimum parameter size
 * and optional flags: HCI_MGMT_UNTRUSTED (allowed on untrusted sockets),
 * HCI_MGMT_UNCONFIGURED (allowed while unconfigured),
 * HCI_MGMT_HDEV_OPTIONAL (no controller index required).
 * NOTE(review): some entries' flag lines appear elided by the extraction
 * (e.g. VAR_LEN markers) — confirm upstream.
 */
9187 static const struct hci_mgmt_handler mgmt_handlers[] = {
9188 { NULL }, /* 0x0000 (no command) */
9189 { read_version, MGMT_READ_VERSION_SIZE,
9191 HCI_MGMT_UNTRUSTED },
9192 { read_commands, MGMT_READ_COMMANDS_SIZE,
9194 HCI_MGMT_UNTRUSTED },
9195 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9197 HCI_MGMT_UNTRUSTED },
9198 { read_controller_info, MGMT_READ_INFO_SIZE,
9199 HCI_MGMT_UNTRUSTED },
9200 { set_powered, MGMT_SETTING_SIZE },
9201 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9202 { set_connectable, MGMT_SETTING_SIZE },
9203 { set_fast_connectable, MGMT_SETTING_SIZE },
9204 { set_bondable, MGMT_SETTING_SIZE },
9205 { set_link_security, MGMT_SETTING_SIZE },
9206 { set_ssp, MGMT_SETTING_SIZE },
9207 { set_hs, MGMT_SETTING_SIZE },
9208 { set_le, MGMT_SETTING_SIZE },
9209 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9210 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9211 { add_uuid, MGMT_ADD_UUID_SIZE },
9212 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9213 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9215 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9217 { disconnect, MGMT_DISCONNECT_SIZE },
9218 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9219 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9220 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9221 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9222 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9223 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9224 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9225 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9226 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9227 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9228 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9229 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9230 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9232 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9233 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9234 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9235 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9236 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9237 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9238 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9239 { set_advertising, MGMT_SETTING_SIZE },
9240 { set_bredr, MGMT_SETTING_SIZE },
9241 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9242 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9243 { set_secure_conn, MGMT_SETTING_SIZE },
9244 { set_debug_keys, MGMT_SETTING_SIZE },
9245 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9246 { load_irks, MGMT_LOAD_IRKS_SIZE,
9248 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9249 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9250 { add_device, MGMT_ADD_DEVICE_SIZE },
9251 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9252 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9254 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9256 HCI_MGMT_UNTRUSTED },
9257 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9258 HCI_MGMT_UNCONFIGURED |
9259 HCI_MGMT_UNTRUSTED },
9260 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9261 HCI_MGMT_UNCONFIGURED },
9262 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9263 HCI_MGMT_UNCONFIGURED },
9264 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9266 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9267 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9269 HCI_MGMT_UNTRUSTED },
9270 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9271 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9273 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9274 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9275 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9276 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9277 HCI_MGMT_UNTRUSTED },
9278 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9279 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9280 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9281 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9283 { set_wideband_speech, MGMT_SETTING_SIZE },
9284 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9285 HCI_MGMT_UNTRUSTED },
9286 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9287 HCI_MGMT_UNTRUSTED |
9288 HCI_MGMT_HDEV_OPTIONAL },
9289 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9291 HCI_MGMT_HDEV_OPTIONAL },
9292 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9293 HCI_MGMT_UNTRUSTED },
9294 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9296 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9297 HCI_MGMT_UNTRUSTED },
9298 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9300 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9301 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9302 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9303 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9305 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9306 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9308 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9310 { add_adv_patterns_monitor_rssi,
9311 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9313 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9315 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9316 { mesh_send, MGMT_MESH_SEND_SIZE,
9318 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
/* Announce a newly registered controller: quirky raw devices are skipped;
 * unconfigured controllers get UNCONF_INDEX_ADDED, configured ones
 * INDEX_ADDED, and an EXT_INDEX_ADDED event (with type/bus details) is
 * always sent to sockets that subscribed to extended index events.
 * NOTE(review): extraction elided the switch cases and the ev field
 * assignments — confirm upstream.
 */
9321 void mgmt_index_added(struct hci_dev *hdev)
9323 struct mgmt_ev_ext_index ev;
9325 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9328 switch (hdev->dev_type) {
9330 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9331 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9332 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9335 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9336 HCI_MGMT_INDEX_EVENTS);
9349 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9350 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal: fail all pending MGMT commands with
 * INVALID_INDEX, emit the matching (UNCONF_)INDEX_REMOVED and
 * EXT_INDEX_REMOVED events, and cancel the delayed work items that only
 * make sense while the MGMT interface was active.
 * NOTE(review): extraction elided the switch cases, ev assignments and a
 * return between the HCI_MGMT test and the cancel calls — confirm
 * upstream.
 */
9353 void mgmt_index_removed(struct hci_dev *hdev)
9355 struct mgmt_ev_ext_index ev;
9356 u8 status = MGMT_STATUS_INVALID_INDEX;
9358 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9361 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command. */
9363 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9365 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9366 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9367 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9370 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9371 HCI_MGMT_INDEX_EVENTS);
9384 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9385 HCI_MGMT_EXT_INDEX_EVENTS);
9387 /* Cancel any remaining timed work */
9388 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9390 cancel_delayed_work_sync(&hdev->discov_off);
9391 cancel_delayed_work_sync(&hdev->service_cache);
9392 cancel_delayed_work_sync(&hdev->rpa_expired);
/* Power-on completion: on success restart stored LE auto-connect actions
 * and passive scanning, then answer all pending Set Powered commands and
 * broadcast the new settings.
 * NOTE(review): extraction elided lines (hci_dev_lock, err check) —
 * confirm upstream.
 */
9395 void mgmt_power_on(struct hci_dev *hdev, int err)
9397 struct cmd_lookup match = { NULL, hdev };
9399 bt_dev_dbg(hdev, "err %d", err);
9404 restart_le_actions(hdev);
9405 hci_update_passive_scan(hdev);
9408 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9410 new_settings(hdev, match.sk);
9415 hci_dev_unlock(hdev);
/* Power-off handling: answer pending Set Powered commands, fail every
 * other pending command with INVALID_INDEX (unregistering) or NOT_POWERED,
 * announce a zeroed class of device if one was set, and broadcast the new
 * settings.
 */
9418 void __mgmt_power_off(struct hci_dev *hdev)
9420 struct cmd_lookup match = { NULL, hdev };
9421 u8 status, zero_cod[] = { 0, 0, 0 };
9423 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9425 /* If the power off is because of hdev unregistration let
9426 * use the appropriate INVALID_INDEX status. Otherwise use
9427 * NOT_POWERED. We cover both scenarios here since later in
9428 * mgmt_index_removed() any hci_conn callbacks will have already
9429 * been triggered, potentially causing misleading DISCONNECTED
9432 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9433 status = MGMT_STATUS_INVALID_INDEX;
9435 status = MGMT_STATUS_NOT_POWERED;
9437 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9439 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9440 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9441 zero_cod, sizeof(zero_cod),
9442 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9443 ext_info_changed(hdev, NULL);
9446 new_settings(hdev, match.sk);
/* Report a failed power-on attempt to the pending Set Powered command:
 * -ERFKILL maps to RFKILLED, everything else to FAILED.
 */
9452 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9454 struct mgmt_pending_cmd *cmd;
9457 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9461 if (err == -ERFKILL)
9462 status = MGMT_STATUS_RFKILLED;
9464 status = MGMT_STATUS_FAILED;
9466 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9468 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key.
 * store_hint tells userspace whether the key should be persisted.
 */
9471 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9474 struct mgmt_ev_new_link_key ev;
9476 memset(&ev, 0, sizeof(ev));
9478 ev.store_hint = persistent;
9479 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9480 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9481 ev.key.type = key->type;
9482 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9483 ev.key.pin_len = key->pin_len;
9485 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated flag onto the
 * MGMT_LTK_* constants reported to userspace; unknown types default to
 * unauthenticated.
 * NOTE(review): extraction elided some case labels (e.g. the P256 case
 * before line 9497) — confirm upstream.
 */
9488 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9490 switch (ltk->type) {
9492 case SMP_LTK_RESPONDER:
9493 if (ltk->authenticated)
9494 return MGMT_LTK_AUTHENTICATED;
9495 return MGMT_LTK_UNAUTHENTICATED;
9497 if (ltk->authenticated)
9498 return MGMT_LTK_P256_AUTH;
9499 return MGMT_LTK_P256_UNAUTH;
9500 case SMP_LTK_P256_DEBUG:
9501 return MGMT_LTK_P256_DEBUG;
9504 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long term key. Keys bound
 * to non-static random addresses (top two address bits != 0b11) get
 * store_hint 0 since the peer address will not be stable; key material is
 * truncated/zero-padded to the negotiated encryption key size.
 */
9507 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9509 struct mgmt_ev_new_long_term_key ev;
9511 memset(&ev, 0, sizeof(ev));
9513 /* Devices using resolvable or non-resolvable random addresses
9514 * without providing an identity resolving key don't require
9515 * to store long term keys. Their addresses will change the
9518 * Only when a remote device provides an identity address
9519 * make sure the long term key is stored. If the remote
9520 * identity is known, the long term keys are internally
9521 * mapped to the identity address. So allow static random
9522 * and public addresses here.
9524 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9525 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9526 ev.store_hint = 0x00;
9528 ev.store_hint = persistent;
9530 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9531 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9532 ev.key.type = mgmt_ltk_type(key);
9533 ev.key.enc_size = key->enc_size;
9534 ev.key.ediv = key->ediv;
9535 ev.key.rand = key->rand;
/* SMP_LTK marks the key created as initiator of the pairing. */
9537 if (key->type == SMP_LTK)
9538 ev.key.initiator = 1;
9540 /* Make sure we copy only the significant bytes based on the
9541 * encryption key size, and set the rest of the value to zeroes.
9543 memcpy(ev.key.val, key->val, key->enc_size);
9544 memset(ev.key.val + key->enc_size, 0,
9545 sizeof(ev.key.val) - key->enc_size);
9547 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9550 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9552 struct mgmt_ev_new_irk ev;
9554 memset(&ev, 0, sizeof(ev));
9556 ev.store_hint = persistent;
9558 bacpy(&ev.rpa, &irk->rpa);
9559 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9560 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
9561 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9563 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9566 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9569 struct mgmt_ev_new_csrk ev;
9571 memset(&ev, 0, sizeof(ev));
9573 /* Devices using resolvable or non-resolvable random addresses
9574 * without providing an identity resolving key don't require
9575 * to store signature resolving keys. Their addresses will change
9576 * the next time around.
9578 * Only when a remote device provides an identity address
9579 * make sure the signature resolving key is stored. So allow
9580 * static random and public addresses here.
9582 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9583 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9584 ev.store_hint = 0x00;
9586 ev.store_hint = persistent;
9588 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9589 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9590 ev.key.type = csrk->type;
9591 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9593 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9596 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9597 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9598 u16 max_interval, u16 latency, u16 timeout)
9600 struct mgmt_ev_new_conn_param ev;
9602 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9605 memset(&ev, 0, sizeof(ev));
9606 bacpy(&ev.addr.bdaddr, bdaddr);
9607 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9608 ev.store_hint = store_hint;
9609 ev.min_interval = cpu_to_le16(min_interval);
9610 ev.max_interval = cpu_to_le16(max_interval);
9611 ev.latency = cpu_to_le16(latency);
9612 ev.timeout = cpu_to_le16(timeout);
9614 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9617 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9618 u8 *name, u8 name_len)
9620 struct sk_buff *skb;
9621 struct mgmt_ev_device_connected *ev;
9625 if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9628 /* allocate buff for LE or BR/EDR adv */
9629 if (conn->le_adv_data_len > 0)
9630 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9631 sizeof(*ev) + conn->le_adv_data_len);
9633 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9634 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9635 eir_precalc_len(sizeof(conn->dev_class)));
9637 ev = skb_put(skb, sizeof(*ev));
9638 bacpy(&ev->addr.bdaddr, &conn->dst);
9639 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9642 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9644 ev->flags = __cpu_to_le32(flags);
9646 /* We must ensure that the EIR Data fields are ordered and
9647 * unique. Keep it simple for now and avoid the problem by not
9648 * adding any BR/EDR data to the LE adv.
9650 if (conn->le_adv_data_len > 0) {
9651 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9652 eir_len = conn->le_adv_data_len;
9655 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9657 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9658 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9659 conn->dev_class, sizeof(conn->dev_class));
9662 ev->eir_len = cpu_to_le16(eir_len);
9664 mgmt_event_skb(skb, NULL);
9667 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9669 struct sock **sk = data;
9671 cmd->cmd_complete(cmd, 0);
9676 mgmt_pending_remove(cmd);
9679 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9681 struct hci_dev *hdev = data;
9682 struct mgmt_cp_unpair_device *cp = cmd->param;
9684 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9686 cmd->cmd_complete(cmd, 0);
9687 mgmt_pending_remove(cmd);
9690 bool mgmt_powering_down(struct hci_dev *hdev)
9692 struct mgmt_pending_cmd *cmd;
9693 struct mgmt_mode *cp;
9695 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9706 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9707 u8 link_type, u8 addr_type, u8 reason,
9708 bool mgmt_connected)
9710 struct mgmt_ev_device_disconnected ev;
9711 struct sock *sk = NULL;
9713 if (!mgmt_connected)
9716 if (link_type != ACL_LINK && link_type != LE_LINK)
9719 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9721 bacpy(&ev.addr.bdaddr, bdaddr);
9722 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9725 /* Report disconnects due to suspend */
9726 if (hdev->suspended)
9727 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9729 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9734 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9738 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9739 u8 link_type, u8 addr_type, u8 status)
9741 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9742 struct mgmt_cp_disconnect *cp;
9743 struct mgmt_pending_cmd *cmd;
9745 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9748 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9754 if (bacmp(bdaddr, &cp->addr.bdaddr))
9757 if (cp->addr.type != bdaddr_type)
9760 cmd->cmd_complete(cmd, mgmt_status(status));
9761 mgmt_pending_remove(cmd);
9764 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9765 u8 addr_type, u8 status)
9767 struct mgmt_ev_connect_failed ev;
9769 bacpy(&ev.addr.bdaddr, bdaddr);
9770 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9771 ev.status = mgmt_status(status);
9773 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9776 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9778 struct mgmt_ev_pin_code_request ev;
9780 bacpy(&ev.addr.bdaddr, bdaddr);
9781 ev.addr.type = BDADDR_BREDR;
9784 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9787 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9790 struct mgmt_pending_cmd *cmd;
9792 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9796 cmd->cmd_complete(cmd, mgmt_status(status));
9797 mgmt_pending_remove(cmd);
9800 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9803 struct mgmt_pending_cmd *cmd;
9805 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9809 cmd->cmd_complete(cmd, mgmt_status(status));
9810 mgmt_pending_remove(cmd);
9813 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9814 u8 link_type, u8 addr_type, u32 value,
9817 struct mgmt_ev_user_confirm_request ev;
9819 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9821 bacpy(&ev.addr.bdaddr, bdaddr);
9822 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9823 ev.confirm_hint = confirm_hint;
9824 ev.value = cpu_to_le32(value);
9826 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9830 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9831 u8 link_type, u8 addr_type)
9833 struct mgmt_ev_user_passkey_request ev;
9835 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9837 bacpy(&ev.addr.bdaddr, bdaddr);
9838 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9840 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9844 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9845 u8 link_type, u8 addr_type, u8 status,
9848 struct mgmt_pending_cmd *cmd;
9850 cmd = pending_find(opcode, hdev);
9854 cmd->cmd_complete(cmd, mgmt_status(status));
9855 mgmt_pending_remove(cmd);
9860 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9861 u8 link_type, u8 addr_type, u8 status)
9863 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9864 status, MGMT_OP_USER_CONFIRM_REPLY);
9867 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9868 u8 link_type, u8 addr_type, u8 status)
9870 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9872 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9875 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9876 u8 link_type, u8 addr_type, u8 status)
9878 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9879 status, MGMT_OP_USER_PASSKEY_REPLY);
9882 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9883 u8 link_type, u8 addr_type, u8 status)
9885 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9887 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9890 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9891 u8 link_type, u8 addr_type, u32 passkey,
9894 struct mgmt_ev_passkey_notify ev;
9896 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9898 bacpy(&ev.addr.bdaddr, bdaddr);
9899 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9900 ev.passkey = __cpu_to_le32(passkey);
9901 ev.entered = entered;
9903 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9906 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9908 struct mgmt_ev_auth_failed ev;
9909 struct mgmt_pending_cmd *cmd;
9910 u8 status = mgmt_status(hci_status);
9912 bacpy(&ev.addr.bdaddr, &conn->dst);
9913 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9916 cmd = find_pairing(conn);
9918 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9919 cmd ? cmd->sk : NULL);
9922 cmd->cmd_complete(cmd, status);
9923 mgmt_pending_remove(cmd);
9927 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9929 struct cmd_lookup match = { NULL, hdev };
9933 u8 mgmt_err = mgmt_status(status);
9934 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9935 cmd_status_rsp, &mgmt_err);
9939 if (test_bit(HCI_AUTH, &hdev->flags))
9940 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9942 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9944 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9948 new_settings(hdev, match.sk);
9954 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9956 struct cmd_lookup *match = data;
9958 if (match->sk == NULL) {
9959 match->sk = cmd->sk;
9960 sock_hold(match->sk);
9964 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9967 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9969 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9970 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9971 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9974 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9975 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9976 ext_info_changed(hdev, NULL);
9983 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9985 struct mgmt_cp_set_local_name ev;
9986 struct mgmt_pending_cmd *cmd;
9991 memset(&ev, 0, sizeof(ev));
9992 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9993 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9995 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9997 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9999 /* If this is a HCI command related to powering on the
10000 * HCI dev don't send any mgmt signals.
10002 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10006 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10007 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10008 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10011 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10015 for (i = 0; i < uuid_count; i++) {
10016 if (!memcmp(uuid, uuids[i], 16))
10023 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10027 while (parsed < eir_len) {
10028 u8 field_len = eir[0];
10032 if (field_len == 0)
10035 if (eir_len - parsed < field_len + 1)
10039 case EIR_UUID16_ALL:
10040 case EIR_UUID16_SOME:
10041 for (i = 0; i + 3 <= field_len; i += 2) {
10042 memcpy(uuid, bluetooth_base_uuid, 16);
10043 uuid[13] = eir[i + 3];
10044 uuid[12] = eir[i + 2];
10045 if (has_uuid(uuid, uuid_count, uuids))
10049 case EIR_UUID32_ALL:
10050 case EIR_UUID32_SOME:
10051 for (i = 0; i + 5 <= field_len; i += 4) {
10052 memcpy(uuid, bluetooth_base_uuid, 16);
10053 uuid[15] = eir[i + 5];
10054 uuid[14] = eir[i + 4];
10055 uuid[13] = eir[i + 3];
10056 uuid[12] = eir[i + 2];
10057 if (has_uuid(uuid, uuid_count, uuids))
10061 case EIR_UUID128_ALL:
10062 case EIR_UUID128_SOME:
10063 for (i = 0; i + 17 <= field_len; i += 16) {
10064 memcpy(uuid, eir + i + 2, 16);
10065 if (has_uuid(uuid, uuid_count, uuids))
10071 parsed += field_len + 1;
10072 eir += field_len + 1;
10078 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10079 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10081 /* If a RSSI threshold has been specified, and
10082 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10083 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10084 * is set, let it through for further processing, as we might need to
10085 * restart the scan.
10087 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10088 * the results are also dropped.
10090 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10091 (rssi == HCI_RSSI_INVALID ||
10092 (rssi < hdev->discovery.rssi &&
10093 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10096 if (hdev->discovery.uuid_count != 0) {
10097 /* If a list of UUIDs is provided in filter, results with no
10098 * matching UUID should be dropped.
10100 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10101 hdev->discovery.uuids) &&
10102 !eir_has_uuids(scan_rsp, scan_rsp_len,
10103 hdev->discovery.uuid_count,
10104 hdev->discovery.uuids))
10108 /* If duplicate filtering does not report RSSI changes, then restart
10109 * scanning to ensure updated result with updated RSSI values.
10111 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10112 /* Validate RSSI value against the RSSI threshold once more. */
10113 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10114 rssi < hdev->discovery.rssi)
10121 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10122 bdaddr_t *bdaddr, u8 addr_type)
10124 struct mgmt_ev_adv_monitor_device_lost ev;
10126 ev.monitor_handle = cpu_to_le16(handle);
10127 bacpy(&ev.addr.bdaddr, bdaddr);
10128 ev.addr.type = addr_type;
10130 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10134 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10135 struct sk_buff *skb,
10136 struct sock *skip_sk,
10139 struct sk_buff *advmon_skb;
10140 size_t advmon_skb_len;
10141 __le16 *monitor_handle;
10146 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10147 sizeof(struct mgmt_ev_device_found)) + skb->len;
10148 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10153 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10154 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10155 * store monitor_handle of the matched monitor.
10157 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10158 *monitor_handle = cpu_to_le16(handle);
10159 skb_put_data(advmon_skb, skb->data, skb->len);
10161 mgmt_event_skb(advmon_skb, skip_sk);
10164 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10165 bdaddr_t *bdaddr, bool report_device,
10166 struct sk_buff *skb,
10167 struct sock *skip_sk)
10169 struct monitored_device *dev, *tmp;
10170 bool matched = false;
10171 bool notified = false;
10173 /* We have received the Advertisement Report because:
10174 * 1. the kernel has initiated active discovery
10175 * 2. if not, we have pend_le_reports > 0 in which case we are doing
10177 * 3. if none of the above is true, we have one or more active
10178 * Advertisement Monitor
10180 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10181 * and report ONLY one advertisement per device for the matched Monitor
10182 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10184 * For case 3, since we are not active scanning and all advertisements
10185 * received are due to a matched Advertisement Monitor, report all
10186 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10188 if (report_device && !hdev->advmon_pend_notify) {
10189 mgmt_event_skb(skb, skip_sk);
10193 hdev->advmon_pend_notify = false;
10195 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10196 if (!bacmp(&dev->bdaddr, bdaddr)) {
10199 if (!dev->notified) {
10200 mgmt_send_adv_monitor_device_found(hdev, skb,
10204 dev->notified = true;
10208 if (!dev->notified)
10209 hdev->advmon_pend_notify = true;
10212 if (!report_device &&
10213 ((matched && !notified) || !msft_monitor_supported(hdev))) {
10214 /* Handle 0 indicates that we are not active scanning and this
10215 * is a subsequent advertisement report for an already matched
10216 * Advertisement Monitor or the controller offloading support
10217 * is not available.
10219 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10223 mgmt_event_skb(skb, skip_sk);
10228 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10229 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10230 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10233 struct sk_buff *skb;
10234 struct mgmt_ev_mesh_device_found *ev;
10237 if (!hdev->mesh_ad_types[0])
10240 /* Scan for requested AD types */
10242 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10243 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10244 if (!hdev->mesh_ad_types[j])
10247 if (hdev->mesh_ad_types[j] == eir[i + 1])
10253 if (scan_rsp_len > 0) {
10254 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10255 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10256 if (!hdev->mesh_ad_types[j])
10259 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10268 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10269 sizeof(*ev) + eir_len + scan_rsp_len);
10273 ev = skb_put(skb, sizeof(*ev));
10275 bacpy(&ev->addr.bdaddr, bdaddr);
10276 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10278 ev->flags = cpu_to_le32(flags);
10279 ev->instant = cpu_to_le64(instant);
10282 /* Copy EIR or advertising data into event */
10283 skb_put_data(skb, eir, eir_len);
10285 if (scan_rsp_len > 0)
10286 /* Append scan response data to event */
10287 skb_put_data(skb, scan_rsp, scan_rsp_len);
10289 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10291 mgmt_event_skb(skb, NULL);
10294 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10295 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10296 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10299 struct sk_buff *skb;
10300 struct mgmt_ev_device_found *ev;
10301 bool report_device = hci_discovery_active(hdev);
10303 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10304 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10305 eir, eir_len, scan_rsp, scan_rsp_len,
10308 /* Don't send events for a non-kernel initiated discovery. With
10309 * LE one exception is if we have pend_le_reports > 0 in which
10310 * case we're doing passive scanning and want these events.
10312 if (!hci_discovery_active(hdev)) {
10313 if (link_type == ACL_LINK)
10315 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10316 report_device = true;
10317 else if (!hci_is_adv_monitoring(hdev))
10321 if (hdev->discovery.result_filtering) {
10322 /* We are using service discovery */
10323 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10328 if (hdev->discovery.limited) {
10329 /* Check for limited discoverable bit */
10331 if (!(dev_class[1] & 0x20))
10334 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10335 if (!flags || !(flags[0] & LE_AD_LIMITED))
10340 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10341 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10342 sizeof(*ev) + eir_len + scan_rsp_len + 5);
10346 ev = skb_put(skb, sizeof(*ev));
10348 /* In case of device discovery with BR/EDR devices (pre 1.2), the
10349 * RSSI value was reported as 0 when not available. This behavior
10350 * is kept when using device discovery. This is required for full
10351 * backwards compatibility with the API.
10353 * However when using service discovery, the value 127 will be
10354 * returned when the RSSI is not available.
10356 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10357 link_type == ACL_LINK)
10360 bacpy(&ev->addr.bdaddr, bdaddr);
10361 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10363 ev->flags = cpu_to_le32(flags);
10366 /* Copy EIR or advertising data into event */
10367 skb_put_data(skb, eir, eir_len);
10369 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10372 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10374 skb_put_data(skb, eir_cod, sizeof(eir_cod));
10377 if (scan_rsp_len > 0)
10378 /* Append scan response data to event */
10379 skb_put_data(skb, scan_rsp, scan_rsp_len);
10381 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10383 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10386 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10387 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10389 struct sk_buff *skb;
10390 struct mgmt_ev_device_found *ev;
10394 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10395 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10397 ev = skb_put(skb, sizeof(*ev));
10398 bacpy(&ev->addr.bdaddr, bdaddr);
10399 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10403 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10405 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10407 ev->eir_len = cpu_to_le16(eir_len);
10408 ev->flags = cpu_to_le32(flags);
10410 mgmt_event_skb(skb, NULL);
10413 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10415 struct mgmt_ev_discovering ev;
10417 bt_dev_dbg(hdev, "discovering %u", discovering);
10419 memset(&ev, 0, sizeof(ev));
10420 ev.type = hdev->discovery.type;
10421 ev.discovering = discovering;
10423 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10426 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10428 struct mgmt_ev_controller_suspend ev;
10430 ev.suspend_state = state;
10431 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10434 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10437 struct mgmt_ev_controller_resume ev;
10439 ev.wake_reason = reason;
10441 bacpy(&ev.addr.bdaddr, bdaddr);
10442 ev.addr.type = addr_type;
10444 memset(&ev.addr, 0, sizeof(ev.addr));
10447 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10450 static struct hci_mgmt_chan chan = {
10451 .channel = HCI_CHANNEL_CONTROL,
10452 .handler_count = ARRAY_SIZE(mgmt_handlers),
10453 .handlers = mgmt_handlers,
10454 .hdev_init = mgmt_init_hdev,
10457 int mgmt_init(void)
10459 return hci_mgmt_chan_register(&chan);
10462 void mgmt_exit(void)
10464 hci_mgmt_chan_unregister(&chan);
10467 void mgmt_cleanup(struct sock *sk)
10469 struct mgmt_mesh_tx *mesh_tx;
10470 struct hci_dev *hdev;
10472 read_lock(&hci_dev_list_lock);
10474 list_for_each_entry(hdev, &hci_dev_list, list) {
10476 mesh_tx = mgmt_mesh_next(hdev, sk);
10479 mesh_send_complete(hdev, mesh_tx, true);
10483 read_unlock(&hci_dev_list_lock);