2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
/* Version advertised via MGMT_OP_READ_VERSION. */
#define MGMT_VERSION	1
#define MGMT_REVISION	18
45 static const u16 mgmt_commands[] = {
46 MGMT_OP_READ_INDEX_LIST,
49 MGMT_OP_SET_DISCOVERABLE,
50 MGMT_OP_SET_CONNECTABLE,
51 MGMT_OP_SET_FAST_CONNECTABLE,
53 MGMT_OP_SET_LINK_SECURITY,
57 MGMT_OP_SET_DEV_CLASS,
58 MGMT_OP_SET_LOCAL_NAME,
61 MGMT_OP_LOAD_LINK_KEYS,
62 MGMT_OP_LOAD_LONG_TERM_KEYS,
64 MGMT_OP_GET_CONNECTIONS,
65 MGMT_OP_PIN_CODE_REPLY,
66 MGMT_OP_PIN_CODE_NEG_REPLY,
67 MGMT_OP_SET_IO_CAPABILITY,
69 MGMT_OP_CANCEL_PAIR_DEVICE,
70 MGMT_OP_UNPAIR_DEVICE,
71 MGMT_OP_USER_CONFIRM_REPLY,
72 MGMT_OP_USER_CONFIRM_NEG_REPLY,
73 MGMT_OP_USER_PASSKEY_REPLY,
74 MGMT_OP_USER_PASSKEY_NEG_REPLY,
75 MGMT_OP_READ_LOCAL_OOB_DATA,
76 MGMT_OP_ADD_REMOTE_OOB_DATA,
77 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
78 MGMT_OP_START_DISCOVERY,
79 MGMT_OP_STOP_DISCOVERY,
82 MGMT_OP_UNBLOCK_DEVICE,
83 MGMT_OP_SET_DEVICE_ID,
84 MGMT_OP_SET_ADVERTISING,
86 MGMT_OP_SET_STATIC_ADDRESS,
87 MGMT_OP_SET_SCAN_PARAMS,
88 MGMT_OP_SET_SECURE_CONN,
89 MGMT_OP_SET_DEBUG_KEYS,
92 MGMT_OP_GET_CONN_INFO,
93 MGMT_OP_GET_CLOCK_INFO,
95 MGMT_OP_REMOVE_DEVICE,
96 MGMT_OP_LOAD_CONN_PARAM,
97 MGMT_OP_READ_UNCONF_INDEX_LIST,
98 MGMT_OP_READ_CONFIG_INFO,
99 MGMT_OP_SET_EXTERNAL_CONFIG,
100 MGMT_OP_SET_PUBLIC_ADDRESS,
101 MGMT_OP_START_SERVICE_DISCOVERY,
102 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
103 MGMT_OP_READ_EXT_INDEX_LIST,
104 MGMT_OP_READ_ADV_FEATURES,
105 MGMT_OP_ADD_ADVERTISING,
106 MGMT_OP_REMOVE_ADVERTISING,
107 MGMT_OP_GET_ADV_SIZE_INFO,
108 MGMT_OP_START_LIMITED_DISCOVERY,
109 MGMT_OP_READ_EXT_INFO,
110 MGMT_OP_SET_APPEARANCE,
111 MGMT_OP_SET_BLOCKED_KEYS,
112 MGMT_OP_SET_WIDEBAND_SPEECH,
113 MGMT_OP_READ_SECURITY_INFO,
114 MGMT_OP_READ_EXP_FEATURES_INFO,
115 MGMT_OP_SET_EXP_FEATURE,
116 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
117 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
118 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
119 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
120 MGMT_OP_GET_DEVICE_FLAGS,
121 MGMT_OP_SET_DEVICE_FLAGS,
122 MGMT_OP_READ_ADV_MONITOR_FEATURES,
123 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
124 MGMT_OP_REMOVE_ADV_MONITOR,
127 static const u16 mgmt_events[] = {
128 MGMT_EV_CONTROLLER_ERROR,
130 MGMT_EV_INDEX_REMOVED,
131 MGMT_EV_NEW_SETTINGS,
132 MGMT_EV_CLASS_OF_DEV_CHANGED,
133 MGMT_EV_LOCAL_NAME_CHANGED,
134 MGMT_EV_NEW_LINK_KEY,
135 MGMT_EV_NEW_LONG_TERM_KEY,
136 MGMT_EV_DEVICE_CONNECTED,
137 MGMT_EV_DEVICE_DISCONNECTED,
138 MGMT_EV_CONNECT_FAILED,
139 MGMT_EV_PIN_CODE_REQUEST,
140 MGMT_EV_USER_CONFIRM_REQUEST,
141 MGMT_EV_USER_PASSKEY_REQUEST,
143 MGMT_EV_DEVICE_FOUND,
145 MGMT_EV_DEVICE_BLOCKED,
146 MGMT_EV_DEVICE_UNBLOCKED,
147 MGMT_EV_DEVICE_UNPAIRED,
148 MGMT_EV_PASSKEY_NOTIFY,
151 MGMT_EV_DEVICE_ADDED,
152 MGMT_EV_DEVICE_REMOVED,
153 MGMT_EV_NEW_CONN_PARAM,
154 MGMT_EV_UNCONF_INDEX_ADDED,
155 MGMT_EV_UNCONF_INDEX_REMOVED,
156 MGMT_EV_NEW_CONFIG_OPTIONS,
157 MGMT_EV_EXT_INDEX_ADDED,
158 MGMT_EV_EXT_INDEX_REMOVED,
159 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
160 MGMT_EV_ADVERTISING_ADDED,
161 MGMT_EV_ADVERTISING_REMOVED,
162 MGMT_EV_EXT_INFO_CHANGED,
163 MGMT_EV_PHY_CONFIGURATION_CHANGED,
164 MGMT_EV_EXP_FEATURE_CHANGED,
165 MGMT_EV_DEVICE_FLAGS_CHANGED,
166 MGMT_EV_CONTROLLER_SUSPEND,
167 MGMT_EV_CONTROLLER_RESUME,
170 static const u16 mgmt_untrusted_commands[] = {
171 MGMT_OP_READ_INDEX_LIST,
173 MGMT_OP_READ_UNCONF_INDEX_LIST,
174 MGMT_OP_READ_CONFIG_INFO,
175 MGMT_OP_READ_EXT_INDEX_LIST,
176 MGMT_OP_READ_EXT_INFO,
177 MGMT_OP_READ_SECURITY_INFO,
178 MGMT_OP_READ_EXP_FEATURES_INFO,
179 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
180 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
183 static const u16 mgmt_untrusted_events[] = {
185 MGMT_EV_INDEX_REMOVED,
186 MGMT_EV_NEW_SETTINGS,
187 MGMT_EV_CLASS_OF_DEV_CHANGED,
188 MGMT_EV_LOCAL_NAME_CHANGED,
189 MGMT_EV_UNCONF_INDEX_ADDED,
190 MGMT_EV_UNCONF_INDEX_REMOVED,
191 MGMT_EV_NEW_CONFIG_OPTIONS,
192 MGMT_EV_EXT_INDEX_ADDED,
193 MGMT_EV_EXT_INDEX_REMOVED,
194 MGMT_EV_EXT_INFO_CHANGED,
195 MGMT_EV_EXP_FEATURE_CHANGED,
196 MGMT_EV_ADV_MONITOR_ADDED,
197 MGMT_EV_ADV_MONITOR_REMOVED,
/* How long the UUID/class service cache stays dirty before being flushed. */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 16 byte key, used to detect blacklisted/invalid link keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
205 /* HCI to MGMT error code conversion table */
206 static const u8 mgmt_status_table[] = {
208 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
209 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
210 MGMT_STATUS_FAILED, /* Hardware Failure */
211 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
212 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
213 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
214 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
215 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
216 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
217 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
218 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
219 MGMT_STATUS_BUSY, /* Command Disallowed */
220 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
221 MGMT_STATUS_REJECTED, /* Rejected Security */
222 MGMT_STATUS_REJECTED, /* Rejected Personal */
223 MGMT_STATUS_TIMEOUT, /* Host Timeout */
224 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
225 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
226 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
227 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
228 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
229 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
230 MGMT_STATUS_BUSY, /* Repeated Attempts */
231 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
232 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
234 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
235 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
236 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
237 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
238 MGMT_STATUS_FAILED, /* Unspecified Error */
239 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
240 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
241 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
242 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
243 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
244 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
245 MGMT_STATUS_FAILED, /* Unit Link Key Used */
246 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
247 MGMT_STATUS_TIMEOUT, /* Instant Passed */
248 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
249 MGMT_STATUS_FAILED, /* Transaction Collision */
250 MGMT_STATUS_FAILED, /* Reserved for future use */
251 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
252 MGMT_STATUS_REJECTED, /* QoS Rejected */
253 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
254 MGMT_STATUS_REJECTED, /* Insufficient Security */
255 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
256 MGMT_STATUS_FAILED, /* Reserved for future use */
257 MGMT_STATUS_BUSY, /* Role Switch Pending */
258 MGMT_STATUS_FAILED, /* Reserved for future use */
259 MGMT_STATUS_FAILED, /* Slot Violation */
260 MGMT_STATUS_FAILED, /* Role Switch Failed */
261 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
262 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
263 MGMT_STATUS_BUSY, /* Host Busy Pairing */
264 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
265 MGMT_STATUS_BUSY, /* Controller Busy */
266 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
267 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
268 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
269 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
270 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
273 static u8 mgmt_status(u8 hci_status)
275 if (hci_status < ARRAY_SIZE(mgmt_status_table))
276 return mgmt_status_table[hci_status];
278 return MGMT_STATUS_FAILED;
281 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
284 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
288 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
289 u16 len, int flag, struct sock *skip_sk)
291 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
295 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
296 struct sock *skip_sk)
298 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
299 HCI_SOCK_TRUSTED, skip_sk);
302 static u8 le_addr_type(u8 mgmt_addr_type)
304 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
305 return ADDR_LE_DEV_PUBLIC;
307 return ADDR_LE_DEV_RANDOM;
310 void mgmt_fill_version_info(void *ver)
312 struct mgmt_rp_read_version *rp = ver;
314 rp->version = MGMT_VERSION;
315 rp->revision = cpu_to_le16(MGMT_REVISION);
318 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
321 struct mgmt_rp_read_version rp;
323 bt_dev_dbg(hdev, "sock %p", sk);
325 mgmt_fill_version_info(&rp);
327 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
331 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
334 struct mgmt_rp_read_commands *rp;
335 u16 num_commands, num_events;
339 bt_dev_dbg(hdev, "sock %p", sk);
341 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
342 num_commands = ARRAY_SIZE(mgmt_commands);
343 num_events = ARRAY_SIZE(mgmt_events);
345 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
346 num_events = ARRAY_SIZE(mgmt_untrusted_events);
349 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
351 rp = kmalloc(rp_size, GFP_KERNEL);
355 rp->num_commands = cpu_to_le16(num_commands);
356 rp->num_events = cpu_to_le16(num_events);
358 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
359 __le16 *opcode = rp->opcodes;
361 for (i = 0; i < num_commands; i++, opcode++)
362 put_unaligned_le16(mgmt_commands[i], opcode);
364 for (i = 0; i < num_events; i++, opcode++)
365 put_unaligned_le16(mgmt_events[i], opcode);
367 __le16 *opcode = rp->opcodes;
369 for (i = 0; i < num_commands; i++, opcode++)
370 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
372 for (i = 0; i < num_events; i++, opcode++)
373 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
376 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
383 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
386 struct mgmt_rp_read_index_list *rp;
392 bt_dev_dbg(hdev, "sock %p", sk);
394 read_lock(&hci_dev_list_lock);
397 list_for_each_entry(d, &hci_dev_list, list) {
398 if (d->dev_type == HCI_PRIMARY &&
399 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
403 rp_len = sizeof(*rp) + (2 * count);
404 rp = kmalloc(rp_len, GFP_ATOMIC);
406 read_unlock(&hci_dev_list_lock);
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (hci_dev_test_flag(d, HCI_SETUP) ||
413 hci_dev_test_flag(d, HCI_CONFIG) ||
414 hci_dev_test_flag(d, HCI_USER_CHANNEL))
417 /* Devices marked as raw-only are neither configured
418 * nor unconfigured controllers.
420 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
423 if (d->dev_type == HCI_PRIMARY &&
424 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
425 rp->index[count++] = cpu_to_le16(d->id);
426 bt_dev_dbg(hdev, "Added hci%u", d->id);
430 rp->num_controllers = cpu_to_le16(count);
431 rp_len = sizeof(*rp) + (2 * count);
433 read_unlock(&hci_dev_list_lock);
435 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
443 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
444 void *data, u16 data_len)
446 struct mgmt_rp_read_unconf_index_list *rp;
452 bt_dev_dbg(hdev, "sock %p", sk);
454 read_lock(&hci_dev_list_lock);
457 list_for_each_entry(d, &hci_dev_list, list) {
458 if (d->dev_type == HCI_PRIMARY &&
459 hci_dev_test_flag(d, HCI_UNCONFIGURED))
463 rp_len = sizeof(*rp) + (2 * count);
464 rp = kmalloc(rp_len, GFP_ATOMIC);
466 read_unlock(&hci_dev_list_lock);
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (hci_dev_test_flag(d, HCI_SETUP) ||
473 hci_dev_test_flag(d, HCI_CONFIG) ||
474 hci_dev_test_flag(d, HCI_USER_CHANNEL))
477 /* Devices marked as raw-only are neither configured
478 * nor unconfigured controllers.
480 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
483 if (d->dev_type == HCI_PRIMARY &&
484 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
485 rp->index[count++] = cpu_to_le16(d->id);
486 bt_dev_dbg(hdev, "Added hci%u", d->id);
490 rp->num_controllers = cpu_to_le16(count);
491 rp_len = sizeof(*rp) + (2 * count);
493 read_unlock(&hci_dev_list_lock);
495 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
496 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
503 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
504 void *data, u16 data_len)
506 struct mgmt_rp_read_ext_index_list *rp;
511 bt_dev_dbg(hdev, "sock %p", sk);
513 read_lock(&hci_dev_list_lock);
516 list_for_each_entry(d, &hci_dev_list, list) {
517 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
521 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
523 read_unlock(&hci_dev_list_lock);
528 list_for_each_entry(d, &hci_dev_list, list) {
529 if (hci_dev_test_flag(d, HCI_SETUP) ||
530 hci_dev_test_flag(d, HCI_CONFIG) ||
531 hci_dev_test_flag(d, HCI_USER_CHANNEL))
534 /* Devices marked as raw-only are neither configured
535 * nor unconfigured controllers.
537 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
540 if (d->dev_type == HCI_PRIMARY) {
541 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
542 rp->entry[count].type = 0x01;
544 rp->entry[count].type = 0x00;
545 } else if (d->dev_type == HCI_AMP) {
546 rp->entry[count].type = 0x02;
551 rp->entry[count].bus = d->bus;
552 rp->entry[count++].index = cpu_to_le16(d->id);
553 bt_dev_dbg(hdev, "Added hci%u", d->id);
556 rp->num_controllers = cpu_to_le16(count);
558 read_unlock(&hci_dev_list_lock);
560 /* If this command is called at least once, then all the
561 * default index and unconfigured index events are disabled
562 * and from now on only extended index events are used.
564 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
565 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
566 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
568 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
569 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
570 struct_size(rp, entry, count));
577 static bool is_configured(struct hci_dev *hdev)
579 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
580 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
583 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
584 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
585 !bacmp(&hdev->public_addr, BDADDR_ANY))
591 static __le32 get_missing_options(struct hci_dev *hdev)
595 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
596 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
597 options |= MGMT_OPTION_EXTERNAL_CONFIG;
599 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
600 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
601 !bacmp(&hdev->public_addr, BDADDR_ANY))
602 options |= MGMT_OPTION_PUBLIC_ADDRESS;
604 return cpu_to_le32(options);
607 static int new_options(struct hci_dev *hdev, struct sock *skip)
609 __le32 options = get_missing_options(hdev);
611 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
612 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
615 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
617 __le32 options = get_missing_options(hdev);
619 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
623 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
624 void *data, u16 data_len)
626 struct mgmt_rp_read_config_info rp;
629 bt_dev_dbg(hdev, "sock %p", sk);
633 memset(&rp, 0, sizeof(rp));
634 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
636 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
637 options |= MGMT_OPTION_EXTERNAL_CONFIG;
639 if (hdev->set_bdaddr)
640 options |= MGMT_OPTION_PUBLIC_ADDRESS;
642 rp.supported_options = cpu_to_le32(options);
643 rp.missing_options = get_missing_options(hdev);
645 hci_dev_unlock(hdev);
647 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
651 static u32 get_supported_phys(struct hci_dev *hdev)
653 u32 supported_phys = 0;
655 if (lmp_bredr_capable(hdev)) {
656 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
658 if (hdev->features[0][0] & LMP_3SLOT)
659 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
661 if (hdev->features[0][0] & LMP_5SLOT)
662 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
664 if (lmp_edr_2m_capable(hdev)) {
665 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
667 if (lmp_edr_3slot_capable(hdev))
668 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
670 if (lmp_edr_5slot_capable(hdev))
671 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
673 if (lmp_edr_3m_capable(hdev)) {
674 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
676 if (lmp_edr_3slot_capable(hdev))
677 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
679 if (lmp_edr_5slot_capable(hdev))
680 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
685 if (lmp_le_capable(hdev)) {
686 supported_phys |= MGMT_PHY_LE_1M_TX;
687 supported_phys |= MGMT_PHY_LE_1M_RX;
689 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
690 supported_phys |= MGMT_PHY_LE_2M_TX;
691 supported_phys |= MGMT_PHY_LE_2M_RX;
694 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
695 supported_phys |= MGMT_PHY_LE_CODED_TX;
696 supported_phys |= MGMT_PHY_LE_CODED_RX;
700 return supported_phys;
703 static u32 get_selected_phys(struct hci_dev *hdev)
705 u32 selected_phys = 0;
707 if (lmp_bredr_capable(hdev)) {
708 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
710 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
711 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
713 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
714 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
716 if (lmp_edr_2m_capable(hdev)) {
717 if (!(hdev->pkt_type & HCI_2DH1))
718 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
720 if (lmp_edr_3slot_capable(hdev) &&
721 !(hdev->pkt_type & HCI_2DH3))
722 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
724 if (lmp_edr_5slot_capable(hdev) &&
725 !(hdev->pkt_type & HCI_2DH5))
726 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
728 if (lmp_edr_3m_capable(hdev)) {
729 if (!(hdev->pkt_type & HCI_3DH1))
730 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
732 if (lmp_edr_3slot_capable(hdev) &&
733 !(hdev->pkt_type & HCI_3DH3))
734 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
736 if (lmp_edr_5slot_capable(hdev) &&
737 !(hdev->pkt_type & HCI_3DH5))
738 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
743 if (lmp_le_capable(hdev)) {
744 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
745 selected_phys |= MGMT_PHY_LE_1M_TX;
747 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
748 selected_phys |= MGMT_PHY_LE_1M_RX;
750 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
751 selected_phys |= MGMT_PHY_LE_2M_TX;
753 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
754 selected_phys |= MGMT_PHY_LE_2M_RX;
756 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
757 selected_phys |= MGMT_PHY_LE_CODED_TX;
759 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
760 selected_phys |= MGMT_PHY_LE_CODED_RX;
763 return selected_phys;
766 static u32 get_configurable_phys(struct hci_dev *hdev)
768 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
769 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
772 static u32 get_supported_settings(struct hci_dev *hdev)
776 settings |= MGMT_SETTING_POWERED;
777 settings |= MGMT_SETTING_BONDABLE;
778 settings |= MGMT_SETTING_DEBUG_KEYS;
779 settings |= MGMT_SETTING_CONNECTABLE;
780 settings |= MGMT_SETTING_DISCOVERABLE;
782 if (lmp_bredr_capable(hdev)) {
783 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
784 settings |= MGMT_SETTING_FAST_CONNECTABLE;
785 settings |= MGMT_SETTING_BREDR;
786 settings |= MGMT_SETTING_LINK_SECURITY;
788 if (lmp_ssp_capable(hdev)) {
789 settings |= MGMT_SETTING_SSP;
790 if (IS_ENABLED(CONFIG_BT_HS))
791 settings |= MGMT_SETTING_HS;
794 if (lmp_sc_capable(hdev))
795 settings |= MGMT_SETTING_SECURE_CONN;
797 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
799 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
802 if (lmp_le_capable(hdev)) {
803 settings |= MGMT_SETTING_LE;
804 settings |= MGMT_SETTING_SECURE_CONN;
805 settings |= MGMT_SETTING_PRIVACY;
806 settings |= MGMT_SETTING_STATIC_ADDRESS;
808 /* When the experimental feature for LL Privacy support is
809 * enabled, then advertising is no longer supported.
811 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
812 settings |= MGMT_SETTING_ADVERTISING;
815 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
817 settings |= MGMT_SETTING_CONFIGURATION;
819 settings |= MGMT_SETTING_PHY_CONFIGURATION;
824 static u32 get_current_settings(struct hci_dev *hdev)
828 if (hdev_is_powered(hdev))
829 settings |= MGMT_SETTING_POWERED;
831 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
832 settings |= MGMT_SETTING_CONNECTABLE;
834 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
835 settings |= MGMT_SETTING_FAST_CONNECTABLE;
837 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
838 settings |= MGMT_SETTING_DISCOVERABLE;
840 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
841 settings |= MGMT_SETTING_BONDABLE;
843 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
844 settings |= MGMT_SETTING_BREDR;
846 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
847 settings |= MGMT_SETTING_LE;
849 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
850 settings |= MGMT_SETTING_LINK_SECURITY;
852 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
853 settings |= MGMT_SETTING_SSP;
855 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
856 settings |= MGMT_SETTING_HS;
858 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
859 settings |= MGMT_SETTING_ADVERTISING;
861 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
862 settings |= MGMT_SETTING_SECURE_CONN;
864 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
865 settings |= MGMT_SETTING_DEBUG_KEYS;
867 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
868 settings |= MGMT_SETTING_PRIVACY;
870 /* The current setting for static address has two purposes. The
871 * first is to indicate if the static address will be used and
872 * the second is to indicate if it is actually set.
874 * This means if the static address is not configured, this flag
875 * will never be set. If the address is configured, then if the
876 * address is actually used decides if the flag is set or not.
878 * For single mode LE only controllers and dual-mode controllers
879 * with BR/EDR disabled, the existence of the static address will
882 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
883 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
884 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
885 if (bacmp(&hdev->static_addr, BDADDR_ANY))
886 settings |= MGMT_SETTING_STATIC_ADDRESS;
889 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
890 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
895 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
897 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
900 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
901 struct hci_dev *hdev,
904 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
907 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
909 struct mgmt_pending_cmd *cmd;
911 /* If there's a pending mgmt command the flags will not yet have
912 * their final values, so check for this first.
914 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
916 struct mgmt_mode *cp = cmd->param;
918 return LE_AD_GENERAL;
919 else if (cp->val == 0x02)
920 return LE_AD_LIMITED;
922 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
923 return LE_AD_LIMITED;
924 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
925 return LE_AD_GENERAL;
931 bool mgmt_get_connectable(struct hci_dev *hdev)
933 struct mgmt_pending_cmd *cmd;
935 /* If there's a pending mgmt command the flag will not yet have
936 * it's final value, so check for this first.
938 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
940 struct mgmt_mode *cp = cmd->param;
945 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
948 static void service_cache_off(struct work_struct *work)
950 struct hci_dev *hdev = container_of(work, struct hci_dev,
952 struct hci_request req;
954 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
957 hci_req_init(&req, hdev);
961 __hci_req_update_eir(&req);
962 __hci_req_update_class(&req);
964 hci_dev_unlock(hdev);
966 hci_req_run(&req, NULL);
969 static void rpa_expired(struct work_struct *work)
971 struct hci_dev *hdev = container_of(work, struct hci_dev,
973 struct hci_request req;
975 bt_dev_dbg(hdev, "");
977 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
979 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
982 /* The generation of a new RPA and programming it into the
983 * controller happens in the hci_req_enable_advertising()
986 hci_req_init(&req, hdev);
987 if (ext_adv_capable(hdev))
988 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
990 __hci_req_enable_advertising(&req);
991 hci_req_run(&req, NULL);
994 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
996 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
999 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1000 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1002 /* Non-mgmt controlled devices get this bit set
1003 * implicitly so that pairing works for them, however
1004 * for mgmt we require user-space to explicitly enable
1007 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1010 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1011 void *data, u16 data_len)
1013 struct mgmt_rp_read_info rp;
1015 bt_dev_dbg(hdev, "sock %p", sk);
1019 memset(&rp, 0, sizeof(rp));
1021 bacpy(&rp.bdaddr, &hdev->bdaddr);
1023 rp.version = hdev->hci_ver;
1024 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1026 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1027 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1029 memcpy(rp.dev_class, hdev->dev_class, 3);
1031 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1032 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1034 hci_dev_unlock(hdev);
1036 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1040 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1045 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1046 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1047 hdev->dev_class, 3);
1049 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1050 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1053 name_len = strlen(hdev->dev_name);
1054 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1055 hdev->dev_name, name_len);
1057 name_len = strlen(hdev->short_name);
1058 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1059 hdev->short_name, name_len);
1064 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1065 void *data, u16 data_len)
1068 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1071 bt_dev_dbg(hdev, "sock %p", sk);
1073 memset(&buf, 0, sizeof(buf));
1077 bacpy(&rp->bdaddr, &hdev->bdaddr);
1079 rp->version = hdev->hci_ver;
1080 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1082 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1083 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1086 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1087 rp->eir_len = cpu_to_le16(eir_len);
1089 hci_dev_unlock(hdev);
1091 /* If this command is called at least once, then the events
1092 * for class of device and local name changes are disabled
1093 * and only the new extended controller information event
1096 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1097 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1098 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1100 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1101 sizeof(*rp) + eir_len);
1104 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1107 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1110 memset(buf, 0, sizeof(buf));
1112 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1113 ev->eir_len = cpu_to_le16(eir_len);
1115 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1116 sizeof(*ev) + eir_len,
1117 HCI_MGMT_EXT_INFO_EVENTS, skip);
1120 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1122 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1124 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1128 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1130 bt_dev_dbg(hdev, "status 0x%02x", status);
1132 if (hci_conn_count(hdev) == 0) {
1133 cancel_delayed_work(&hdev->power_off);
1134 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1138 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1140 struct mgmt_ev_advertising_added ev;
1142 ev.instance = instance;
1144 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1147 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1150 struct mgmt_ev_advertising_removed ev;
1152 ev.instance = instance;
1154 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1157 static void cancel_adv_timeout(struct hci_dev *hdev)
1159 if (hdev->adv_instance_timeout) {
1160 hdev->adv_instance_timeout = 0;
1161 cancel_delayed_work(&hdev->adv_instance_expire);
1165 static int clean_up_hci_state(struct hci_dev *hdev)
1167 struct hci_request req;
1168 struct hci_conn *conn;
1169 bool discov_stopped;
1172 hci_req_init(&req, hdev);
1174 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1175 test_bit(HCI_PSCAN, &hdev->flags)) {
1177 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1180 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1182 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1183 __hci_req_disable_advertising(&req);
1185 discov_stopped = hci_req_stop_discovery(&req);
1187 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1188 /* 0x15 == Terminated due to Power Off */
1189 __hci_abort_conn(&req, conn, 0x15);
1192 err = hci_req_run(&req, clean_up_hci_complete);
1193 if (!err && discov_stopped)
1194 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: power the adapter up or down.  The reply
 * is deferred via a pending command; the actual transition runs
 * asynchronously on hdev->req_workqueue.
 */
1199 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1202 struct mgmt_mode *cp = data;
1203 struct mgmt_pending_cmd *cmd;
1206 bt_dev_dbg(hdev, "sock %p", sk);
/* Only 0x00 (off) and 0x01 (on) are valid mode values. */
1208 if (cp->val != 0x00 && cp->val != 0x01)
1209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1210 MGMT_STATUS_INVALID_PARAMS);
/* Refuse when a power change is already in flight. */
1214 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1215 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Requested state equals current state: just echo the settings. */
1220 if (!!cp->val == hdev_is_powered(hdev)) {
1221 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1225 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1232 queue_work(hdev->req_workqueue, &hdev->power_on);
1235 /* Disconnect connections, stop scans, etc */
1236 err = clean_up_hci_state(hdev);
/* Watchdog: force power-off even if clean-up never completes. */
1238 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1239 HCI_POWER_OFF_TIMEOUT);
1241 /* ENODATA means there were no HCI commands queued */
1242 if (err == -ENODATA) {
/* Nothing to wait for, so power off right away. */
1243 cancel_delayed_work(&hdev->power_off);
1244 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1250 hci_dev_unlock(hdev);
1254 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1256 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1258 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1259 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Broadcast the current settings to all listeners (no socket skipped). */
1262 int mgmt_new_settings(struct hci_dev *hdev)
1264 return new_settings(hdev, NULL);
1269 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending settings-style
 * command and record the first responding socket in the cmd_lookup.
 */
1273 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1275 struct cmd_lookup *match = data;
1277 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1279 list_del(&cmd->list);
/* Hold the first socket so the caller can skip it when broadcasting
 * the resulting new-settings event.
 */
1281 if (match->sk == NULL) {
1282 match->sk = cmd->sk;
1283 sock_hold(match->sk);
1286 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status value @data points at, then drop the pending entry.
 */
1289 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1293 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1294 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending command through its
 * per-command cmd_complete handler when one is set, otherwise fall back
 * to a plain command-status response.
 */
1297 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1299 if (cmd->cmd_complete) {
1302 cmd->cmd_complete(cmd, *status);
1303 mgmt_pending_remove(cmd);
1308 cmd_status_rsp(cmd, data);
/* Default cmd_complete handler: echo the stored command parameters back
 * to the requester unchanged.
 */
1311 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1313 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1314 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1317 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1319 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1320 cmd->param, sizeof(struct mgmt_addr_info));
1323 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1325 if (!lmp_bredr_capable(hdev))
1326 return MGMT_STATUS_NOT_SUPPORTED;
1327 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1328 return MGMT_STATUS_REJECTED;
1330 return MGMT_STATUS_SUCCESS;
1333 static u8 mgmt_le_support(struct hci_dev *hdev)
1335 if (!lmp_le_capable(hdev))
1336 return MGMT_STATUS_NOT_SUPPORTED;
1337 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1338 return MGMT_STATUS_REJECTED;
1340 return MGMT_STATUS_SUCCESS;
/* Completion hook for the discoverable state change: answers the
 * pending MGMT_OP_SET_DISCOVERABLE command and arms the discoverable
 * timeout once the mode is actually active.
 */
1343 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1345 struct mgmt_pending_cmd *cmd;
1347 bt_dev_dbg(hdev, "status 0x%02x", status);
1351 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* On failure report the translated error and make sure the limited
 * discoverable flag does not linger.
 */
1356 u8 mgmt_err = mgmt_status(status);
1357 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1358 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the auto-off timer only now that the mode took effect. */
1362 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1363 hdev->discov_timeout > 0) {
1364 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1365 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1368 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1369 new_settings(hdev, cmd->sk);
1372 mgmt_pending_remove(cmd);
1375 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable; a non-zero timeout auto-disables the mode
 * later.  Heavy lifting is deferred to hdev->discoverable_update work.
 */
1378 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1381 struct mgmt_cp_set_discoverable *cp = data;
1382 struct mgmt_pending_cmd *cmd;
1386 bt_dev_dbg(hdev, "sock %p", sk);
/* Discoverable requires at least one of BR/EDR or LE to be enabled. */
1388 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1389 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1390 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1391 MGMT_STATUS_REJECTED);
1393 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1394 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1395 MGMT_STATUS_INVALID_PARAMS);
1397 timeout = __le16_to_cpu(cp->timeout);
1399 /* Disabling discoverable requires that no timeout is set,
1400 * and enabling limited discoverable requires a timeout.
1402 if ((cp->val == 0x00 && timeout > 0) ||
1403 (cp->val == 0x02 && timeout == 0))
1404 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1405 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running timer, hence a powered adapter. */
1409 if (!hdev_is_powered(hdev) && timeout > 0) {
1410 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1411 MGMT_STATUS_NOT_POWERED);
/* Serialize against in-flight discoverable/connectable changes. */
1415 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1416 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1417 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable only makes sense while connectable. */
1422 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1423 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1424 MGMT_STATUS_REJECTED);
/* Advertising changes are on hold (e.g. suspend); try again later. */
1428 if (hdev->advertising_paused) {
1429 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Powered off: only toggle the flag, no HCI traffic possible. */
1434 if (!hdev_is_powered(hdev)) {
1435 bool changed = false;
1437 /* Setting limited discoverable when powered off is
1438 * not a valid operation since it requires a timeout
1439 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1441 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1442 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1446 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1451 err = new_settings(hdev, sk);
1456 /* If the current mode is the same, then just update the timeout
1457 * value with the new value. And if only the timeout gets updated,
1458 * then no need for any HCI transactions.
1460 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1461 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1462 HCI_LIMITED_DISCOVERABLE)) {
1463 cancel_delayed_work(&hdev->discov_off);
1464 hdev->discov_timeout = timeout;
/* Re-arm the auto-off timer with the new timeout, if any. */
1466 if (cp->val && hdev->discov_timeout > 0) {
1467 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1468 queue_delayed_work(hdev->req_workqueue,
1469 &hdev->discov_off, to);
1472 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1476 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1482 /* Cancel any potential discoverable timeout that might be
1483 * still active and store new timeout value. The arming of
1484 * the timeout happens in the complete handler.
1486 cancel_delayed_work(&hdev->discov_off);
1487 hdev->discov_timeout = timeout;
1490 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1492 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1494 /* Limited discoverable mode */
1495 if (cp->val == 0x02)
1496 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1498 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Actual HCI work happens asynchronously in discoverable_update. */
1500 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1504 hci_dev_unlock(hdev);
/* Completion hook for the connectable state change: answers the pending
 * MGMT_OP_SET_CONNECTABLE command with either an error status or the
 * updated settings plus a new-settings broadcast.
 */
1508 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1510 struct mgmt_pending_cmd *cmd;
1512 bt_dev_dbg(hdev, "status 0x%02x", status);
1516 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1521 u8 mgmt_err = mgmt_status(status);
1522 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1526 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1527 new_settings(hdev, cmd->sk);
1530 mgmt_pending_remove(cmd);
1533 hci_dev_unlock(hdev);
/* Apply a connectable change purely in software (used when no HCI
 * traffic is needed, e.g. powered off): adjust flags, respond to the
 * caller and broadcast the new settings if something changed.
 */
1536 static int set_connectable_update_settings(struct hci_dev *hdev,
1537 struct sock *sk, u8 val)
1539 bool changed = false;
1542 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1546 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Dropping connectable implies dropping discoverable as well. */
1548 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1549 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1552 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
/* Refresh scan and background-scan state to match the new mode. */
1557 hci_req_update_scan(hdev);
1558 hci_update_background_scan(hdev);
1559 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: toggle page-scan/connectable mode.
 * When powered, the HCI work is deferred to hdev->connectable_update.
 */
1565 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1568 struct mgmt_mode *cp = data;
1569 struct mgmt_pending_cmd *cmd;
1572 bt_dev_dbg(hdev, "sock %p", sk);
/* Needs at least one of BR/EDR or LE enabled. */
1574 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1575 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1577 MGMT_STATUS_REJECTED);
1579 if (cp->val != 0x00 && cp->val != 0x01)
1580 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1581 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only update, no HCI commands required. */
1585 if (!hdev_is_powered(hdev)) {
1586 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Serialize against in-flight discoverable/connectable changes. */
1590 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1591 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1597 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1604 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Disabling connectable cancels any pending discoverable timeout and
 * clears both discoverable flags.
 */
1606 if (hdev->discov_timeout > 0)
1607 cancel_delayed_work(&hdev->discov_off);
1609 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1610 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1611 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1614 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1618 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: flag-only change, no HCI commands, but
 * may need to refresh advertising when limited privacy is in use.
 */
1622 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1625 struct mgmt_mode *cp = data;
1629 bt_dev_dbg(hdev, "sock %p", sk);
1631 if (cp->val != 0x00 && cp->val != 0x01)
1632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1633 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear report whether the flag actually flipped. */
1638 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1640 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1642 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1647 /* In limited privacy mode the change of bondable mode
1648 * may affect the local advertising address.
1650 if (hdev_is_powered(hdev) &&
1651 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1652 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1653 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1654 queue_work(hdev->req_workqueue,
1655 &hdev->discoverable_update);
1657 err = new_settings(hdev, sk);
1661 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR authentication via
 * HCI_OP_WRITE_AUTH_ENABLE (or a flag-only update when powered off).
 */
1665 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1668 struct mgmt_mode *cp = data;
1669 struct mgmt_pending_cmd *cmd;
1673 bt_dev_dbg(hdev, "sock %p", sk);
/* BR/EDR must be both supported and enabled. */
1675 status = mgmt_bredr_support(hdev);
1677 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1680 if (cp->val != 0x00 && cp->val != 0x01)
1681 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1682 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: only record the desired state in the flag. */
1686 if (!hdev_is_powered(hdev)) {
1687 bool changed = false;
1689 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1690 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1694 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1699 err = new_settings(hdev, sk);
1704 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1705 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already matches the requested auth setting. */
1712 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1713 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1717 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1723 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
/* Sending failed: drop the pending entry we just created. */
1725 mgmt_pending_remove(cmd);
1730 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing via
 * HCI_OP_WRITE_SSP_MODE (flag-only when powered off).
 */
1734 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1736 struct mgmt_mode *cp = data;
1737 struct mgmt_pending_cmd *cmd;
1741 bt_dev_dbg(hdev, "sock %p", sk);
1743 status = mgmt_bredr_support(hdev);
1745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1747 if (!lmp_ssp_capable(hdev))
1748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1749 MGMT_STATUS_NOT_SUPPORTED);
1751 if (cp->val != 0x00 && cp->val != 0x01)
1752 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1753 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: adjust the SSP flag (and dependent flags) only. */
1757 if (!hdev_is_powered(hdev)) {
1761 changed = !hci_dev_test_and_set_flag(hdev,
1764 changed = hci_dev_test_and_clear_flag(hdev,
1767 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so it is cleared alongside. */
1770 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1773 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1778 err = new_settings(hdev, sk);
1783 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
/* Already in the requested state: just echo settings back. */
1789 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1790 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1794 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP also turns off SSP debug mode if it was in use. */
1800 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1801 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1802 sizeof(cp->val), &cp->val);
1804 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1806 mgmt_pending_remove(cmd);
1811 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggle the High Speed (AMP) setting.  Pure
 * flag manipulation; requires CONFIG_BT_HS and enabled SSP.
 */
1815 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1817 struct mgmt_mode *cp = data;
1822 bt_dev_dbg(hdev, "sock %p", sk);
1824 if (!IS_ENABLED(CONFIG_BT_HS))
1825 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1826 MGMT_STATUS_NOT_SUPPORTED);
1828 status = mgmt_bredr_support(hdev);
1830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1832 if (!lmp_ssp_capable(hdev))
1833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1834 MGMT_STATUS_NOT_SUPPORTED);
/* HS is layered on SSP; refuse while SSP is disabled. */
1836 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1837 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1838 MGMT_STATUS_REJECTED);
1840 if (cp->val != 0x00 && cp->val != 0x01)
1841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1842 MGMT_STATUS_INVALID_PARAMS);
/* An SSP change in flight could invalidate this request. */
1846 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1847 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1853 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS is only permitted while powered off. */
1855 if (hdev_is_powered(hdev)) {
1856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1857 MGMT_STATUS_REJECTED);
1861 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1864 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1869 err = new_settings(hdev, sk);
1872 hci_dev_unlock(hdev);
/* Completion callback for the SET_LE request: answer all pending SET_LE
 * commands and, on successful enable, refresh default advertising data.
 */
1876 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1878 struct cmd_lookup match = { NULL, hdev };
/* On error, fail every pending SET_LE with the translated status. */
1883 u8 mgmt_err = mgmt_status(status);
1885 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1890 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1892 new_settings(hdev, match.sk);
1897 /* Make sure the controller has a good default for
1898 * advertising data. Restrict the update to when LE
1899 * has actually been enabled. During power on, the
1900 * update in powered_update_hci will take care of it.
1902 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1903 struct hci_request req;
1904 hci_req_init(&req, hdev);
/* Extended advertising controllers need an adv set configured first. */
1905 if (ext_adv_capable(hdev)) {
1908 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1910 __hci_req_update_scan_rsp_data(&req, 0x00);
1912 __hci_req_update_adv_data(&req, 0x00);
1913 __hci_req_update_scan_rsp_data(&req, 0x00);
1915 hci_req_run(&req, NULL);
1916 hci_update_background_scan(hdev);
1920 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: enable/disable LE host support via
 * HCI_OP_WRITE_LE_HOST_SUPPORTED (flag-only when the host state
 * already matches or the adapter is powered off).
 */
1923 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1925 struct mgmt_mode *cp = data;
1926 struct hci_cp_write_le_host_supported hci_cp;
1927 struct mgmt_pending_cmd *cmd;
1928 struct hci_request req;
1932 bt_dev_dbg(hdev, "sock %p", sk);
1934 if (!lmp_le_capable(hdev))
1935 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1936 MGMT_STATUS_NOT_SUPPORTED);
1938 if (cp->val != 0x00 && cp->val != 0x01)
1939 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1940 MGMT_STATUS_INVALID_PARAMS);
1942 /* Bluetooth single mode LE only controllers or dual-mode
1943 * controllers configured as LE only devices, do not allow
1944 * switching LE off. These have either LE enabled explicitly
1945 * or BR/EDR has been previously switched off.
1947 * When trying to enable an already enabled LE, then gracefully
1948 * send a positive response. Trying to disable it however will
1949 * result into rejection.
1951 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1952 if (cp->val == 0x01)
1953 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1955 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1956 MGMT_STATUS_REJECTED);
1962 enabled = lmp_host_le_capable(hdev);
/* Disabling LE drops all advertising instances (with mgmt events). */
1965 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
/* No HCI traffic needed: powered off or host state already correct. */
1967 if (!hdev_is_powered(hdev) || val == enabled) {
1968 bool changed = false;
1970 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1971 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* LE advertising cannot survive LE being disabled. */
1975 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1976 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1980 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1985 err = new_settings(hdev, sk);
1990 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1991 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1992 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1997 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2003 hci_req_init(&req, hdev);
2005 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE + BR/EDR is never advertised to the controller. */
2009 hci_cp.simul = 0x00;
/* When disabling, stop advertising (and clear ext adv sets) first. */
2011 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2012 __hci_req_disable_advertising(&req);
2014 if (ext_adv_capable(hdev))
2015 __hci_req_clear_ext_adv_sets(&req);
2018 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2021 err = hci_req_run(&req, le_enable_complete);
2023 mgmt_pending_remove(cmd);
2026 hci_dev_unlock(hdev);
2030 /* This is a helper function to test for pending mgmt commands that can
2031 * cause CoD or EIR HCI commands. We can only allow one such pending
2032 * mgmt command at a time since otherwise we cannot easily track what
2033 * the current values are, will be, and based on that calculate if a new
2034 * HCI command needs to be sent and if yes with what value.
2036 static bool pending_eir_or_class(struct hci_dev *hdev)
2038 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that may touch CoD/EIR. */
2040 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2041 switch (cmd->opcode) {
2042 case MGMT_OP_ADD_UUID:
2043 case MGMT_OP_REMOVE_UUID:
2044 case MGMT_OP_SET_DEV_CLASS:
2045 case MGMT_OP_SET_POWERED:
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB)
 * stored in little-endian byte order, used to detect shortened UUIDs.
 */
2053 static const u8 bluetooth_base_uuid[] = {
2054 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2055 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2058 static u8 get_uuid_size(const u8 *uuid)
2062 if (memcmp(uuid, bluetooth_base_uuid, 12))
2065 val = get_unaligned_le32(&uuid[12]);
/* Complete a pending class-of-device related command (@mgmt_op) with
 * the translated HCI status and the current device class as payload.
 */
2072 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2074 struct mgmt_pending_cmd *cmd;
2078 cmd = pending_find(mgmt_op, hdev);
2082 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2083 mgmt_status(status), hdev->dev_class, 3);
2085 mgmt_pending_remove(cmd);
2088 hci_dev_unlock(hdev);
/* hci_req_run() callback for add_uuid(): finish the pending command. */
2091 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2093 bt_dev_dbg(hdev, "status 0x%02x", status);
2095 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: record a service UUID and refresh the
 * class of device and EIR data to advertise it.
 */
2098 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2100 struct mgmt_cp_add_uuid *cp = data;
2101 struct mgmt_pending_cmd *cmd;
2102 struct hci_request req;
2103 struct bt_uuid *uuid;
2106 bt_dev_dbg(hdev, "sock %p", sk);
/* Only one CoD/EIR-affecting command may be pending at a time. */
2110 if (pending_eir_or_class(hdev)) {
2111 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2116 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2122 memcpy(uuid->uuid, cp->uuid, 16);
2123 uuid->svc_hint = cp->svc_hint;
2124 uuid->size = get_uuid_size(cp->uuid);
2126 list_add_tail(&uuid->list, &hdev->uuids);
2128 hci_req_init(&req, hdev);
2130 __hci_req_update_class(&req);
2131 __hci_req_update_eir(&req);
2133 err = hci_req_run(&req, add_uuid_complete);
2135 if (err != -ENODATA)
/* -ENODATA: nothing to send, so complete immediately with the
 * current device class.
 */
2138 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2139 hdev->dev_class, 3);
2143 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2152 hci_dev_unlock(hdev);
/* Turn on service caching (deferring CoD/EIR updates) while powered;
 * arms the delayed service_cache flush when the flag was newly set.
 */
2156 static bool enable_service_cache(struct hci_dev *hdev)
2158 if (!hdev_is_powered(hdev))
2161 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2162 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_req_run() callback for remove_uuid(): finish the pending command. */
2170 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2172 bt_dev_dbg(hdev, "status 0x%02x", status);
2174 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of them
 * when the all-zero UUID is given) and refresh CoD/EIR accordingly.
 */
2177 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2180 struct mgmt_cp_remove_uuid *cp = data;
2181 struct mgmt_pending_cmd *cmd;
2182 struct bt_uuid *match, *tmp;
2183 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2184 struct hci_request req;
2187 bt_dev_dbg(hdev, "sock %p", sk);
/* Only one CoD/EIR-affecting command may be pending at a time. */
2191 if (pending_eir_or_class(hdev)) {
2192 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: clear the whole list. */
2197 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2198 hci_uuids_clear(hdev);
/* With service caching enabled, the HCI update is deferred and
 * the command can complete immediately.
 */
2200 if (enable_service_cache(hdev)) {
2201 err = mgmt_cmd_complete(sk, hdev->id,
2202 MGMT_OP_REMOVE_UUID,
2203 0, hdev->dev_class, 3);
/* Otherwise remove every entry matching the given UUID. */
2212 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2213 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2216 list_del(&match->list);
/* No match found: the UUID was never registered. */
2222 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2223 MGMT_STATUS_INVALID_PARAMS);
2228 hci_req_init(&req, hdev);
2230 __hci_req_update_class(&req);
2231 __hci_req_update_eir(&req);
2233 err = hci_req_run(&req, remove_uuid_complete);
2235 if (err != -ENODATA)
2238 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2239 hdev->dev_class, 3);
2243 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2252 hci_dev_unlock(hdev);
/* hci_req_run() callback for set_dev_class(): finish the pending command. */
2256 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2258 bt_dev_dbg(hdev, "status 0x%02x", status);
2260 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor class of
 * device and push it (plus EIR) to the controller when powered.
 */
2263 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2266 struct mgmt_cp_set_dev_class *cp = data;
2267 struct mgmt_pending_cmd *cmd;
2268 struct hci_request req;
2271 bt_dev_dbg(hdev, "sock %p", sk);
2273 if (!lmp_bredr_capable(hdev))
2274 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2275 MGMT_STATUS_NOT_SUPPORTED);
/* Only one CoD/EIR-affecting command may be pending at a time. */
2279 if (pending_eir_or_class(hdev)) {
2280 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2285 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2286 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2287 MGMT_STATUS_INVALID_PARAMS);
2291 hdev->major_class = cp->major;
2292 hdev->minor_class = cp->minor;
/* Powered off: just store the values and reply. */
2294 if (!hdev_is_powered(hdev)) {
2295 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2296 hdev->dev_class, 3);
2300 hci_req_init(&req, hdev);
/* Flush a pending service-cache timer; the lock is dropped around
 * cancel_delayed_work_sync() since the work item takes it too.
 */
2302 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2303 hci_dev_unlock(hdev);
2304 cancel_delayed_work_sync(&hdev->service_cache);
2306 __hci_req_update_eir(&req);
2309 __hci_req_update_class(&req);
2311 err = hci_req_run(&req, set_class_complete);
2313 if (err != -ENODATA)
2316 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2317 hdev->dev_class, 3);
2321 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2330 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list and update the keep-debug-keys policy.
 */
2334 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
/* Upper bound on key_count so the expected length fits in u16. */
2337 struct mgmt_cp_load_link_keys *cp = data;
2338 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2339 sizeof(struct mgmt_link_key_info));
2340 u16 key_count, expected_len;
2344 bt_dev_dbg(hdev, "sock %p", sk);
2346 if (!lmp_bredr_capable(hdev))
2347 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2348 MGMT_STATUS_NOT_SUPPORTED);
2350 key_count = __le16_to_cpu(cp->key_count);
2351 if (key_count > max_key_count) {
2352 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2355 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared key count. */
2358 expected_len = struct_size(cp, keys, key_count);
2359 if (expected_len != len) {
2360 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2363 MGMT_STATUS_INVALID_PARAMS);
2366 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2367 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2368 MGMT_STATUS_INVALID_PARAMS);
2370 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* Validate all key types before touching the stored keys. */
2373 for (i = 0; i < key_count; i++) {
2374 struct mgmt_link_key_info *key = &cp->keys[i];
2376 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2377 if (key->type > 0x08)
2378 return mgmt_cmd_status(sk, hdev->id,
2379 MGMT_OP_LOAD_LINK_KEYS,
2380 MGMT_STATUS_INVALID_PARAMS);
2385 hci_link_keys_clear(hdev);
2388 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2390 changed = hci_dev_test_and_clear_flag(hdev,
2391 HCI_KEEP_DEBUG_KEYS);
2394 new_settings(hdev, NULL);
2396 for (i = 0; i < key_count; i++) {
2397 struct mgmt_link_key_info *key = &cp->keys[i];
/* Refuse keys the administrator has explicitly blocked. */
2399 if (hci_is_blocked_key(hdev,
2400 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2402 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2407 /* Always ignore debug keys and require a new pairing if
2408 * the user wants to use them.
2410 if (key->type == HCI_LK_DEBUG_COMBINATION)
2413 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2414 key->type, key->pin_len, NULL);
2417 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2419 hci_dev_unlock(hdev);
2424 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2425 u8 addr_type, struct sock *skip_sk)
2427 struct mgmt_ev_device_unpaired ev;
2429 bacpy(&ev.addr.bdaddr, bdaddr);
2430 ev.addr.type = addr_type;
2432 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove stored keys for a device and,
 * when requested and connected, terminate the link before replying.
 */
2436 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2439 struct mgmt_cp_unpair_device *cp = data;
2440 struct mgmt_rp_unpair_device rp;
2441 struct hci_conn_params *params;
2442 struct mgmt_pending_cmd *cmd;
2443 struct hci_conn *conn;
/* The reply always echoes the target address. */
2447 memset(&rp, 0, sizeof(rp));
2448 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2449 rp.addr.type = cp->addr.type;
2451 if (!bdaddr_type_is_valid(cp->addr.type))
2452 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2453 MGMT_STATUS_INVALID_PARAMS,
2456 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2457 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2458 MGMT_STATUS_INVALID_PARAMS,
2463 if (!hdev_is_powered(hdev)) {
2464 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2465 MGMT_STATUS_NOT_POWERED, &rp,
2470 if (cp->addr.type == BDADDR_BREDR) {
2471 /* If disconnection is requested, then look up the
2472 * connection. If the remote device is connected, it
2473 * will be later used to terminate the link.
2475 * Setting it to NULL explicitly will cause no
2476 * termination of the link.
2479 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2484 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2486 err = mgmt_cmd_complete(sk, hdev->id,
2487 MGMT_OP_UNPAIR_DEVICE,
2488 MGMT_STATUS_NOT_PAIRED, &rp,
2496 /* LE address type */
2497 addr_type = le_addr_type(cp->addr.type);
2499 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2500 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2502 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2503 MGMT_STATUS_NOT_PAIRED, &rp,
2508 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
/* No live LE connection: the stored parameters can go right away. */
2510 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2515 /* Defer clearing up the connection parameters until closing to
2516 * give a chance of keeping them if a repairing happens.
2518 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2520 /* Disable auto-connection parameters if present */
2521 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2523 if (params->explicit_connect)
2524 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2526 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2529 /* If disconnection is not requested, then clear the connection
2530 * variable so that the link is not terminated.
2532 if (!cp->disconnect)
2536 /* If the connection variable is set, then termination of the
2537 * link is requested.
2540 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2542 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnect path: the reply waits for the abort to finish. */
2546 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2553 cmd->cmd_complete = addr_cmd_complete;
2555 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2557 mgmt_pending_remove(cmd);
2560 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: tear down one BR/EDR or LE connection.
 * Reply is deferred until the disconnect completes.
 */
2564 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2567 struct mgmt_cp_disconnect *cp = data;
2568 struct mgmt_rp_disconnect rp;
2569 struct mgmt_pending_cmd *cmd;
2570 struct hci_conn *conn;
2573 bt_dev_dbg(hdev, "sock %p", sk);
/* The reply always echoes the target address. */
2575 memset(&rp, 0, sizeof(rp));
2576 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2577 rp.addr.type = cp->addr.type;
2579 if (!bdaddr_type_is_valid(cp->addr.type))
2580 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2581 MGMT_STATUS_INVALID_PARAMS,
2586 if (!test_bit(HCI_UP, &hdev->flags)) {
2587 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2588 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect may be pending at a time. */
2593 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2594 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2595 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2599 if (cp->addr.type == BDADDR_BREDR)
2600 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2603 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2604 le_addr_type(cp->addr.type));
2606 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2607 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2608 MGMT_STATUS_NOT_CONNECTED, &rp,
2613 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2619 cmd->cmd_complete = generic_cmd_complete;
2621 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
/* Sending failed: drop the pending entry we just created. */
2623 mgmt_pending_remove(cmd);
2626 hci_dev_unlock(hdev);
2630 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2632 switch (link_type) {
2634 switch (addr_type) {
2635 case ADDR_LE_DEV_PUBLIC:
2636 return BDADDR_LE_PUBLIC;
2639 /* Fallback to LE Random address type */
2640 return BDADDR_LE_RANDOM;
2644 /* Fallback to BR/EDR type */
2645 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible (non-SCO) connections.
 */
2649 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2652 struct mgmt_rp_get_connections *rp;
2657 bt_dev_dbg(hdev, "sock %p", sk);
2661 if (!hdev_is_powered(hdev)) {
2662 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2663 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the reply buffer. */
2668 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2669 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2673 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
2680 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2681 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2683 bacpy(&rp->addr[i].bdaddr, &c->dst);
2684 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2685 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2690 rp->conn_count = cpu_to_le16(i);
2692 /* Recalculate length in case of filtered SCO connections, etc */
2693 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2694 struct_size(rp, addr, i));
2699 hci_dev_unlock(hdev);
/* Queue a PIN code negative reply to the controller, tracking it as a
 * pending MGMT_OP_PIN_CODE_NEG_REPLY command until the HCI event.
 */
2703 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2704 struct mgmt_cp_pin_code_neg_reply *cp)
2706 struct mgmt_pending_cmd *cmd;
2709 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2714 cmd->cmd_complete = addr_cmd_complete;
2716 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2717 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
/* Sending failed: drop the pending entry we just created. */
2719 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward the user's PIN code to the
 * controller for the pending legacy-pairing request.
 */
2724 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2727 struct hci_conn *conn;
2728 struct mgmt_cp_pin_code_reply *cp = data;
2729 struct hci_cp_pin_code_reply reply;
2730 struct mgmt_pending_cmd *cmd;
2733 bt_dev_dbg(hdev, "sock %p", sk);
2737 if (!hdev_is_powered(hdev)) {
2738 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2739 MGMT_STATUS_NOT_POWERED);
2743 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2745 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2746 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise convert the
 * reply into a negative reply toward the controller.
 */
2750 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2751 struct mgmt_cp_pin_code_neg_reply ncp;
2753 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2755 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2757 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2759 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2760 MGMT_STATUS_INVALID_PARAMS);
2765 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2771 cmd->cmd_complete = addr_cmd_complete;
2773 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2774 reply.pin_len = cp->pin_len;
2775 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2777 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2779 mgmt_pending_remove(cmd);
2782 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the requested IO capability on
 * the adapter after range-checking it against SMP_IO_KEYBOARD_DISPLAY (the
 * highest defined value).  Always replies with command complete on success.
 * (Extraction-elided view: hci_dev_lock and closing braces are missing.)
 */
2786 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2789 struct mgmt_cp_set_io_capability *cp = data;
2791 bt_dev_dbg(hdev, "sock %p", sk);
2793 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2794 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2795 MGMT_STATUS_INVALID_PARAMS);
2799 hdev->io_capability = cp->io_capability;
2801 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2803 hci_dev_unlock(hdev);
2805 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is @conn,
 * scanning hdev->mgmt_pending.  (Extraction-elided view: the continue /
 * return statements and closing braces are missing from the visible text;
 * presumably returns the matching cmd or NULL -- verify against full file.)
 */
2809 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2811 struct hci_dev *hdev = conn->hdev;
2812 struct mgmt_pending_cmd *cmd;
2814 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2815 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2818 if (cmd->user_data != conn)
/* Completion handler for a pending Pair Device command: send the final
 * mgmt reply with the peer address and @status, detach all pairing
 * callbacks from the connection so no further events fire for it, drop the
 * connection reference taken when the command was queued, and keep the
 * stored connection parameters (the device is now paired).
 * (Extraction-elided view: hci_conn_put/return lines are missing.)
 */
2827 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2829 struct mgmt_rp_pair_device rp;
2830 struct hci_conn *conn = cmd->user_data;
2833 bacpy(&rp.addr.bdaddr, &conn->dst);
2834 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2836 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2837 status, &rp, sizeof(rp));
2839 /* So we don't get further callbacks for this connection */
2840 conn->connect_cfm_cb = NULL;
2841 conn->security_cfm_cb = NULL;
2842 conn->disconn_cfm_cb = NULL;
2844 hci_conn_drop(conn);
2846 /* The device is paired so there is no need to remove
2847 * its connection parameters anymore.
2849 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing finishes: translate @complete into a
 * mgmt status and finish/remove the matching pending Pair Device command,
 * if one exists.  (Extraction-elided view: NULL check and braces missing.)
 */
2856 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2858 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2859 struct mgmt_pending_cmd *cmd;
2861 cmd = find_pairing(conn);
2863 cmd->cmd_complete(cmd, status);
2864 mgmt_pending_remove(cmd);
/* BR/EDR connection callback (connect/security/disconn) used while a Pair
 * Device command is in flight: map the HCI status to a mgmt status and
 * complete the pending pairing command.
 * (Extraction-elided view: the NULL-cmd early return is missing.)
 */
2868 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2870 struct mgmt_pending_cmd *cmd;
2872 BT_DBG("status %u", status);
2874 cmd = find_pairing(conn);
2876 BT_DBG("Unable to find a pending command");
2880 cmd->cmd_complete(cmd, mgmt_status(status));
2881 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb.  For LE, merely connecting does
 * not prove pairing succeeded, so (in the elided portion, presumably) only
 * failure statuses complete the command here -- verify against full file.
 */
2884 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2886 struct mgmt_pending_cmd *cmd;
2888 BT_DBG("status %u", status);
2893 cmd = find_pairing(conn);
2895 BT_DBG("Unable to find a pending command");
2899 cmd->cmd_complete(cmd, mgmt_status(status));
2900 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a remote
 * device.
 *
 * Validates the address type and requested IO capability, requires the
 * adapter to be powered and the device not already paired, then creates
 * either a BR/EDR ACL connection (hci_connect_acl) or an LE connection via
 * background scanning (hci_connect_le_scan).  For LE, connection parameters
 * are pre-registered so the peer is remembered for future connections.  A
 * pending command tracks the pairing; per-transport callbacks complete it.
 * (Extraction-elided view: goto labels, some error branches and closing
 * braces are missing from the visible text.)
 */
2903 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2906 struct mgmt_cp_pair_device *cp = data;
2907 struct mgmt_rp_pair_device rp;
2908 struct mgmt_pending_cmd *cmd;
2909 u8 sec_level, auth_type;
2910 struct hci_conn *conn;
2913 bt_dev_dbg(hdev, "sock %p", sk);
2915 memset(&rp, 0, sizeof(rp));
2916 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2917 rp.addr.type = cp->addr.type;
2919 if (!bdaddr_type_is_valid(cp->addr.type))
2920 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2921 MGMT_STATUS_INVALID_PARAMS,
2924 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2925 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2926 MGMT_STATUS_INVALID_PARAMS,
2931 if (!hdev_is_powered(hdev)) {
2932 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2933 MGMT_STATUS_NOT_POWERED, &rp,
2938 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2939 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2940 MGMT_STATUS_ALREADY_PAIRED, &rp,
/* Dedicated bonding at medium security is the baseline for pairing */
2945 sec_level = BT_SECURITY_MEDIUM;
2946 auth_type = HCI_AT_DEDICATED_BONDING;
2948 if (cp->addr.type == BDADDR_BREDR) {
2949 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2950 auth_type, CONN_REASON_PAIR_DEVICE);
2952 u8 addr_type = le_addr_type(cp->addr.type);
2953 struct hci_conn_params *p;
2955 /* When pairing a new device, it is expected to remember
2956 * this device for future connections. Adding the connection
2957 * parameter information ahead of time allows tracking
2958 * of the slave preferred values and will speed up any
2959 * further connection establishment.
2961 * If connection parameters already exist, then they
2962 * will be kept and this function does nothing.
2964 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2966 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2967 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2969 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2970 sec_level, HCI_LE_CONN_TIMEOUT,
2971 CONN_REASON_PAIR_DEVICE);
/* Map connect errors onto mgmt status codes */
2977 if (PTR_ERR(conn) == -EBUSY)
2978 status = MGMT_STATUS_BUSY;
2979 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2980 status = MGMT_STATUS_NOT_SUPPORTED;
2981 else if (PTR_ERR(conn) == -ECONNREFUSED)
2982 status = MGMT_STATUS_REJECTED;
2984 status = MGMT_STATUS_CONNECT_FAILED;
2986 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2987 status, &rp, sizeof(rp));
/* A callback already installed means another pairing is in progress */
2991 if (conn->connect_cfm_cb) {
2992 hci_conn_drop(conn);
2993 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2994 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2998 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3001 hci_conn_drop(conn);
3005 cmd->cmd_complete = pairing_complete;
3007 /* For LE, just connecting isn't a proof that the pairing finished */
3008 if (cp->addr.type == BDADDR_BREDR) {
3009 conn->connect_cfm_cb = pairing_complete_cb;
3010 conn->security_cfm_cb = pairing_complete_cb;
3011 conn->disconn_cfm_cb = pairing_complete_cb;
3013 conn->connect_cfm_cb = le_pairing_complete_cb;
3014 conn->security_cfm_cb = le_pairing_complete_cb;
3015 conn->disconn_cfm_cb = le_pairing_complete_cb;
3018 conn->io_capability = cp->io_cap;
/* Hold a reference to the connection for the pending command */
3019 cmd->user_data = hci_conn_get(conn);
3021 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3022 hci_conn_security(conn, sec_level, auth_type, true)) {
3023 cmd->cmd_complete(cmd, 0);
3024 mgmt_pending_remove(cmd);
3030 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-flight Pair Device
 * command for the given address.
 *
 * Finds the pending pairing command, verifies the address matches its
 * connection, completes it with MGMT_STATUS_CANCELLED, then removes any
 * stored link key / SMP pairing state and terminates the link if it was
 * created solely for the pairing attempt.
 *
 * NOTE(review): @conn is dereferenced (conn_reason check) after
 * cmd->cmd_complete() has run, and pairing_complete() drops a connection
 * reference -- verify against the full file that a reference is still held
 * here (later upstream kernels hardened this path).
 * (Extraction-elided view: goto labels and braces are missing.)
 */
3034 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3037 struct mgmt_addr_info *addr = data;
3038 struct mgmt_pending_cmd *cmd;
3039 struct hci_conn *conn;
3042 bt_dev_dbg(hdev, "sock %p", sk);
3046 if (!hdev_is_powered(hdev)) {
3047 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3048 MGMT_STATUS_NOT_POWERED);
3052 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3055 MGMT_STATUS_INVALID_PARAMS);
3059 conn = cmd->user_data;
/* The cancel must target the same device the pending pairing is for */
3061 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3063 MGMT_STATUS_INVALID_PARAMS);
3067 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3068 mgmt_pending_remove(cmd);
3070 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3071 addr, sizeof(*addr));
3073 /* Since user doesn't want to proceed with the connection, abort any
3074 * ongoing pairing and then terminate the link if it was created
3075 * because of the pair device action.
3077 if (addr->type == BDADDR_BREDR)
3078 hci_remove_link_key(hdev, &addr->bdaddr);
3080 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3081 le_addr_type(addr->type));
3083 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3084 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3087 hci_dev_unlock(hdev);
/* Common backend for all user confirmation / passkey (negative) replies.
 *
 * @mgmt_op:  the mgmt opcode being answered (used in all replies).
 * @hci_op:   the HCI command to forward for BR/EDR links.
 * @passkey:  passkey value, only used for HCI_OP_USER_PASSKEY_REPLY.
 *
 * For LE addresses the reply is routed to the SMP layer instead of HCI.
 * For BR/EDR a pending command is queued and the HCI command sent; passkey
 * replies carry bdaddr+passkey, all other ops carry only the bdaddr.
 * (Extraction-elided view: locking, goto labels and braces are missing.)
 */
3091 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3092 struct mgmt_addr_info *addr, u16 mgmt_op,
3093 u16 hci_op, __le32 passkey)
3095 struct mgmt_pending_cmd *cmd;
3096 struct hci_conn *conn;
3101 if (!hdev_is_powered(hdev)) {
3102 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3103 MGMT_STATUS_NOT_POWERED, addr,
3108 if (addr->type == BDADDR_BREDR)
3109 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3111 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3112 le_addr_type(addr->type));
3115 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3116 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing replies go through SMP, not through HCI commands */
3121 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3122 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3124 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3125 MGMT_STATUS_SUCCESS, addr,
3128 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3129 MGMT_STATUS_FAILED, addr,
3135 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3141 cmd->cmd_complete = addr_cmd_complete;
3143 /* Continue with pairing via HCI */
3144 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3145 struct hci_cp_user_passkey_reply cp;
3147 bacpy(&cp.bdaddr, &addr->bdaddr);
3148 cp.passkey = passkey;
3149 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3151 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3155 mgmt_pending_remove(cmd);
3158 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over user_pairing_resp()
 * forwarding HCI_OP_PIN_CODE_NEG_REPLY (no passkey).
 */
3162 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3163 void *data, u16 len)
3165 struct mgmt_cp_pin_code_neg_reply *cp = data;
3167 bt_dev_dbg(hdev, "sock %p", sk);
3169 return user_pairing_resp(sk, hdev, &cp->addr,
3170 MGMT_OP_PIN_CODE_NEG_REPLY,
3171 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler.  Unlike its siblings this one
 * explicitly rejects payloads whose length differs from the command
 * structure before delegating to user_pairing_resp().
 */
3174 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3177 struct mgmt_cp_user_confirm_reply *cp = data;
3179 bt_dev_dbg(hdev, "sock %p", sk);
3181 if (len != sizeof(*cp))
3182 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3183 MGMT_STATUS_INVALID_PARAMS);
3185 return user_pairing_resp(sk, hdev, &cp->addr,
3186 MGMT_OP_USER_CONFIRM_REPLY,
3187 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: wrapper over user_pairing_resp()
 * forwarding HCI_OP_USER_CONFIRM_NEG_REPLY (no passkey).
 */
3190 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3191 void *data, u16 len)
3193 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3195 bt_dev_dbg(hdev, "sock %p", sk);
3197 return user_pairing_resp(sk, hdev, &cp->addr,
3198 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3199 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: wrapper over user_pairing_resp(),
 * the only caller that passes a non-zero passkey through to HCI.
 */
3202 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3205 struct mgmt_cp_user_passkey_reply *cp = data;
3207 bt_dev_dbg(hdev, "sock %p", sk);
3209 return user_pairing_resp(sk, hdev, &cp->addr,
3210 MGMT_OP_USER_PASSKEY_REPLY,
3211 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: wrapper over user_pairing_resp()
 * forwarding HCI_OP_USER_PASSKEY_NEG_REPLY (no passkey).
 */
3214 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3215 void *data, u16 len)
3217 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3219 bt_dev_dbg(hdev, "sock %p", sk);
3221 return user_pairing_resp(sk, hdev, &cp->addr,
3222 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3223 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Force re-scheduling of advertising when data covered by @flags changed
 * (e.g. local name or appearance): if the current advertising instance uses
 * any of @flags, cancel its timeout and schedule the next instance through
 * a fresh HCI request.  (Extraction-elided view: early returns and braces
 * are missing from the visible text.)
 */
3226 static void adv_expire(struct hci_dev *hdev, u32 flags)
3228 struct adv_info *adv_instance;
3229 struct hci_request req;
3232 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3236 /* stop if current instance doesn't need to be changed */
3237 if (!(adv_instance->flags & flags))
3240 cancel_adv_timeout(hdev);
3242 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3246 hci_req_init(&req, hdev);
3247 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3252 hci_req_run(&req, NULL);
/* HCI request completion handler for Set Local Name: report status (or
 * success) back to the pending mgmt command's socket and, when advertising
 * is active, expire advertising instances that embed the local name so they
 * get refreshed.  (Extraction-elided view: lock/unlock pairing and braces
 * are missing from the visible text.)
 */
3255 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3257 struct mgmt_cp_set_local_name *cp;
3258 struct mgmt_pending_cmd *cmd;
3260 bt_dev_dbg(hdev, "status 0x%02x", status);
3264 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3271 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3272 mgmt_status(status));
3274 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Name is carried in adv data; refresh instances that include it */
3277 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3278 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3281 mgmt_pending_remove(cmd);
3284 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the adapter's device name and
 * short name.
 *
 * Short-circuits with success when both names are unchanged.  When powered
 * off, the names are stored and a LOCAL_NAME_CHANGED event is emitted
 * without touching the controller.  When powered on, an HCI request updates
 * the controller name/EIR (BR/EDR) and the scan response data (LE
 * advertising), completed via set_name_complete().
 * (Extraction-elided view: goto labels and braces are missing.)
 */
3287 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3290 struct mgmt_cp_set_local_name *cp = data;
3291 struct mgmt_pending_cmd *cmd;
3292 struct hci_request req;
3295 bt_dev_dbg(hdev, "sock %p", sk);
3299 /* If the old values are the same as the new ones just return a
3300 * direct command complete event.
3302 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3303 !memcmp(hdev->short_name, cp->short_name,
3304 sizeof(hdev->short_name))) {
3305 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3310 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3312 if (!hdev_is_powered(hdev)) {
3313 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3315 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3320 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3321 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3322 ext_info_changed(hdev, sk);
3327 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3333 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3335 hci_req_init(&req, hdev);
3337 if (lmp_bredr_capable(hdev)) {
3338 __hci_req_update_name(&req);
3339 __hci_req_update_eir(&req);
3342 /* The name is stored in the scan response data and so
3343 * no need to udpate the advertising data here.
3345 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3346 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance)
3348 err = hci_req_run(&req, set_name_complete);
3350 mgmt_pending_remove(cmd);
3353 hci_dev_unlock(hdev);
/* MGMT_OP_SET_APPEARANCE handler: store the LE appearance value; rejected
 * on non-LE controllers.  When the value actually changes and advertising
 * is active, instances carrying the appearance are expired for refresh, and
 * an extended-info-changed event is broadcast.  Always replies success
 * after the (optional) update.  (Extraction-elided view: lock call and
 * braces are missing from the visible text.)
 */
3357 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3360 struct mgmt_cp_set_appearance *cp = data;
3364 bt_dev_dbg(hdev, "sock %p", sk);
3366 if (!lmp_le_capable(hdev))
3367 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3368 MGMT_STATUS_NOT_SUPPORTED);
3370 appearance = le16_to_cpu(cp->appearance);
3374 if (hdev->appearance != appearance) {
3375 hdev->appearance = appearance;
3377 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3378 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3380 ext_info_changed(hdev, sk);
3383 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3386 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, selected and
 * configurable PHYs for the adapter.  (The "confguration" spelling in the
 * response struct name comes from the UAPI header and cannot be changed
 * here.)
 */
3391 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3392 void *data, u16 len)
3394 struct mgmt_rp_get_phy_confguration rp;
3396 bt_dev_dbg(hdev, "sock %p", sk);
3400 memset(&rp, 0, sizeof(rp));
3402 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3403 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3404 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3406 hci_dev_unlock(hdev);
3408 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event carrying the currently
 * selected PHYs to all mgmt sockets except @skip.
 */
3412 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3414 struct mgmt_ev_phy_configuration_changed ev;
3416 memset(&ev, 0, sizeof(ev));
3418 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3420 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion handler for the LE Set Default PHY request issued by
 * set_phy_configuration(): reply to the pending command's socket with the
 * mapped status, and on success broadcast the PHY change to other sockets.
 * (Extraction-elided view: lock call and braces are missing.)
 */
3424 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3425 u16 opcode, struct sk_buff *skb)
3427 struct mgmt_pending_cmd *cmd;
3429 bt_dev_dbg(hdev, "status 0x%02x", status);
3433 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3438 mgmt_cmd_status(cmd->sk, hdev->id,
3439 MGMT_OP_SET_PHY_CONFIGURATION,
3440 mgmt_status(status));
3442 mgmt_cmd_complete(cmd->sk, hdev->id,
3443 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3446 mgmt_phy_configuration_changed(hdev, cmd->sk);
3449 mgmt_pending_remove(cmd);
3452 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PHY_CONFIGURATION handler: apply a new PHY selection.
 *
 * Validates that the selection is a subset of supported PHYs and that all
 * non-configurable (always-on) PHYs remain selected.  BR/EDR slot/rate
 * selections are translated into the ACL packet-type mask (note the EDR
 * bits are inverted: HCI_2DHx/HCI_3DHx set means "do NOT use").  If only
 * BR/EDR bits changed, the update completes immediately; LE PHY changes
 * are sent to the controller via HCI_OP_LE_SET_DEFAULT_PHY and completed
 * asynchronously in set_default_phy_complete().
 * (Extraction-elided view: goto labels, some braces and the unlock/return
 * tail are missing from the visible text.)
 */
3455 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3456 void *data, u16 len)
3458 struct mgmt_cp_set_phy_confguration *cp = data;
3459 struct hci_cp_le_set_default_phy cp_phy;
3460 struct mgmt_pending_cmd *cmd;
3461 struct hci_request req;
3462 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3463 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3464 bool changed = false;
3467 bt_dev_dbg(hdev, "sock %p", sk);
3469 configurable_phys = get_configurable_phys(hdev);
3470 supported_phys = get_supported_phys(hdev);
3471 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject any PHY bit the controller does not support at all */
3473 if (selected_phys & ~supported_phys)
3474 return mgmt_cmd_status(sk, hdev->id,
3475 MGMT_OP_SET_PHY_CONFIGURATION,
3476 MGMT_STATUS_INVALID_PARAMS);
3478 unconfigure_phys = supported_phys & ~configurable_phys;
/* Mandatory (non-configurable) PHYs must all remain selected */
3480 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3481 return mgmt_cmd_status(sk, hdev->id,
3482 MGMT_OP_SET_PHY_CONFIGURATION,
3483 MGMT_STATUS_INVALID_PARAMS);
3485 if (selected_phys == get_selected_phys(hdev))
3486 return mgmt_cmd_complete(sk, hdev->id,
3487 MGMT_OP_SET_PHY_CONFIGURATION,
3492 if (!hdev_is_powered(hdev)) {
3493 err = mgmt_cmd_status(sk, hdev->id,
3494 MGMT_OP_SET_PHY_CONFIGURATION,
3495 MGMT_STATUS_REJECTED);
3499 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3500 err = mgmt_cmd_status(sk, hdev->id,
3501 MGMT_OP_SET_PHY_CONFIGURATION,
/* Translate BR/EDR PHY bits into the ACL packet-type mask.  For basic
 * rate, setting the bit enables the packet types; for EDR the HCI bits
 * have inverted polarity (set == "may not be used").
 */
3506 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3507 pkt_type |= (HCI_DH3 | HCI_DM3);
3509 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3511 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3512 pkt_type |= (HCI_DH5 | HCI_DM5);
3514 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3516 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3517 pkt_type &= ~HCI_2DH1;
3519 pkt_type |= HCI_2DH1;
3521 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3522 pkt_type &= ~HCI_2DH3;
3524 pkt_type |= HCI_2DH3;
3526 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3527 pkt_type &= ~HCI_2DH5;
3529 pkt_type |= HCI_2DH5;
3531 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3532 pkt_type &= ~HCI_3DH1;
3534 pkt_type |= HCI_3DH1;
3536 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3537 pkt_type &= ~HCI_3DH3;
3539 pkt_type |= HCI_3DH3;
3541 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3542 pkt_type &= ~HCI_3DH5;
3544 pkt_type |= HCI_3DH5;
3546 if (pkt_type != hdev->pkt_type) {
3547 hdev->pkt_type = pkt_type;
/* No LE PHY change requested: finish synchronously */
3551 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3552 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3554 mgmt_phy_configuration_changed(hdev, sk);
3556 err = mgmt_cmd_complete(sk, hdev->id,
3557 MGMT_OP_SET_PHY_CONFIGURATION,
3563 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3570 hci_req_init(&req, hdev);
3572 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction */
3574 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3575 cp_phy.all_phys |= 0x01;
3577 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3578 cp_phy.all_phys |= 0x02;
3580 if (selected_phys & MGMT_PHY_LE_1M_TX)
3581 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3583 if (selected_phys & MGMT_PHY_LE_2M_TX)
3584 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3586 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3587 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3589 if (selected_phys & MGMT_PHY_LE_1M_RX)
3590 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3592 if (selected_phys & MGMT_PHY_LE_2M_RX)
3593 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3595 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3596 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3598 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3600 err = hci_req_run_skb(&req, set_default_phy_complete);
3602 mgmt_pending_remove(cmd);
3605 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the adapter's blocked-key list.
 *
 * Validates key_count against the maximum that fits in a u16-sized payload
 * and requires the payload length to match exactly (struct_size check).
 * The existing list is cleared first, then each entry is copied into a
 * freshly allocated blocked_key and added RCU-safe to hdev->blocked_keys.
 * Note @err carries a mgmt status code here (SUCCESS / NO_RESOURCES), not
 * an errno, and is passed straight to mgmt_cmd_complete().
 * (Extraction-elided view: allocation-failure break and braces missing.)
 */
3610 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3613 int err = MGMT_STATUS_SUCCESS;
3614 struct mgmt_cp_set_blocked_keys *keys = data;
3615 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3616 sizeof(struct mgmt_blocked_key_info));
3617 u16 key_count, expected_len;
3620 bt_dev_dbg(hdev, "sock %p", sk);
3622 key_count = __le16_to_cpu(keys->key_count);
3623 if (key_count > max_key_count) {
3624 bt_dev_err(hdev, "too big key_count value %u", key_count);
3625 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3626 MGMT_STATUS_INVALID_PARAMS);
3629 expected_len = struct_size(keys, keys, key_count);
3630 if (expected_len != len) {
3631 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3634 MGMT_STATUS_INVALID_PARAMS);
/* Replace semantics: wipe the old list before installing the new one */
3639 hci_blocked_keys_clear(hdev);
3641 for (i = 0; i < keys->key_count; ++i) {
3642 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3645 err = MGMT_STATUS_NO_RESOURCES;
3649 b->type = keys->keys[i].type;
3650 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3651 list_add_rcu(&b->list, &hdev->blocked_keys);
3653 hci_dev_unlock(hdev);
3655 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband-speech setting.
 *
 * Requires the HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED quirk and a boolean
 * parameter.  The flag may only be flipped while the controller is powered
 * off (changing it while powered is rejected as it would not take effect).
 * Replies with the current settings and broadcasts New Settings when the
 * value actually changed.  (Extraction-elided view: busy-status argument,
 * goto labels and braces are missing from the visible text.)
 */
3659 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3660 void *data, u16 len)
3662 struct mgmt_mode *cp = data;
3664 bool changed = false;
3666 bt_dev_dbg(hdev, "sock %p", sk);
3668 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3669 return mgmt_cmd_status(sk, hdev->id,
3670 MGMT_OP_SET_WIDEBAND_SPEECH,
3671 MGMT_STATUS_NOT_SUPPORTED);
3673 if (cp->val != 0x00 && cp->val != 0x01)
3674 return mgmt_cmd_status(sk, hdev->id,
3675 MGMT_OP_SET_WIDEBAND_SPEECH,
3676 MGMT_STATUS_INVALID_PARAMS);
3680 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3681 err = mgmt_cmd_status(sk, hdev->id,
3682 MGMT_OP_SET_WIDEBAND_SPEECH,
/* While powered, the flag cannot be changed from its current value */
3687 if (hdev_is_powered(hdev) &&
3688 !!cp->val != hci_dev_test_flag(hdev,
3689 HCI_WIDEBAND_SPEECH_ENABLED)) {
3690 err = mgmt_cmd_status(sk, hdev->id,
3691 MGMT_OP_SET_WIDEBAND_SPEECH,
3692 MGMT_STATUS_REJECTED);
3697 changed = !hci_dev_test_and_set_flag(hdev,
3698 HCI_WIDEBAND_SPEECH_ENABLED);
3700 changed = hci_dev_test_and_clear_flag(hdev,
3701 HCI_WIDEBAND_SPEECH_ENABLED);
3703 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3708 err = new_settings(hdev, sk);
3711 hci_dev_unlock(hdev);
/* MGMT_OP_READ_SECURITY_INFO handler: build an EIR-style TLV list of
 * security capabilities into a stack buffer and return it.
 *
 * Tag 0x01: flags byte -- bit0/bit1 remote public key validation (BR/EDR
 * is gated on the Read Simple Pairing Options command, LE is always set),
 * bit2/bit3 encryption key size enforcement (BR/EDR gated on Read
 * Encryption Key Size, LE always enforced by SMP).
 * Tag 0x02: BR/EDR max encryption key size (when readable).
 * Tag 0x03: LE max encryption key size (SMP_MAX_ENC_KEY_SIZE).
 * (Extraction-elided view: buf declaration, flag/sec_len locals and lock
 * call are missing from the visible text.)
 */
3715 static int read_security_info(struct sock *sk, struct hci_dev *hdev,
3716 void *data, u16 data_len)
3719 struct mgmt_rp_read_security_info *rp = (void *)buf;
3723 bt_dev_dbg(hdev, "sock %p", sk);
3725 memset(&buf, 0, sizeof(buf));
3729 /* When the Read Simple Pairing Options command is supported, then
3730 * the remote public key validation is supported.
3732 if (hdev->commands[41] & 0x08)
3733 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3735 flags |= 0x02; /* Remote public key validation (LE) */
3737 /* When the Read Encryption Key Size command is supported, then the
3738 * encryption key size is enforced.
3740 if (hdev->commands[20] & 0x10)
3741 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3743 flags |= 0x08; /* Encryption key size enforcement (LE) */
3745 sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);
3747 /* When the Read Simple Pairing Options command is supported, then
3748 * also max encryption key size information is provided.
3750 if (hdev->commands[41] & 0x08)
3751 sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
3752 hdev->max_enc_key_size);
3754 sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);
3756 rp->sec_len = cpu_to_le16(sec_len);
3758 hci_dev_unlock(hdev);
3760 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
3761 rp, sizeof(*rp) + sec_len);
/* UUIDs identifying the experimental features exposed through the
 * Read/Set Experimental Features mgmt commands.  Stored in little-endian
 * (reversed) byte order as required by the mgmt wire format; the canonical
 * string form is given in the comment above each table.
 */
3764 #ifdef CONFIG_BT_FEATURE_DEBUG
3765 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3766 static const u8 debug_uuid[16] = {
3767 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3768 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3772 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3773 static const u8 simult_central_periph_uuid[16] = {
3774 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3775 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3778 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3779 static const u8 rpa_resolution_uuid[16] = {
3780 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3781 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list the experimental features
 * currently applicable, each as a 16-byte UUID plus 32-bit flags word
 * (bit0 = enabled, bit1 = changing it alters supported settings).
 *
 * Features reported: kernel debug toggle (CONFIG_BT_FEATURE_DEBUG),
 * simultaneous central+peripheral LE support (when the controller's LE
 * states advertise it), and LL privacy / RPA resolution (when usable).
 * Also opts this socket into future EXP_FEATURE_CHANGED events.  Works
 * with hdev == NULL for the non-controller index.
 * (Extraction-elided view: idx/flags locals and braces are missing.)
 */
3784 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3785 void *data, u16 data_len)
3787 char buf[62]; /* Enough space for 3 features */
3788 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3792 bt_dev_dbg(hdev, "sock %p", sk);
3794 memset(&buf, 0, sizeof(buf));
3796 #ifdef CONFIG_BT_FEATURE_DEBUG
3798 flags = bt_dbg_get() ? BIT(0) : 0;
3800 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3801 rp->features[idx].flags = cpu_to_le32(flags);
/* Controller must support central, peripheral and simultaneous roles */
3807 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3808 (hdev->le_states[4] & 0x08) && /* Central */
3809 (hdev->le_states[4] & 0x40) && /* Peripheral */
3810 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3815 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3816 rp->features[idx].flags = cpu_to_le32(flags);
3820 if (hdev && use_ll_privacy(hdev)) {
3821 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3822 flags = BIT(0) | BIT(1);
3826 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3827 rp->features[idx].flags = cpu_to_le32(flags);
3831 rp->feature_count = cpu_to_le16(idx);
3833 /* After reading the experimental features information, enable
3834 * the events to update client on any future change.
3836 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3838 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3839 MGMT_OP_READ_EXP_FEATURES_INFO,
3840 0, rp, sizeof(*rp) + (20 * idx));
/* Broadcast an Experimental Feature Changed event for the LL privacy
 * feature to all sockets that opted in (except @skip).  BIT(1) is always
 * set because toggling LL privacy also changes the supported settings.
 */
3843 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3846 struct mgmt_ev_exp_feature_changed ev;
3848 memset(&ev, 0, sizeof(ev));
3849 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3850 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3852 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3854 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Broadcast an Experimental Feature Changed event for the kernel debug
 * feature.  Sent with a NULL hdev since debug is a global (non-controller)
 * feature; delivered only to sockets that opted into exp feature events.
 */
3858 #ifdef CONFIG_BT_FEATURE_DEBUG
3859 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3861 struct mgmt_ev_exp_feature_changed ev;
3863 memset(&ev, 0, sizeof(ev));
3864 memcpy(ev.uuid, debug_uuid, 16);
3865 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3867 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3869 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* MGMT_OP_SET_EXP_FEATURE handler: enable/disable an experimental feature
 * identified by UUID.
 *
 * ZERO_KEY UUID: mass-disable -- turns off the debug feature (if built in)
 * and LL privacy (only while the controller is powered off), emitting
 * changed events for anything actually disabled.
 *
 * debug_uuid: global feature; must be sent on the non-controller index,
 * payload is a single boolean octet, toggles bt_dbg.
 *
 * rpa_resolution_uuid: per-controller; requires the controller index and
 * power off; sets/clears HCI_ENABLE_LL_PRIVACY (enabling also clears
 * HCI_ADVERTISING since SW-based advertising conflicts with LL privacy).
 *
 * Unknown UUIDs are rejected with NOT_SUPPORTED.
 * (Extraction-elided view: several if/else lines, braces and returns are
 * missing from the visible text.)
 */
3873 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3874 void *data, u16 data_len)
3876 struct mgmt_cp_set_exp_feature *cp = data;
3877 struct mgmt_rp_set_exp_feature rp;
3879 bt_dev_dbg(hdev, "sock %p", sk);
/* All-zero UUID: disable all experimental features */
3881 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3882 memset(rp.uuid, 0, 16);
3883 rp.flags = cpu_to_le32(0);
3885 #ifdef CONFIG_BT_FEATURE_DEBUG
3887 bool changed = bt_dbg_get();
3892 exp_debug_feature_changed(false, sk);
3896 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3897 bool changed = hci_dev_test_flag(hdev,
3898 HCI_ENABLE_LL_PRIVACY);
3900 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3903 exp_ll_privacy_feature_changed(false, hdev, sk);
3906 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3908 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3909 MGMT_OP_SET_EXP_FEATURE, 0,
3913 #ifdef CONFIG_BT_FEATURE_DEBUG
3914 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3918 /* Command requires to use the non-controller index */
3920 return mgmt_cmd_status(sk, hdev->id,
3921 MGMT_OP_SET_EXP_FEATURE,
3922 MGMT_STATUS_INVALID_INDEX);
3924 /* Parameters are limited to a single octet */
3925 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3926 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3927 MGMT_OP_SET_EXP_FEATURE,
3928 MGMT_STATUS_INVALID_PARAMS);
3930 /* Only boolean on/off is supported */
3931 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3932 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3933 MGMT_OP_SET_EXP_FEATURE,
3934 MGMT_STATUS_INVALID_PARAMS);
3936 val = !!cp->param[0];
3937 changed = val ? !bt_dbg_get() : bt_dbg_get();
3940 memcpy(rp.uuid, debug_uuid, 16);
3941 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3943 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3945 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3946 MGMT_OP_SET_EXP_FEATURE, 0,
3950 exp_debug_feature_changed(val, sk);
3956 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3961 /* Command requires to use the controller index */
3963 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3964 MGMT_OP_SET_EXP_FEATURE,
3965 MGMT_STATUS_INVALID_INDEX);
3967 /* Changes can only be made when controller is powered down */
3968 if (hdev_is_powered(hdev))
3969 return mgmt_cmd_status(sk, hdev->id,
3970 MGMT_OP_SET_EXP_FEATURE,
3971 MGMT_STATUS_NOT_POWERED);
3973 /* Parameters are limited to a single octet */
3974 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3975 return mgmt_cmd_status(sk, hdev->id,
3976 MGMT_OP_SET_EXP_FEATURE,
3977 MGMT_STATUS_INVALID_PARAMS);
3979 /* Only boolean on/off is supported */
3980 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3981 return mgmt_cmd_status(sk, hdev->id,
3982 MGMT_OP_SET_EXP_FEATURE,
3983 MGMT_STATUS_INVALID_PARAMS);
3985 val = !!cp->param[0];
3988 changed = !hci_dev_test_flag(hdev,
3989 HCI_ENABLE_LL_PRIVACY);
3990 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3991 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
3993 /* Enable LL privacy + supported settings changed */
3994 flags = BIT(0) | BIT(1);
3996 changed = hci_dev_test_flag(hdev,
3997 HCI_ENABLE_LL_PRIVACY);
3998 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4000 /* Disable LL privacy + supported settings changed */
4004 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4005 rp.flags = cpu_to_le32(flags);
4007 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4009 err = mgmt_cmd_complete(sk, hdev->id,
4010 MGMT_OP_SET_EXP_FEATURE, 0,
4014 exp_ll_privacy_feature_changed(val, hdev, sk);
4019 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4020 MGMT_OP_SET_EXP_FEATURE,
4021 MGMT_STATUS_NOT_SUPPORTED);
4024 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* MGMT_OP_GET_DEVICE_FLAGS handler: return supported and current device
 * flags for a device on the accept list (BR/EDR) or in the LE connection
 * parameters.  @status stays INVALID_PARAMS unless a matching entry is
 * found.  (Extraction-elided view: NULL-check branches and goto labels are
 * missing from the visible text.)
 */
4026 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4029 struct mgmt_cp_get_device_flags *cp = data;
4030 struct mgmt_rp_get_device_flags rp;
4031 struct bdaddr_list_with_flags *br_params;
4032 struct hci_conn_params *params;
4033 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4034 u32 current_flags = 0;
4035 u8 status = MGMT_STATUS_INVALID_PARAMS;
4037 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4038 &cp->addr.bdaddr, cp->addr.type);
4042 memset(&rp, 0, sizeof(rp));
4044 if (cp->addr.type == BDADDR_BREDR) {
4045 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4051 current_flags = br_params->current_flags;
4053 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4054 le_addr_type(cp->addr.type));
4059 current_flags = params->current_flags;
4062 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4063 rp.addr.type = cp->addr.type;
4064 rp.supported_flags = cpu_to_le32(supported_flags);
4065 rp.current_flags = cpu_to_le32(current_flags);
4067 status = MGMT_STATUS_SUCCESS;
4070 hci_dev_unlock(hdev);
4072 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast a Device Flags Changed event for @bdaddr/@bdaddr_type to all
 * mgmt sockets except the originating @sk.
 */
4076 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4077 bdaddr_t *bdaddr, u8 bdaddr_type,
4078 u32 supported_flags, u32 current_flags)
4080 struct mgmt_ev_device_flags_changed ev;
4082 bacpy(&ev.addr.bdaddr, bdaddr);
4083 ev.addr.type = bdaddr_type;
4084 ev.supported_flags = cpu_to_le32(supported_flags);
4085 ev.current_flags = cpu_to_le32(current_flags);
4087 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: update the current flags of a device
 * on the BR/EDR accept list or in the LE connection parameters.
 *
 * Rejects flag bits outside SUPPORTED_DEVICE_FLAGS().  On success a
 * Device Flags Changed event is broadcast (after the lock is dropped);
 * unknown devices are logged and answered with INVALID_PARAMS.
 * (Extraction-elided view: lock call, goto label and braces are missing.)
 */
4090 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4093 struct mgmt_cp_set_device_flags *cp = data;
4094 struct bdaddr_list_with_flags *br_params;
4095 struct hci_conn_params *params;
4096 u8 status = MGMT_STATUS_INVALID_PARAMS;
4097 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4098 u32 current_flags = __le32_to_cpu(cp->current_flags);
4100 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4101 &cp->addr.bdaddr, cp->addr.type,
4102 __le32_to_cpu(current_flags));
/* Any bit not in the supported mask makes the request invalid */
4104 if ((supported_flags | current_flags) != supported_flags) {
4105 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4106 current_flags, supported_flags);
4112 if (cp->addr.type == BDADDR_BREDR) {
4113 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4118 br_params->current_flags = current_flags;
4119 status = MGMT_STATUS_SUCCESS;
4121 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4122 &cp->addr.bdaddr, cp->addr.type);
4125 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4126 le_addr_type(cp->addr.type));
4128 params->current_flags = current_flags;
4129 status = MGMT_STATUS_SUCCESS;
4131 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4133 le_addr_type(cp->addr.type));
4138 hci_dev_unlock(hdev);
4140 if (status == MGMT_STATUS_SUCCESS)
4141 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4142 supported_flags, current_flags);
4144 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4145 &cp->addr, sizeof(cp->addr));
/* Broadcast MGMT_EV_ADV_MONITOR_ADDED carrying the new monitor's handle
 * (little-endian on the wire). */
4148 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4151 struct mgmt_ev_adv_monitor_added ev;
4153 ev.monitor_handle = cpu_to_le16(handle);
4155 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED carrying the removed monitor's
 * handle (little-endian on the wire).
 *
 * Fix: declare struct mgmt_ev_adv_monitor_removed — the event sent below
 * is MGMT_EV_ADV_MONITOR_REMOVED, but the original declared the "added"
 * event struct. Both events carry just the monitor handle, so the wire
 * format should be unchanged (verify against mgmt.h); the type is now
 * consistent with the event being emitted.
 */
4158 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4161 struct mgmt_ev_adv_monitor_removed ev;
4163 ev.monitor_handle = cpu_to_le16(handle);
4165 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported monitor
 * features (OR-patterns when the MSFT extension advertises it), limits,
 * and the handles of all currently registered monitors.
 * NOTE(review): extraction gaps — the hci_dev_lock(), the !rp allocation
 * check, kfree() and final return are missing from this view.
 */
4168 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4169 void *data, u16 len)
4171 struct adv_monitor *monitor = NULL;
4172 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4175 __u32 supported = 0;
4176 __u16 num_handles = 0;
4177 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4179 BT_DBG("request for %s", hdev->name);
/* OR-pattern monitoring is only offered when the controller's MSFT
 * extension supports LE advertisement monitoring. */
4183 if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
4184 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Collect every registered handle; presumably bounded by the add-path
 * limit so handles[] cannot overflow — TODO confirm in hci_add_adv_monitor. */
4186 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
4187 handles[num_handles++] = monitor->handle;
4190 hci_dev_unlock(hdev);
/* Variable-length reply: fixed header plus one u16 per handle. */
4192 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4193 rp = kmalloc(rp_size, GFP_KERNEL);
4197 /* Once controller-based monitoring is in place, the enabled_features
4198 * should reflect the use.
4200 rp->supported_features = cpu_to_le32(supported);
4201 rp->enabled_features = 0;
4202 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4203 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4204 rp->num_handles = cpu_to_le16(num_handles);
4206 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4208 err = mgmt_cmd_complete(sk, hdev->id,
4209 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4210 MGMT_STATUS_SUCCESS, rp, rp_size);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: validate and copy the
 * caller-supplied patterns into a new adv_monitor, register it via
 * hci_add_adv_monitor(), and reply with the assigned handle.
 * NOTE(review): extraction gaps — allocation failure checks, goto labels
 * (the cleanup path at the tail), and several braces are missing here.
 */
4217 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4218 void *data, u16 len)
4220 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4221 struct mgmt_rp_add_adv_patterns_monitor rp;
4222 struct adv_monitor *m = NULL;
4223 struct adv_pattern *p = NULL;
4224 unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
4225 __u8 cp_ofst = 0, cp_len = 0;
4228 BT_DBG("request for %s", hdev->name);
/* Must carry at least one pattern beyond the fixed header. */
4230 if (len <= sizeof(*cp) || cp->pattern_count == 0) {
4231 err = mgmt_cmd_status(sk, hdev->id,
4232 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4233 MGMT_STATUS_INVALID_PARAMS);
4237 m = kmalloc(sizeof(*m), GFP_KERNEL);
4243 INIT_LIST_HEAD(&m->patterns);
4246 for (i = 0; i < cp->pattern_count; i++) {
/* Cap the number of patterns per monitor. */
4247 if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
4248 err = mgmt_cmd_status(sk, hdev->id,
4249 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4250 MGMT_STATUS_INVALID_PARAMS);
/* Each pattern's offset+length must stay inside one AD structure. */
4254 cp_ofst = cp->patterns[i].offset;
4255 cp_len = cp->patterns[i].length;
4256 if (cp_ofst >= HCI_MAX_AD_LENGTH ||
4257 cp_len > HCI_MAX_AD_LENGTH ||
4258 (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
4259 err = mgmt_cmd_status(sk, hdev->id,
4260 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4261 MGMT_STATUS_INVALID_PARAMS);
4265 p = kmalloc(sizeof(*p), GFP_KERNEL);
4271 p->ad_type = cp->patterns[i].ad_type;
4272 p->offset = cp->patterns[i].offset;
4273 p->length = cp->patterns[i].length;
4274 memcpy(p->value, cp->patterns[i].value, p->length);
4276 INIT_LIST_HEAD(&p->list);
4277 list_add(&p->list, &m->patterns);
/* Declared pattern_count must match what was actually consumed. */
4280 if (mp_cnt != cp->pattern_count) {
4281 err = mgmt_cmd_status(sk, hdev->id,
4282 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4283 MGMT_STATUS_INVALID_PARAMS);
4289 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4291 err = hci_add_adv_monitor(hdev, m);
4293 if (err == -ENOSPC) {
4294 mgmt_cmd_status(sk, hdev->id,
4295 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4296 MGMT_STATUS_NO_RESOURCES);
/* Count increase is the signal that a monitor was really added. */
4301 if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
4302 mgmt_adv_monitor_added(sk, hdev, m->handle);
4304 hci_dev_unlock(hdev);
4306 rp.monitor_handle = cpu_to_le16(m->handle);
4308 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4309 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Error tail: unlock and free the partially built monitor. */
4312 hci_dev_unlock(hdev);
4315 hci_free_adv_monitor(m);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: remove the monitor identified by
 * the caller-supplied handle and echo the handle back on success.
 * NOTE(review): extraction gaps — hci_dev_lock() and the error-tail
 * return are missing from this view.
 */
4319 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4320 void *data, u16 len)
4322 struct mgmt_cp_remove_adv_monitor *cp = data;
4323 struct mgmt_rp_remove_adv_monitor rp;
4324 unsigned int prev_adv_monitors_cnt;
4328 BT_DBG("request for %s", hdev->name);
4332 handle = __le16_to_cpu(cp->monitor_handle);
4333 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4335 err = hci_remove_adv_monitor(hdev, handle);
4336 if (err == -ENOENT) {
4337 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4338 MGMT_STATUS_INVALID_INDEX);
/* Count decrease is the signal that a monitor was really removed. */
4342 if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
4343 mgmt_adv_monitor_removed(sk, hdev, handle);
4345 hci_dev_unlock(hdev);
/* Echo the handle in its original little-endian form. */
4347 rp.monitor_handle = cp->monitor_handle;
4349 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4350 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4353 hci_dev_unlock(hdev);
/* HCI request completion callback for Read Local OOB Data: translate the
 * controller's reply (legacy P-192-only or extended P-192+P-256 format,
 * selected by @opcode) into the mgmt reply and complete the pending
 * MGMT_OP_READ_LOCAL_OOB_DATA command.
 * NOTE(review): extraction gaps — the !cmd early return, goto targets and
 * the else line of the opcode branch are missing from this view.
 */
4357 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4358 u16 opcode, struct sk_buff *skb)
4360 struct mgmt_rp_read_local_oob_data mgmt_rp;
4361 size_t rp_size = sizeof(mgmt_rp);
4362 struct mgmt_pending_cmd *cmd;
4364 bt_dev_dbg(hdev, "status %u", status);
4366 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4370 if (status || !skb) {
4371 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4372 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4376 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4378 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4379 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a truncated controller reply. */
4381 if (skb->len < sizeof(*rp)) {
4382 mgmt_cmd_status(cmd->sk, hdev->id,
4383 MGMT_OP_READ_LOCAL_OOB_DATA,
4384 MGMT_STATUS_FAILED);
4388 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4389 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy reply carries no P-256 data: shrink the mgmt reply so the
 * 256-bit fields are not sent. */
4391 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4393 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4395 if (skb->len < sizeof(*rp)) {
4396 mgmt_cmd_status(cmd->sk, hdev->id,
4397 MGMT_OP_READ_LOCAL_OOB_DATA,
4398 MGMT_STATUS_FAILED);
4402 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4403 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4405 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4406 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4409 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4410 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4413 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue an HCI Read Local OOB
 * (Extended) Data request; the reply is delivered asynchronously via
 * read_local_oob_data_complete().
 * NOTE(review): extraction gaps — hci_dev_lock(), the BUSY status line,
 * !cmd check, the else of the SC branch and the final return are missing.
 */
4416 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4417 void *data, u16 data_len)
4419 struct mgmt_pending_cmd *cmd;
4420 struct hci_request req;
4423 bt_dev_dbg(hdev, "sock %p", sk);
4427 if (!hdev_is_powered(hdev)) {
4428 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4429 MGMT_STATUS_NOT_POWERED);
4433 if (!lmp_ssp_capable(hdev)) {
4434 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4435 MGMT_STATUS_NOT_SUPPORTED);
/* Only one outstanding read at a time. */
4439 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4440 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4445 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4451 hci_req_init(&req, hdev);
/* Prefer the extended variant when BR/EDR Secure Connections is on, so
 * P-256 values are returned as well. */
4453 if (bredr_sc_enabled(hdev))
4454 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4456 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4458 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4460 mgmt_pending_remove(cmd);
4463 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote OOB pairing data.
 * Accepts two command sizes: the legacy form (P-192 hash/rand, BR/EDR
 * only) and the extended form (P-192 + P-256, LE allowed). Zero-valued
 * key halves disable the corresponding curve's OOB data.
 * NOTE(review): extraction gaps — hci_dev_lock(), several if/else and
 * goto lines, the hash256/rand256 argument of the ext call, and the final
 * return are missing from this view.
 */
4467 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4468 void *data, u16 len)
4470 struct mgmt_addr_info *addr = data;
4473 bt_dev_dbg(hdev, "sock %p", sk);
4475 if (!bdaddr_type_is_valid(addr->type))
4476 return mgmt_cmd_complete(sk, hdev->id,
4477 MGMT_OP_ADD_REMOTE_OOB_DATA,
4478 MGMT_STATUS_INVALID_PARAMS,
4479 addr, sizeof(*addr));
/* Legacy (P-192 only) command layout. */
4483 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4484 struct mgmt_cp_add_remote_oob_data *cp = data;
4487 if (cp->addr.type != BDADDR_BREDR) {
4488 err = mgmt_cmd_complete(sk, hdev->id,
4489 MGMT_OP_ADD_REMOTE_OOB_DATA,
4490 MGMT_STATUS_INVALID_PARAMS,
4491 &cp->addr, sizeof(cp->addr));
4495 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4496 cp->addr.type, cp->hash,
4497 cp->rand, NULL, NULL);
4499 status = MGMT_STATUS_FAILED;
4501 status = MGMT_STATUS_SUCCESS;
4503 err = mgmt_cmd_complete(sk, hdev->id,
4504 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4505 &cp->addr, sizeof(cp->addr));
/* Extended (P-192 + P-256) command layout. */
4506 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4507 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4508 u8 *rand192, *hash192, *rand256, *hash256;
4511 if (bdaddr_type_is_le(cp->addr.type)) {
4512 /* Enforce zero-valued 192-bit parameters as
4513 * long as legacy SMP OOB isn't implemented.
4515 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4516 memcmp(cp->hash192, ZERO_KEY, 16)) {
4517 err = mgmt_cmd_complete(sk, hdev->id,
4518 MGMT_OP_ADD_REMOTE_OOB_DATA,
4519 MGMT_STATUS_INVALID_PARAMS,
4520 addr, sizeof(*addr));
4527 /* In case one of the P-192 values is set to zero,
4528 * then just disable OOB data for P-192.
4530 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4531 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4535 rand192 = cp->rand192;
4536 hash192 = cp->hash192;
4540 /* In case one of the P-256 values is set to zero, then just
4541 * disable OOB data for P-256.
4543 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4544 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4548 rand256 = cp->rand256;
4549 hash256 = cp->hash256;
4552 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4553 cp->addr.type, hash192, rand192,
4556 status = MGMT_STATUS_FAILED;
4558 status = MGMT_STATUS_SUCCESS;
4560 err = mgmt_cmd_complete(sk, hdev->id,
4561 MGMT_OP_ADD_REMOTE_OOB_DATA,
4562 status, &cp->addr, sizeof(cp->addr));
/* Any other length is malformed. */
4564 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4566 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4567 MGMT_STATUS_INVALID_PARAMS);
4571 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored remote OOB data
 * for one BR/EDR address, or all of it when BDADDR_ANY is given.
 * NOTE(review): extraction gaps — hci_dev_lock(), a goto to the
 * completion, and the final return are missing from this view.
 */
4575 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4576 void *data, u16 len)
4578 struct mgmt_cp_remove_remote_oob_data *cp = data;
4582 bt_dev_dbg(hdev, "sock %p", sk);
4584 if (cp->addr.type != BDADDR_BREDR)
4585 return mgmt_cmd_complete(sk, hdev->id,
4586 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4587 MGMT_STATUS_INVALID_PARAMS,
4588 &cp->addr, sizeof(cp->addr));
/* Wildcard address clears the whole OOB store. */
4592 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4593 hci_remote_oob_data_clear(hdev);
4594 status = MGMT_STATUS_SUCCESS;
4598 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4600 status = MGMT_STATUS_INVALID_PARAMS;
4602 status = MGMT_STATUS_SUCCESS;
4605 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4606 status, &cp->addr, sizeof(cp->addr));
4608 hci_dev_unlock(hdev);
/* Called when the HCI start-discovery sequence finishes: complete
 * whichever start-discovery variant is pending, then wake the suspend
 * machinery if it was waiting for discovery to be unpaused.
 * NOTE(review): extraction gaps — hci_dev_lock(), the !cmd fallbacks and
 * braces are missing from this view.
 */
4612 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4614 struct mgmt_pending_cmd *cmd;
4616 bt_dev_dbg(hdev, "status %d", status);
/* Three variants can be pending; check them in order. */
4620 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4622 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4625 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4628 cmd->cmd_complete(cmd, mgmt_status(status));
4629 mgmt_pending_remove(cmd);
4632 hci_dev_unlock(hdev);
4634 /* Handle suspend notifier */
4635 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4636 hdev->suspend_tasks)) {
4637 bt_dev_dbg(hdev, "Unpaused discovery");
4638 wake_up(&hdev->suspend_wait_q);
/* Validate a discovery type against the controller's capabilities.
 * On a known type, *mgmt_status receives the transport support check
 * result (LE for LE/interleaved, BR/EDR for BREDR); unknown types get
 * MGMT_STATUS_INVALID_PARAMS.
 * NOTE(review): extraction gaps — the switch statement line, break/return
 * lines and braces are missing from this view.
 */
4642 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4643 uint8_t *mgmt_status)
4646 case DISCOV_TYPE_LE:
4647 *mgmt_status = mgmt_le_support(hdev);
4651 case DISCOV_TYPE_INTERLEAVED:
4652 *mgmt_status = mgmt_le_support(hdev);
4656 case DISCOV_TYPE_BREDR:
4657 *mgmt_status = mgmt_bredr_support(hdev);
4662 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects the reply opcode and the
 * limited flag): validate preconditions, reset the discovery filter,
 * queue a pending command and kick the discov_update work.
 * NOTE(review): extraction gaps — hci_dev_lock(), the !cmd check and the
 * final return are missing from this view.
 */
4669 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4670 u16 op, void *data, u16 len)
4672 struct mgmt_cp_start_discovery *cp = data;
4673 struct mgmt_pending_cmd *cmd;
4677 bt_dev_dbg(hdev, "sock %p", sk);
4681 if (!hdev_is_powered(hdev)) {
4682 err = mgmt_cmd_complete(sk, hdev->id, op,
4683 MGMT_STATUS_NOT_POWERED,
4684 &cp->type, sizeof(cp->type));
/* Reject while discovery is already running or periodic inquiry is on. */
4688 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4689 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4690 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4691 &cp->type, sizeof(cp->type));
4695 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4696 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4697 &cp->type, sizeof(cp->type));
4701 /* Can't start discovery when it is paused */
4702 if (hdev->discovery_paused) {
4703 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4704 &cp->type, sizeof(cp->type));
4708 /* Clear the discovery filter first to free any previously
4709 * allocated memory for the UUID list.
4711 hci_discovery_filter_clear(hdev);
4713 hdev->discovery.type = cp->type;
4714 hdev->discovery.report_invalid_rssi = false;
4715 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4716 hdev->discovery.limited = true;
4718 hdev->discovery.limited = false;
4720 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4726 cmd->cmd_complete = generic_cmd_complete;
/* Actual scanning is started asynchronously by the discov_update work. */
4728 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4729 queue_work(hdev->req_workqueue, &hdev->discov_update);
4733 hci_dev_unlock(hdev);
/* Thin wrapper: regular discovery via the shared implementation.
 * NOTE(review): the trailing "data, len);" argument line is missing from
 * this extraction. */
4737 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4738 void *data, u16 len)
4740 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* Thin wrapper: limited discovery via the shared implementation.
 * NOTE(review): the trailing "data, len);" argument line is missing from
 * this extraction. */
4744 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4745 void *data, u16 len)
4747 return start_discovery_internal(sk, hdev,
4748 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for service discovery: reply with the pending
 * command's own opcode/index.
 * NOTE(review): the trailing argument line (presumably cmd->param and its
 * length) is missing from this extraction — confirm against upstream. */
4752 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4755 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering by RSSI and an optional UUID list appended to the
 * command; validates the UUID count against both an overflow bound and
 * the actual command length before copying the list.
 * NOTE(review): extraction gaps — hci_dev_lock(), the !cmd check, several
 * sizeof(cp->type) argument lines and the final return are missing.
 */
4759 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4760 void *data, u16 len)
4762 struct mgmt_cp_start_service_discovery *cp = data;
4763 struct mgmt_pending_cmd *cmd;
/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16. */
4764 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4765 u16 uuid_count, expected_len;
4769 bt_dev_dbg(hdev, "sock %p", sk);
4773 if (!hdev_is_powered(hdev)) {
4774 err = mgmt_cmd_complete(sk, hdev->id,
4775 MGMT_OP_START_SERVICE_DISCOVERY,
4776 MGMT_STATUS_NOT_POWERED,
4777 &cp->type, sizeof(cp->type));
4781 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4782 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4783 err = mgmt_cmd_complete(sk, hdev->id,
4784 MGMT_OP_START_SERVICE_DISCOVERY,
4785 MGMT_STATUS_BUSY, &cp->type,
4790 uuid_count = __le16_to_cpu(cp->uuid_count);
4791 if (uuid_count > max_uuid_count) {
4792 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
4794 err = mgmt_cmd_complete(sk, hdev->id,
4795 MGMT_OP_START_SERVICE_DISCOVERY,
4796 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The declared UUID count must match the actual payload length exactly. */
4801 expected_len = sizeof(*cp) + uuid_count * 16;
4802 if (expected_len != len) {
4803 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
4805 err = mgmt_cmd_complete(sk, hdev->id,
4806 MGMT_OP_START_SERVICE_DISCOVERY,
4807 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4812 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4813 err = mgmt_cmd_complete(sk, hdev->id,
4814 MGMT_OP_START_SERVICE_DISCOVERY,
4815 status, &cp->type, sizeof(cp->type));
4819 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4826 cmd->cmd_complete = service_discovery_cmd_complete;
4828 /* Clear the discovery filter first to free any previously
4829 * allocated memory for the UUID list.
4831 hci_discovery_filter_clear(hdev);
4833 hdev->discovery.result_filtering = true;
4834 hdev->discovery.type = cp->type;
4835 hdev->discovery.rssi = cp->rssi;
4836 hdev->discovery.uuid_count = uuid_count;
4838 if (uuid_count > 0) {
4839 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4841 if (!hdev->discovery.uuids) {
4842 err = mgmt_cmd_complete(sk, hdev->id,
4843 MGMT_OP_START_SERVICE_DISCOVERY,
4845 &cp->type, sizeof(cp->type));
4846 mgmt_pending_remove(cmd);
/* Actual scanning is started asynchronously by the discov_update work. */
4851 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4852 queue_work(hdev->req_workqueue, &hdev->discov_update);
4856 hci_dev_unlock(hdev);
/* Called when the HCI stop-discovery sequence finishes: complete the
 * pending MGMT_OP_STOP_DISCOVERY, then wake the suspend machinery if it
 * was waiting for discovery to be paused.
 * NOTE(review): extraction gaps — hci_dev_lock(), the if (cmd) guard and
 * braces are missing from this view.
 */
4860 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4862 struct mgmt_pending_cmd *cmd;
4864 bt_dev_dbg(hdev, "status %d", status);
4868 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4870 cmd->cmd_complete(cmd, mgmt_status(status));
4871 mgmt_pending_remove(cmd);
4874 hci_dev_unlock(hdev);
4876 /* Handle suspend notifier */
4877 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
4878 bt_dev_dbg(hdev, "Paused discovery");
4879 wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY handler: verify discovery of the requested type
 * is active, then queue a pending command and kick the discov_update work
 * to stop it asynchronously.
 * NOTE(review): extraction gaps — hci_dev_lock(), the !cmd check and the
 * final return are missing from this view.
 */
4883 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4886 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4887 struct mgmt_pending_cmd *cmd;
4890 bt_dev_dbg(hdev, "sock %p", sk);
4894 if (!hci_discovery_active(hdev)) {
4895 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4896 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4897 sizeof(mgmt_cp->type));
/* The caller must name the discovery type that is actually running. */
4901 if (hdev->discovery.type != mgmt_cp->type) {
4902 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4903 MGMT_STATUS_INVALID_PARAMS,
4904 &mgmt_cp->type, sizeof(mgmt_cp->type));
4908 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4914 cmd->cmd_complete = generic_cmd_complete;
4916 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4917 queue_work(hdev->req_workqueue, &hdev->discov_update);
4921 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, mark an inquiry-cache
 * entry's name as known or as needing resolution (the latter re-queues it
 * via hci_inquiry_cache_update_resolve()).
 * NOTE(review): extraction gaps — hci_dev_lock(), a sizeof(cp->addr)
 * argument line, the !e check and the final return are missing.
 */
4925 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4928 struct mgmt_cp_confirm_name *cp = data;
4929 struct inquiry_entry *e;
4932 bt_dev_dbg(hdev, "sock %p", sk);
4936 if (!hci_discovery_active(hdev)) {
4937 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4938 MGMT_STATUS_FAILED, &cp->addr,
4943 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4945 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4946 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4951 if (cp->name_known) {
4952 e->name_state = NAME_KNOWN;
4955 e->name_state = NAME_NEEDED;
4956 hci_inquiry_cache_update_resolve(hdev, e);
4959 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4960 &cp->addr, sizeof(cp->addr));
4963 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the reject list and
 * broadcast MGMT_EV_DEVICE_BLOCKED on success.
 * NOTE(review): extraction gaps — hci_dev_lock(), the error check after
 * hci_bdaddr_list_add(), a goto/done label and the final return are
 * missing from this view.
 */
4967 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4970 struct mgmt_cp_block_device *cp = data;
4974 bt_dev_dbg(hdev, "sock %p", sk);
4976 if (!bdaddr_type_is_valid(cp->addr.type))
4977 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4978 MGMT_STATUS_INVALID_PARAMS,
4979 &cp->addr, sizeof(cp->addr));
4983 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
4986 status = MGMT_STATUS_FAILED;
4990 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4992 status = MGMT_STATUS_SUCCESS;
4995 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4996 &cp->addr, sizeof(cp->addr));
4998 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device() — remove the
 * address from the reject list and broadcast MGMT_EV_DEVICE_UNBLOCKED on
 * success; a miss yields INVALID_PARAMS.
 * NOTE(review): extraction gaps — hci_dev_lock(), the error check after
 * hci_bdaddr_list_del(), a goto/done label and the final return are
 * missing from this view.
 */
5003 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5006 struct mgmt_cp_unblock_device *cp = data;
5010 bt_dev_dbg(hdev, "sock %p", sk);
5012 if (!bdaddr_type_is_valid(cp->addr.type))
5013 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5014 MGMT_STATUS_INVALID_PARAMS,
5015 &cp->addr, sizeof(cp->addr));
5019 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5022 status = MGMT_STATUS_INVALID_PARAMS;
5026 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5028 status = MGMT_STATUS_SUCCESS;
5031 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5032 &cp->addr, sizeof(cp->addr));
5034 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record
 * (source/vendor/product/version) and refresh the EIR data so the new ID
 * is advertised.
 * NOTE(review): extraction gaps — hci_dev_lock(), the reply payload line
 * and the final return are missing from this view.
 */
5039 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5042 struct mgmt_cp_set_device_id *cp = data;
5043 struct hci_request req;
5047 bt_dev_dbg(hdev, "sock %p", sk);
5049 source = __le16_to_cpu(cp->source);
/* Only 0x0000 (disabled), 0x0001 (SIG) and 0x0002 (USB IF) are valid. */
5051 if (source > 0x0002)
5052 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5053 MGMT_STATUS_INVALID_PARAMS);
5057 hdev->devid_source = source;
5058 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5059 hdev->devid_product = __le16_to_cpu(cp->product);
5060 hdev->devid_version = __le16_to_cpu(cp->version);
5062 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Push the updated Device ID into the controller's EIR data. */
5065 hci_req_init(&req, hdev);
5066 __hci_req_update_eir(&req);
5067 hci_req_run(&req, NULL);
5069 hci_dev_unlock(hdev);
/* HCI request completion stub used by set_advertising_complete() when
 * re-enabling instance advertising; only logs the status. */
5074 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5077 bt_dev_dbg(hdev, "status %d", status);
/* HCI completion callback for Set Advertising: on error fail all pending
 * SET_ADVERTISING commands; on success sync HCI_ADVERTISING with the
 * controller's HCI_LE_ADV state, answer the pending commands, emit new
 * settings, service suspend waiters, and — if Set Advertising was just
 * turned off while adv instances exist — reschedule instance advertising.
 * NOTE(review): extraction gaps — hci_dev_lock(), goto labels/targets and
 * several braces are missing from this view.
 */
5080 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5083 struct cmd_lookup match = { NULL, hdev };
5084 struct hci_request req;
5086 struct adv_info *adv_instance;
5092 u8 mgmt_err = mgmt_status(status);
5094 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5095 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's actual advertising state into the mgmt flag. */
5099 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5100 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5102 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5104 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5107 new_settings(hdev, match.sk);
5112 /* Handle suspend notifier */
5113 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5114 hdev->suspend_tasks)) {
5115 bt_dev_dbg(hdev, "Paused advertising");
5116 wake_up(&hdev->suspend_wait_q);
5117 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5118 hdev->suspend_tasks)) {
5119 bt_dev_dbg(hdev, "Unpaused advertising");
5120 wake_up(&hdev->suspend_wait_q);
5123 /* If "Set Advertising" was just disabled and instance advertising was
5124 * set up earlier, then re-enable multi-instance advertising.
5126 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5127 list_empty(&hdev->adv_instances))
5130 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first registered one. */
5132 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5133 struct adv_info, list);
5137 instance = adv_instance->instance;
5140 hci_req_init(&req, hdev);
5142 err = __hci_req_schedule_adv_instance(&req, instance, true);
5145 err = hci_req_run(&req, enable_advertising_instance);
5148 bt_dev_err(hdev, "failed to re-configure advertising");
5151 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler. val: 0x00 off, 0x01 on, 0x02 on and
 * connectable. When no HCI traffic is needed (powered off, no state
 * change, LE link up, or active scan running) only the flags are toggled
 * and a settings response is sent; otherwise a pending command is queued
 * and the advertising (re)configuration is driven through an HCI request.
 * NOTE(review): extraction gaps — hci_dev_lock(), val assignment, the
 * !cmd check, goto labels and the final return are missing from this view.
 */
5154 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5157 struct mgmt_mode *cp = data;
5158 struct mgmt_pending_cmd *cmd;
5159 struct hci_request req;
5163 bt_dev_dbg(hdev, "sock %p", sk);
5165 status = mgmt_le_support(hdev);
5167 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5170 /* Enabling the experimental LL Privay support disables support for
5173 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5174 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5175 MGMT_STATUS_NOT_SUPPORTED);
5177 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5178 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5179 MGMT_STATUS_INVALID_PARAMS);
5181 if (hdev->advertising_paused)
5182 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5189 /* The following conditions are ones which mean that we should
5190 * not do any HCI communication but directly send a mgmt
5191 * response to user space (after toggling the flag if
5194 if (!hdev_is_powered(hdev) ||
5195 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5196 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5197 hci_conn_num(hdev, LE_LINK) > 0 ||
5198 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5199 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5203 hdev->cur_adv_instance = 0x00;
5204 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5205 if (cp->val == 0x02)
5206 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5208 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5210 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5211 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5214 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Only broadcast new settings when a flag actually changed. */
5219 err = new_settings(hdev, sk);
/* A SET_ADVERTISING or SET_LE command already in flight blocks this one. */
5224 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5225 pending_find(MGMT_OP_SET_LE, hdev)) {
5226 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5231 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5237 hci_req_init(&req, hdev);
5239 if (cp->val == 0x02)
5240 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5242 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5244 cancel_adv_timeout(hdev);
5247 /* Switch to instance "0" for the Set Advertising setting.
5248 * We cannot use update_[adv|scan_rsp]_data() here as the
5249 * HCI_ADVERTISING flag is not yet set.
5251 hdev->cur_adv_instance = 0x00;
5253 if (ext_adv_capable(hdev)) {
5254 __hci_req_start_ext_adv(&req, 0x00);
5256 __hci_req_update_adv_data(&req, 0x00);
5257 __hci_req_update_scan_rsp_data(&req, 0x00);
5258 __hci_req_enable_advertising(&req);
5261 __hci_req_disable_advertising(&req);
5264 err = hci_req_run(&req, set_advertising_complete);
5266 mgmt_pending_remove(cmd);
5269 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: store the LE static random address.
 * Only allowed while powered off; BDADDR_ANY clears it, otherwise the
 * address must not be BDADDR_NONE and must have both top bits set, as a
 * static random address requires.
 * NOTE(review): extraction gaps — hci_dev_lock(), the err check before
 * new_settings() and the final return are missing from this view.
 */
5273 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5274 void *data, u16 len)
5276 struct mgmt_cp_set_static_address *cp = data;
5279 bt_dev_dbg(hdev, "sock %p", sk);
5281 if (!lmp_le_capable(hdev))
5282 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5283 MGMT_STATUS_NOT_SUPPORTED);
5285 if (hdev_is_powered(hdev))
5286 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5287 MGMT_STATUS_REJECTED);
5289 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5290 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5291 return mgmt_cmd_status(sk, hdev->id,
5292 MGMT_OP_SET_STATIC_ADDRESS,
5293 MGMT_STATUS_INVALID_PARAMS);
5295 /* Two most significant bits shall be set */
5296 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5297 return mgmt_cmd_status(sk, hdev->id,
5298 MGMT_OP_SET_STATIC_ADDRESS,
5299 MGMT_STATUS_INVALID_PARAMS);
5304 bacpy(&hdev->static_addr, &cp->bdaddr);
5306 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5310 err = new_settings(hdev, sk);
5313 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate and store the LE scan
 * interval/window (both in the 0x0004–0x4000 range, window <= interval)
 * and restart background scanning so the new values take effect.
 * NOTE(review): extraction gaps — hci_dev_lock(), the reply payload line
 * and the final return are missing from this view.
 */
5317 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5318 void *data, u16 len)
5320 struct mgmt_cp_set_scan_params *cp = data;
5321 __u16 interval, window;
5324 bt_dev_dbg(hdev, "sock %p", sk);
5326 if (!lmp_le_capable(hdev))
5327 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5328 MGMT_STATUS_NOT_SUPPORTED);
5330 interval = __le16_to_cpu(cp->interval);
5332 if (interval < 0x0004 || interval > 0x4000)
5333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5334 MGMT_STATUS_INVALID_PARAMS);
5336 window = __le16_to_cpu(cp->window);
5338 if (window < 0x0004 || window > 0x4000)
5339 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5340 MGMT_STATUS_INVALID_PARAMS);
/* The scan window cannot exceed the scan interval. */
5342 if (window > interval)
5343 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5344 MGMT_STATUS_INVALID_PARAMS);
5348 hdev->le_scan_interval = interval;
5349 hdev->le_scan_window = window;
5351 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5354 /* If background scan is running, restart it so new parameters are
5357 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5358 hdev->discovery.state == DISCOVERY_STOPPED) {
5359 struct hci_request req;
5361 hci_req_init(&req, hdev);
5363 hci_req_add_le_scan_disable(&req, false);
5364 hci_req_add_le_passive_scan(&req);
5366 hci_req_run(&req, NULL);
5369 hci_dev_unlock(hdev);
/* HCI completion callback for Set Fast Connectable: on error report the
 * failure; on success sync HCI_FAST_CONNECTABLE with the requested mode,
 * answer the pending command and broadcast new settings.
 * NOTE(review): extraction gaps — hci_dev_lock(), the !cmd check and
 * if/else lines around the status branch are missing from this view.
 */
5374 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5377 struct mgmt_pending_cmd *cmd;
5379 bt_dev_dbg(hdev, "status 0x%02x", status);
5383 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5388 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5389 mgmt_status(status));
5391 struct mgmt_mode *cp = cmd->param;
5394 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5396 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5398 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5399 new_settings(hdev, cmd->sk);
5402 mgmt_pending_remove(cmd);
5405 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2. When powered the page-scan
 * parameters are rewritten via an HCI request (completion in
 * fast_connectable_complete()); when powered off only the flag toggles.
 * NOTE(review): extraction gaps — hci_dev_lock(), BUSY status line, !cmd
 * check, goto labels and the final return are missing from this view.
 */
5408 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5409 void *data, u16 len)
5411 struct mgmt_mode *cp = data;
5412 struct mgmt_pending_cmd *cmd;
5413 struct hci_request req;
5416 bt_dev_dbg(hdev, "sock %p", sk);
5418 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5419 hdev->hci_ver < BLUETOOTH_VER_1_2)
5420 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5421 MGMT_STATUS_NOT_SUPPORTED);
5423 if (cp->val != 0x00 && cp->val != 0x01)
5424 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5425 MGMT_STATUS_INVALID_PARAMS);
5429 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5430 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op request: just acknowledge the current setting. */
5435 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5436 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5441 if (!hdev_is_powered(hdev)) {
5442 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5443 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5445 new_settings(hdev, sk);
5449 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5456 hci_req_init(&req, hdev);
5458 __hci_req_write_fast_connectable(&req, cp->val);
5460 err = hci_req_run(&req, fast_connectable_complete);
5462 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5463 MGMT_STATUS_FAILED);
5464 mgmt_pending_remove(cmd);
5468 hci_dev_unlock(hdev);
/* HCI completion callback for Set BR/EDR: on error roll back the
 * HCI_BREDR_ENABLED flag (set optimistically in set_bredr()) and report
 * the failure; on success answer the pending command and broadcast new
 * settings.
 * NOTE(review): extraction gaps — hci_dev_lock(), !cmd check and the
 * if/else lines around the status branch are missing from this view.
 */
5473 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5475 struct mgmt_pending_cmd *cmd;
5477 bt_dev_dbg(hdev, "status 0x%02x", status);
5481 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5486 u8 mgmt_err = mgmt_status(status);
5488 /* We need to restore the flag if related HCI commands
5491 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5493 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5495 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5496 new_settings(hdev, cmd->sk);
5499 mgmt_pending_remove(cmd);
5502 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. Powered off: flags only (disabling also clears the
 * BR/EDR-dependent flags). Powered on: disabling is rejected, and
 * re-enabling is rejected when a static address or Secure Connections is
 * in use (see comment below); otherwise the flag is flipped eagerly and
 * the scan/advertising data updated through an HCI request, with rollback
 * in set_bredr_complete().
 * NOTE(review): extraction gaps — hci_dev_lock(), val assignment, BUSY
 * status line, !cmd check, goto labels and the final return are missing.
 */
5505 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5507 struct mgmt_mode *cp = data;
5508 struct mgmt_pending_cmd *cmd;
5509 struct hci_request req;
5512 bt_dev_dbg(hdev, "sock %p", sk);
5514 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5515 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5516 MGMT_STATUS_NOT_SUPPORTED);
/* LE must stay enabled; BR/EDR-only operation is configured elsewhere. */
5518 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5519 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5520 MGMT_STATUS_REJECTED);
5522 if (cp->val != 0x00 && cp->val != 0x01)
5523 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5524 MGMT_STATUS_INVALID_PARAMS);
5528 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5529 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5533 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR also clears every flag that depends on it. */
5535 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5536 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5537 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5538 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5539 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5542 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5544 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5548 err = new_settings(hdev, sk);
5552 /* Reject disabling when powered on */
5554 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5555 MGMT_STATUS_REJECTED);
5558 /* When configuring a dual-mode controller to operate
5559 * with LE only and using a static address, then switching
5560 * BR/EDR back on is not allowed.
5562 * Dual-mode controllers shall operate with the public
5563 * address as its identity address for BR/EDR and LE. So
5564 * reject the attempt to create an invalid configuration.
5566 * The same restrictions applies when secure connections
5567 * has been enabled. For BR/EDR this is a controller feature
5568 * while for LE it is a host stack feature. This means that
5569 * switching BR/EDR back on when secure connections has been
5570 * enabled is not a supported transaction.
5572 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5573 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5574 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5575 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5576 MGMT_STATUS_REJECTED);
5581 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5582 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5587 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5593 /* We need to flip the bit already here so that
5594 * hci_req_update_adv_data generates the correct flags.
5596 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5598 hci_req_init(&req, hdev);
5600 __hci_req_write_fast_connectable(&req, false);
5601 __hci_req_update_scan(&req);
5603 /* Since only the advertising data flags will change, there
5604 * is no need to update the scan response data.
5606 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5608 err = hci_req_run(&req, set_bredr_complete);
5610 mgmt_pending_remove(cmd);
5613 hci_dev_unlock(hdev);
/* Completion callback for the HCI Write SC Support request issued by
 * set_secure_conn().  On controller error the pending command is
 * answered with a status; on success the HCI_SC_ENABLED/HCI_SC_ONLY
 * flags are updated to reflect the requested mode and the caller is
 * sent the new settings.
 *
 * NOTE(review): the three flag combinations below appear to correspond
 * to requested values 0x00 / 0x01 / 0x02 — the selecting switch is not
 * visible here; confirm against the full source.
 */
5617 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5619 struct mgmt_pending_cmd *cmd;
5620 struct mgmt_mode *cp;
5622 bt_dev_dbg(hdev, "status %u", status);
5626 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
/* Controller rejected the command: report the translated error */
5631 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5632 mgmt_status(status));
/* SC disabled entirely */
5640 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5641 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* SC enabled, legacy pairing still allowed */
5644 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5645 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* SC-only mode */
5648 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5649 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5653 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5654 new_settings(hdev, cmd->sk);
5657 mgmt_pending_remove(cmd);
5659 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler.  cp->val selects the mode:
 * 0x00 = disabled, 0x01 = Secure Connections, 0x02 = SC-only.
 * When the change can be done purely in host flags (powered off, no
 * controller SC support, or BR/EDR disabled) the flags are toggled
 * directly; otherwise an HCI Write SC Support command is issued and
 * completion is handled in sc_enable_complete().
 */
5662 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5663 void *data, u16 len)
5665 struct mgmt_mode *cp = data;
5666 struct mgmt_pending_cmd *cmd;
5667 struct hci_request req;
5671 bt_dev_dbg(hdev, "sock %p", sk);
/* SC requires either controller support or LE being enabled */
5673 if (!lmp_sc_capable(hdev) &&
5674 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5675 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5676 MGMT_STATUS_NOT_SUPPORTED);
/* On the BR/EDR transport SC builds on SSP, so SSP must be on first */
5678 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5679 lmp_sc_capable(hdev) &&
5680 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5681 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5682 MGMT_STATUS_REJECTED);
5684 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5685 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5686 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: nothing needs to be programmed into the controller */
5690 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5691 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5695 changed = !hci_dev_test_and_set_flag(hdev,
5697 if (cp->val == 0x02)
5698 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5700 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5702 changed = hci_dev_test_and_clear_flag(hdev,
5704 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5707 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5712 err = new_settings(hdev, sk);
/* Only one Set Secure Connections command may be pending at a time */
5717 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5718 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state: short-circuit with current settings */
5725 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5726 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5727 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5731 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5737 hci_req_init(&req, hdev);
5738 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5739 err = hci_req_run(&req, sc_enable_complete);
/* NOTE(review): presumably only executed when hci_req_run() failed —
 * guard not visible here; confirm against the full source.
 */
5741 mgmt_pending_remove(cmd);
5746 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.  cp->val: 0x00 = discard debug keys,
 * 0x01 = keep stored debug keys, 0x02 = keep them and actively use SSP
 * debug mode.  When the "use" state changes on a powered controller
 * with SSP enabled, the controller's SSP debug mode is updated too.
 */
5750 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5751 void *data, u16 len)
5753 struct mgmt_mode *cp = data;
5754 bool changed, use_changed;
5757 bt_dev_dbg(hdev, "sock %p", sk);
5759 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5760 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5761 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the "keep debug keys" setting actually changed */
5766 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5768 changed = hci_dev_test_and_clear_flag(hdev,
5769 HCI_KEEP_DEBUG_KEYS);
/* 0x02 additionally means debug keys shall be used for pairing */
5771 if (cp->val == 0x02)
5772 use_changed = !hci_dev_test_and_set_flag(hdev,
5773 HCI_USE_DEBUG_KEYS);
5775 use_changed = hci_dev_test_and_clear_flag(hdev,
5776 HCI_USE_DEBUG_KEYS);
/* Propagate the new debug-mode state to the controller when relevant */
5778 if (hdev_is_powered(hdev) && use_changed &&
5779 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5780 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5781 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5782 sizeof(mode), &mode);
5785 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5790 err = new_settings(hdev, sk);
5793 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler.  Only permitted while the controller is
 * powered off.  privacy 0x01 enables LE privacy with the supplied IRK,
 * 0x02 additionally selects limited privacy, and 0x00 disables privacy
 * and wipes the local IRK.
 */
5797 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5800 struct mgmt_cp_set_privacy *cp = cp_data;
5804 bt_dev_dbg(hdev, "sock %p", sk);
5806 if (!lmp_le_capable(hdev))
5807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5808 MGMT_STATUS_NOT_SUPPORTED);
5810 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
5811 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5812 MGMT_STATUS_INVALID_PARAMS);
/* Changing the identity key while powered would race with ongoing
 * address generation, hence the powered-off requirement.
 */
5814 if (hdev_is_powered(hdev))
5815 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5816 MGMT_STATUS_REJECTED);
5820 /* If user space supports this command it is also expected to
5821 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5823 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: install the IRK and force a fresh RPA on next use */
5826 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5827 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5828 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5829 hci_adv_instances_set_rpa_expired(hdev, true);
5830 if (cp->privacy == 0x02)
5831 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
5833 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: clear the IRK and all privacy-related state */
5835 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5836 memset(hdev->irk, 0, sizeof(hdev->irk));
5837 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5838 hci_adv_instances_set_rpa_expired(hdev, false);
5839 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
5842 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5847 err = new_settings(hdev, sk);
5850 hci_dev_unlock(hdev);
/* Validate the address of an IRK entry supplied via Load IRKs.
 * LE public addresses are accepted as-is; an LE random address must be
 * a static address, i.e. have its two most significant bits set.
 */
5854 static bool irk_is_valid(struct mgmt_irk_info *irk)
5856 switch (irk->addr.type) {
5857 case BDADDR_LE_PUBLIC:
5860 case BDADDR_LE_RANDOM:
5861 /* Two most significant bits shall be set */
5862 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler.  Validates the count against both the
 * theoretical maximum and the actual payload length, then replaces the
 * complete IRK store with the supplied list (skipping any keys that
 * are on the blocked-key list) and marks the device as doing RPA
 * resolving.
 */
5870 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5873 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound on entries that can possibly fit in a mgmt packet */
5874 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5875 sizeof(struct mgmt_irk_info));
5876 u16 irk_count, expected_len;
5879 bt_dev_dbg(hdev, "sock %p", sk);
5881 if (!lmp_le_capable(hdev))
5882 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5883 MGMT_STATUS_NOT_SUPPORTED);
5885 irk_count = __le16_to_cpu(cp->irk_count);
5886 if (irk_count > max_irk_count) {
5887 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5889 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5890 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the received payload exactly */
5893 expected_len = struct_size(cp, irks, irk_count);
5894 if (expected_len != len) {
5895 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5897 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5898 MGMT_STATUS_INVALID_PARAMS);
5901 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Reject the whole load if any single entry is malformed */
5903 for (i = 0; i < irk_count; i++) {
5904 struct mgmt_irk_info *key = &cp->irks[i];
5906 if (!irk_is_valid(key))
5907 return mgmt_cmd_status(sk, hdev->id,
5909 MGMT_STATUS_INVALID_PARAMS);
/* This is a full replacement: drop all previously stored IRKs */
5914 hci_smp_irks_clear(hdev);
5916 for (i = 0; i < irk_count; i++) {
5917 struct mgmt_irk_info *irk = &cp->irks[i];
5918 u8 addr_type = le_addr_type(irk->addr.type);
5920 if (hci_is_blocked_key(hdev,
5921 HCI_BLOCKED_KEY_TYPE_IRK,
5923 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
5928 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
5929 if (irk->addr.type == BDADDR_BREDR)
5930 addr_type = BDADDR_BREDR;
5932 hci_add_irk(hdev, &irk->addr.bdaddr,
5933 addr_type, irk->val,
5937 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5939 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5941 hci_dev_unlock(hdev);
/* Validate one LTK entry supplied via Load Long Term Keys: the
 * initiator field must be a boolean, LE public addresses are accepted
 * as-is and an LE random address must be static (two MSBs set).
 */
5946 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5948 if (key->initiator != 0x00 && key->initiator != 0x01)
5951 switch (key->addr.type) {
5952 case BDADDR_LE_PUBLIC:
5955 case BDADDR_LE_RANDOM:
5956 /* Two most significant bits shall be set */
5957 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.  Mirrors load_irks(): validate
 * count and payload length, validate every entry, then replace the
 * entire LTK store, translating the mgmt key type into the SMP key
 * type / authenticated flag used internally.
 */
5965 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5966 void *cp_data, u16 len)
5968 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound on entries that can possibly fit in a mgmt packet */
5969 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5970 sizeof(struct mgmt_ltk_info));
5971 u16 key_count, expected_len;
5974 bt_dev_dbg(hdev, "sock %p", sk);
5976 if (!lmp_le_capable(hdev))
5977 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5978 MGMT_STATUS_NOT_SUPPORTED);
5980 key_count = __le16_to_cpu(cp->key_count);
5981 if (key_count > max_key_count) {
5982 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
5984 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5985 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the received payload exactly */
5988 expected_len = struct_size(cp, keys, key_count);
5989 if (expected_len != len) {
5990 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
5992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5993 MGMT_STATUS_INVALID_PARAMS);
5996 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Reject the whole load if any single entry is malformed */
5998 for (i = 0; i < key_count; i++) {
5999 struct mgmt_ltk_info *key = &cp->keys[i];
6001 if (!ltk_is_valid(key))
6002 return mgmt_cmd_status(sk, hdev->id,
6003 MGMT_OP_LOAD_LONG_TERM_KEYS,
6004 MGMT_STATUS_INVALID_PARAMS);
/* This is a full replacement: drop all previously stored LTKs */
6009 hci_smp_ltks_clear(hdev);
6011 for (i = 0; i < key_count; i++) {
6012 struct mgmt_ltk_info *key = &cp->keys[i];
6013 u8 type, authenticated;
6014 u8 addr_type = le_addr_type(key->addr.type);
6016 if (hci_is_blocked_key(hdev,
6017 HCI_BLOCKED_KEY_TYPE_LTK,
6019 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map the mgmt-level key classification onto SMP key type plus the
 * authenticated flag; legacy (non-P256) keys also encode which side
 * was the initiator.
 */
6024 switch (key->type) {
6025 case MGMT_LTK_UNAUTHENTICATED:
6026 authenticated = 0x00;
6027 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6029 case MGMT_LTK_AUTHENTICATED:
6030 authenticated = 0x01;
6031 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6033 case MGMT_LTK_P256_UNAUTH:
6034 authenticated = 0x00;
6035 type = SMP_LTK_P256;
6037 case MGMT_LTK_P256_AUTH:
6038 authenticated = 0x01;
6039 type = SMP_LTK_P256;
6041 case MGMT_LTK_P256_DEBUG:
6042 authenticated = 0x00;
6043 type = SMP_LTK_P256_DEBUG;
6049 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
6050 if (key->addr.type == BDADDR_BREDR)
6051 addr_type = BDADDR_BREDR;
6053 hci_add_ltk(hdev, &key->addr.bdaddr,
6054 addr_type, type, authenticated,
6055 key->val, key->enc_size, key->ediv, key->rand);
6058 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6061 hci_dev_unlock(hdev);
/* Finish a pending Get Conn Info command: build the reply from the
 * values cached in the hci_conn (or invalid markers on error) and
 * release the connection hold taken when the command was queued.
 */
6066 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6068 struct hci_conn *conn = cmd->user_data;
6069 struct mgmt_rp_get_conn_info rp;
/* The original request parameters carry the address to echo back */
6072 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6074 if (status == MGMT_STATUS_SUCCESS) {
6075 rp.rssi = conn->rssi;
6076 rp.tx_power = conn->tx_power;
6077 rp.max_tx_power = conn->max_tx_power;
/* On failure report well-defined "invalid" sentinel values */
6079 rp.rssi = HCI_RSSI_INVALID;
6080 rp.tx_power = HCI_TX_POWER_INVALID;
6081 rp.max_tx_power = HCI_TX_POWER_INVALID;
6084 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6085 status, &rp, sizeof(rp));
6087 hci_conn_drop(conn);
/* HCI request completion for the RSSI / TX power refresh issued by
 * get_conn_info().  Recovers the connection handle from the last sent
 * command, looks up the matching pending mgmt command and completes it.
 */
6093 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6096 struct hci_cp_read_rssi *cp;
6097 struct mgmt_pending_cmd *cmd;
6098 struct hci_conn *conn;
6102 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
6106 /* Commands sent in request are either Read RSSI or Read Transmit Power
6107 * Level so we check which one was last sent to retrieve connection
6108 * handle. Both commands have handle as first parameter so it's safe to
6109 * cast data on the same command struct.
6111 * First command sent is always Read RSSI and we fail only if it fails.
6112 * In other case we simply override error to indicate success as we
6113 * already remembered if TX power value is actually valid.
6115 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
6117 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
6118 status = MGMT_STATUS_SUCCESS;
6120 status = mgmt_status(hci_status);
/* Neither command found in the sent-command buffer: bail out */
6124 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
6128 handle = __le16_to_cpu(cp->handle);
6129 conn = hci_conn_hash_lookup_handle(hdev, handle);
6131 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
/* Match the pending mgmt command against this specific connection */
6136 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
6140 cmd->cmd_complete(cmd, status);
6141 mgmt_pending_remove(cmd);
6144 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: report RSSI and TX power for an
 * active connection.  Cached values are returned directly when they
 * are fresh enough; otherwise an HCI request (Read RSSI plus, when
 * needed, Read Transmit Power Level) refreshes them asynchronously.
 */
6147 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6150 struct mgmt_cp_get_conn_info *cp = data;
6151 struct mgmt_rp_get_conn_info rp;
6152 struct hci_conn *conn;
6153 unsigned long conn_info_age;
6156 bt_dev_dbg(hdev, "sock %p", sk);
/* Echo the queried address back in every reply, including errors */
6158 memset(&rp, 0, sizeof(rp));
6159 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6160 rp.addr.type = cp->addr.type;
6162 if (!bdaddr_type_is_valid(cp->addr.type))
6163 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6164 MGMT_STATUS_INVALID_PARAMS,
6169 if (!hdev_is_powered(hdev)) {
6170 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6171 MGMT_STATUS_NOT_POWERED, &rp,
/* Resolve the connection on the transport matching the address type */
6176 if (cp->addr.type == BDADDR_BREDR)
6177 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6180 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6182 if (!conn || conn->state != BT_CONNECTED) {
6183 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6184 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one Get Conn Info per connection may be in flight */
6189 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
6190 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6191 MGMT_STATUS_BUSY, &rp, sizeof(rp));
6195 /* To avoid client trying to guess when to poll again for information we
6196 * calculate conn info age as random value between min/max set in hdev.
6198 conn_info_age = hdev->conn_info_min_age +
6199 prandom_u32_max(hdev->conn_info_max_age -
6200 hdev->conn_info_min_age);
6202 /* Query controller to refresh cached values if they are too old or were
6205 if (time_after(jiffies, conn->conn_info_timestamp +
6206 msecs_to_jiffies(conn_info_age)) ||
6207 !conn->conn_info_timestamp) {
6208 struct hci_request req;
6209 struct hci_cp_read_tx_power req_txp_cp;
6210 struct hci_cp_read_rssi req_rssi_cp;
6211 struct mgmt_pending_cmd *cmd;
6213 hci_req_init(&req, hdev);
6214 req_rssi_cp.handle = cpu_to_le16(conn->handle);
6215 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
6218 /* For LE links TX power does not change thus we don't need to
6219 * query for it once value is known.
6221 if (!bdaddr_type_is_le(cp->addr.type) ||
6222 conn->tx_power == HCI_TX_POWER_INVALID) {
6223 req_txp_cp.handle = cpu_to_le16(conn->handle);
6224 req_txp_cp.type = 0x00;
6225 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6226 sizeof(req_txp_cp), &req_txp_cp);
6229 /* Max TX power needs to be read only once per connection */
6230 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
6231 req_txp_cp.handle = cpu_to_le16(conn->handle);
6232 req_txp_cp.type = 0x01;
6233 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6234 sizeof(req_txp_cp), &req_txp_cp);
6237 err = hci_req_run(&req, conn_info_refresh_complete);
6241 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until conn_info_cmd_complete() releases it */
6248 hci_conn_hold(conn);
6249 cmd->user_data = hci_conn_get(conn);
6250 cmd->cmd_complete = conn_info_cmd_complete;
6252 conn->conn_info_timestamp = jiffies;
6254 /* Cache is valid, just reply with values cached in hci_conn */
6255 rp.rssi = conn->rssi;
6256 rp.tx_power = conn->tx_power;
6257 rp.max_tx_power = conn->max_tx_power;
6259 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6260 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6264 hci_dev_unlock(hdev);
/* Finish a pending Get Clock Info command: fill in the local clock and,
 * when a connection was involved, the piconet clock and its accuracy.
 *
 * NOTE(review): the guards around the hdev/conn dereferences (and the
 * matching hci_dev_put/hci_conn_put) are not visible here — presumably
 * both pointers are checked before use; confirm against the full
 * source.
 */
6268 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6270 struct hci_conn *conn = cmd->user_data;
6271 struct mgmt_rp_get_clock_info rp;
6272 struct hci_dev *hdev;
/* Echo the queried address back from the stored request parameters */
6275 memset(&rp, 0, sizeof(rp));
6276 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6281 hdev = hci_dev_get(cmd->index);
6283 rp.local_clock = cpu_to_le32(hdev->clock);
6288 rp.piconet_clock = cpu_to_le32(conn->clock);
6289 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6293 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6297 hci_conn_drop(conn);
/* HCI request completion for Read Clock issued by get_clock_info().
 * If the last Read Clock targeted a connection (which != 0), the
 * connection is looked up by handle so the pending command can be
 * matched against it.
 */
6304 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6306 struct hci_cp_read_clock *hci_cp;
6307 struct mgmt_pending_cmd *cmd;
6308 struct hci_conn *conn;
6310 bt_dev_dbg(hdev, "status %u", status);
6314 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which == 0x01 means the piconet clock of a specific connection */
6318 if (hci_cp->which) {
6319 u16 handle = __le16_to_cpu(hci_cp->handle);
6320 conn = hci_conn_hash_lookup_handle(hdev, handle);
6325 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6329 cmd->cmd_complete(cmd, mgmt_status(status));
6330 mgmt_pending_remove(cmd);
6333 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler: always reads the local clock and,
 * when a non-BDADDR_ANY BR/EDR peer address is supplied, additionally
 * reads the piconet clock of that connection.  Completion is handled
 * asynchronously via get_clock_info_complete().
 */
6336 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6339 struct mgmt_cp_get_clock_info *cp = data;
6340 struct mgmt_rp_get_clock_info rp;
6341 struct hci_cp_read_clock hci_cp;
6342 struct mgmt_pending_cmd *cmd;
6343 struct hci_request req;
6344 struct hci_conn *conn;
6347 bt_dev_dbg(hdev, "sock %p", sk);
/* Echo the queried address back in every reply, including errors */
6349 memset(&rp, 0, sizeof(rp));
6350 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6351 rp.addr.type = cp->addr.type;
/* Clock information only exists for the BR/EDR transport */
6353 if (cp->addr.type != BDADDR_BREDR)
6354 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6355 MGMT_STATUS_INVALID_PARAMS,
6360 if (!hdev_is_powered(hdev)) {
6361 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6362 MGMT_STATUS_NOT_POWERED, &rp,
/* A concrete peer address must map to an active ACL connection */
6367 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6368 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6370 if (!conn || conn->state != BT_CONNECTED) {
6371 err = mgmt_cmd_complete(sk, hdev->id,
6372 MGMT_OP_GET_CLOCK_INFO,
6373 MGMT_STATUS_NOT_CONNECTED,
6381 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6387 cmd->cmd_complete = clock_info_cmd_complete;
6389 hci_req_init(&req, hdev);
/* First Read Clock: which == 0x00, i.e. the local clock */
6391 memset(&hci_cp, 0, sizeof(hci_cp));
6392 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until clock_info_cmd_complete() releases it */
6395 hci_conn_hold(conn);
6396 cmd->user_data = hci_conn_get(conn);
6398 hci_cp.handle = cpu_to_le16(conn->handle);
6399 hci_cp.which = 0x01; /* Piconet clock */
6400 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6403 err = hci_req_run(&req, get_clock_info_complete);
/* NOTE(review): presumably only executed when hci_req_run() failed —
 * guard not visible here; confirm against the full source.
 */
6405 mgmt_pending_remove(cmd);
6408 hci_dev_unlock(hdev);
/* Return whether an LE link to the given address and address type
 * exists and is fully established (state BT_CONNECTED).
 */
6412 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6414 struct hci_conn *conn;
6416 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6420 if (conn->dst_type != type)
6423 if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock */
/* Create (or reuse) the connection parameters for addr/addr_type and
 * set their auto-connect policy, moving the entry onto the matching
 * action list: pend_le_conns for connection policies, pend_le_reports
 * for report-only, or off both when disabled.
 */
6430 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6431 u8 addr_type, u8 auto_connect)
6433 struct hci_conn_params *params;
6435 params = hci_conn_params_add(hdev, addr, addr_type)
6439 if (params->auto_connect == auto_connect)
/* Detach from whatever action list the entry was on before */
6442 list_del_init(&params->action);
6444 switch (auto_connect) {
6445 case HCI_AUTO_CONN_DISABLED:
6446 case HCI_AUTO_CONN_LINK_LOSS:
6447 /* If auto connect is being disabled when we're trying to
6448 * connect to device, keep connecting.
6450 if (params->explicit_connect)
6451 list_add(&params->action, &hdev->pend_le_conns)
6453 case HCI_AUTO_CONN_REPORT:
6454 if (params->explicit_connect)
6455 list_add(&params->action, &hdev->pend_le_conns)
6457 list_add(&params->action, &hdev->pend_le_reports)
6459 case HCI_AUTO_CONN_DIRECT:
6460 case HCI_AUTO_CONN_ALWAYS:
/* Only queue for connection if no link exists yet */
6461 if (!is_connected(hdev, addr, addr_type))
6462 list_add(&params->action, &hdev->pend_le_conns)
6466 params->auto_connect = auto_connect;
6468 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
6469 addr, addr_type, auto_connect);
/* Broadcast MGMT_EV_DEVICE_ADDED to all mgmt sockets except the one
 * that issued the Add Device command (sk).
 */
6474 static void device_added(struct sock *sk, struct hci_dev *hdev,
6475 bdaddr_t *bdaddr, u8 type, u8 action)
6477 struct mgmt_ev_device_added ev;
6479 bacpy(&ev.addr.bdaddr, bdaddr);
6480 ev.addr.type = type;
6483 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler.  BR/EDR addresses are placed on the
 * accept list (only the "allow incoming connections" action, 0x01, is
 * supported there); LE addresses become connection parameters whose
 * auto-connect policy is derived from cp->action (0x00 report,
 * 0x01 direct, 0x02 always).
 */
6486 static int add_device(struct sock *sk, struct hci_dev *hdev,
6487 void *data, u16 len)
6489 struct mgmt_cp_add_device *cp = data;
6490 u8 auto_conn, addr_type;
6491 struct hci_conn_params *params;
6493 u32 current_flags = 0;
6495 bt_dev_dbg(hdev, "sock %p", sk);
6497 if (!bdaddr_type_is_valid(cp->addr.type) ||
6498 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6499 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6500 MGMT_STATUS_INVALID_PARAMS,
6501 &cp->addr, sizeof(cp->addr));
6503 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6504 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6505 MGMT_STATUS_INVALID_PARAMS,
6506 &cp->addr, sizeof(cp->addr));
6510 if (cp->addr.type == BDADDR_BREDR) {
6511 /* Only incoming connections action is supported for now */
6512 if (cp->action != 0x01) {
6513 err = mgmt_cmd_complete(sk, hdev->id,
6515 MGMT_STATUS_INVALID_PARAMS,
6516 &cp->addr, sizeof(cp->addr));
6520 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
/* Accept-list change may require re-enabling page scan */
6526 hci_req_update_scan(hdev);
/* LE path from here on */
6531 addr_type = le_addr_type(cp->addr.type);
6533 if (cp->action == 0x02)
6534 auto_conn = HCI_AUTO_CONN_ALWAYS;
6535 else if (cp->action == 0x01)
6536 auto_conn = HCI_AUTO_CONN_DIRECT;
6538 auto_conn = HCI_AUTO_CONN_REPORT;
6540 /* Kernel internally uses conn_params with resolvable private
6541 * address, but Add Device allows only identity addresses.
6542 * Make sure it is enforced before calling
6543 * hci_conn_params_lookup.
6545 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6546 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6547 MGMT_STATUS_INVALID_PARAMS,
6548 &cp->addr, sizeof(cp->addr));
6552 /* If the connection parameters don't exist for this device,
6553 * they will be created and configured with defaults.
6555 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
6557 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6558 MGMT_STATUS_FAILED, &cp->addr,
/* Fetch the flags that hci_conn_params_set may just have created */
6562 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6565 current_flags = params->current_flags;
6568 hci_update_background_scan(hdev);
/* Notify other mgmt sockets about the addition and its device flags */
6571 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6572 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
6573 SUPPORTED_DEVICE_FLAGS(), current_flags);
6575 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6576 MGMT_STATUS_SUCCESS, &cp->addr,
6580 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_REMOVED to all mgmt sockets except the one
 * that issued the Remove Device command (sk).
 */
6584 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6585 bdaddr_t *bdaddr, u8 type)
6587 struct mgmt_ev_device_removed ev;
6589 bacpy(&ev.addr.bdaddr, bdaddr);
6590 ev.addr.type = type;
6592 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler.  With a concrete address, removes a
 * single device: from the accept list for BR/EDR, or its connection
 * parameters for LE.  With BDADDR_ANY (and type 0) it wipes the whole
 * accept list and all removable LE connection parameters.
 */
6595 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6596 void *data, u16 len)
6598 struct mgmt_cp_remove_device *cp = data;
6601 bt_dev_dbg(hdev, "sock %p", sk);
/* Single-device removal path */
6605 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6606 struct hci_conn_params *params;
6609 if (!bdaddr_type_is_valid(cp->addr.type)) {
6610 err = mgmt_cmd_complete(sk, hdev->id,
6611 MGMT_OP_REMOVE_DEVICE,
6612 MGMT_STATUS_INVALID_PARAMS,
6613 &cp->addr, sizeof(cp->addr));
6617 if (cp->addr.type == BDADDR_BREDR) {
6618 err = hci_bdaddr_list_del(&hdev->accept_list,
6622 err = mgmt_cmd_complete(sk, hdev->id,
6623 MGMT_OP_REMOVE_DEVICE,
6624 MGMT_STATUS_INVALID_PARAMS,
/* Accept-list change may allow page scan to be turned off */
6630 hci_req_update_scan(hdev);
6632 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path from here on */
6637 addr_type = le_addr_type(cp->addr.type);
6639 /* Kernel internally uses conn_params with resolvable private
6640 * address, but Remove Device allows only identity addresses.
6641 * Make sure it is enforced before calling
6642 * hci_conn_params_lookup.
6644 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6645 err = mgmt_cmd_complete(sk, hdev->id,
6646 MGMT_OP_REMOVE_DEVICE,
6647 MGMT_STATUS_INVALID_PARAMS,
6648 &cp->addr, sizeof(cp->addr));
6652 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6655 err = mgmt_cmd_complete(sk, hdev->id,
6656 MGMT_OP_REMOVE_DEVICE,
6657 MGMT_STATUS_INVALID_PARAMS,
6658 &cp->addr, sizeof(cp->addr));
/* Disabled and explicit-connect entries were not added via Add
 * Device, so they may not be removed through this command either.
 */
6662 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
6663 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
6664 err = mgmt_cmd_complete(sk, hdev->id,
6665 MGMT_OP_REMOVE_DEVICE,
6666 MGMT_STATUS_INVALID_PARAMS,
6667 &cp->addr, sizeof(cp->addr));
6671 list_del(&params->action);
6672 list_del(&params->list);
6674 hci_update_background_scan(hdev);
6676 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* Remove-all path (BDADDR_ANY) */
6678 struct hci_conn_params *p, *tmp;
6679 struct bdaddr_list *b, *btmp;
/* For the wildcard address only type 0 is meaningful */
6681 if (cp->addr.type) {
6682 err = mgmt_cmd_complete(sk, hdev->id,
6683 MGMT_OP_REMOVE_DEVICE,
6684 MGMT_STATUS_INVALID_PARAMS,
6685 &cp->addr, sizeof(cp->addr));
6689 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
6690 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6695 hci_req_update_scan(hdev);
6697 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6698 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6700 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep entries an in-progress explicit connect still relies on */
6701 if (p->explicit_connect) {
6702 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
6705 list_del(&p->action);
6710 bt_dev_dbg(hdev, "All LE connection parameters were removed");
6712 hci_update_background_scan(hdev);
6716 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
6717 MGMT_STATUS_SUCCESS, &cp->addr,
6720 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler.  Validates count and payload
 * length, clears previously-disabled connection parameters and then
 * stores the supplied LE connection parameters, skipping (rather than
 * rejecting the whole load on) individually invalid entries.
 */
6724 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6727 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on entries that can possibly fit in a mgmt packet */
6728 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6729 sizeof(struct mgmt_conn_param));
6730 u16 param_count, expected_len;
6733 if (!lmp_le_capable(hdev))
6734 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6735 MGMT_STATUS_NOT_SUPPORTED);
6737 param_count = __le16_to_cpu(cp->param_count);
6738 if (param_count > max_param_count) {
6739 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
6741 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6742 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the received payload exactly */
6745 expected_len = struct_size(cp, params, param_count);
6746 if (expected_len != len) {
6747 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
6749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6750 MGMT_STATUS_INVALID_PARAMS);
6753 bt_dev_dbg(hdev, "param_count %u", param_count);
6757 hci_conn_params_clear_disabled(hdev);
6759 for (i = 0; i < param_count; i++) {
6760 struct mgmt_conn_param *param = &cp->params[i];
6761 struct hci_conn_params *hci_param;
6762 u16 min, max, latency, timeout;
6765 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
/* Only LE identity address types make sense here */
6768 if (param->addr.type == BDADDR_LE_PUBLIC) {
6769 addr_type = ADDR_LE_DEV_PUBLIC;
6770 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6771 addr_type = ADDR_LE_DEV_RANDOM;
6773 bt_dev_err(hdev, "ignoring invalid connection parameters");
6777 min = le16_to_cpu(param->min_interval);
6778 max = le16_to_cpu(param->max_interval);
6779 latency = le16_to_cpu(param->latency);
6780 timeout = le16_to_cpu(param->timeout);
6782 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6783 min, max, latency, timeout);
/* Range-check the four values against the spec limits */
6785 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6786 bt_dev_err(hdev, "ignoring invalid connection parameters");
6790 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6793 bt_dev_err(hdev, "failed to add connection parameters");
6797 hci_param->conn_min_interval = min;
6798 hci_param->conn_max_interval = max;
6799 hci_param->conn_latency = latency;
6800 hci_param->supervision_timeout = timeout;
6803 hci_dev_unlock(hdev);
6805 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: mark the controller as
 * externally configured (or not).  Only valid while powered off and on
 * controllers with the EXTERNAL_CONFIG quirk.  A resulting change in
 * configured state moves the controller between the configured and
 * unconfigured index lists.
 */
6809 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6810 void *data, u16 len)
6812 struct mgmt_cp_set_external_config *cp = data;
6816 bt_dev_dbg(hdev, "sock %p", sk);
6818 if (hdev_is_powered(hdev))
6819 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6820 MGMT_STATUS_REJECTED);
6822 if (cp->config != 0x00 && cp->config != 0x01)
6823 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6824 MGMT_STATUS_INVALID_PARAMS);
6826 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6827 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6828 MGMT_STATUS_NOT_SUPPORTED);
6833 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6835 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6837 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6844 err = new_options(hdev, sk);
/* If the configured state flipped, re-announce the index: remove it
 * first, then add it back as configured or unconfigured.
 */
6846 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6847 mgmt_index_removed(hdev);
6849 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6850 hci_dev_set_flag(hdev, HCI_CONFIG);
6851 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
/* Power on briefly so the now-configured controller initializes */
6853 queue_work(hdev->req_workqueue, &hdev->power_on);
6855 set_bit(HCI_RAW, &hdev->flags);
6856 mgmt_index_added(hdev);
6861 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public address to be
 * programmed into the controller.  Only valid while powered off, with
 * a non-zero address, and on controllers providing a set_bdaddr
 * driver callback.  If this completes the configuration of an
 * unconfigured controller, it is re-announced as a configured index
 * and powered on for initialization.
 */
6865 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6866 void *data, u16 len)
6868 struct mgmt_cp_set_public_address *cp = data;
6872 bt_dev_dbg(hdev, "sock %p", sk);
6874 if (hdev_is_powered(hdev))
6875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6876 MGMT_STATUS_REJECTED);
6878 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6879 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6880 MGMT_STATUS_INVALID_PARAMS);
6882 if (!hdev->set_bdaddr)
6883 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6884 MGMT_STATUS_NOT_SUPPORTED);
6888 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6889 bacpy(&hdev->public_addr, &cp->bdaddr);
6891 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6898 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6899 err = new_options(hdev, sk);
/* Address was the missing piece: promote to a configured index */
6901 if (is_configured(hdev)) {
6902 mgmt_index_removed(hdev);
6904 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6906 hci_dev_set_flag(hdev, HCI_CONFIG);
6907 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6909 queue_work(hdev->req_workqueue, &hdev->power_on);
6913 hci_dev_unlock(hdev);
/* Completion handler for the Read Local OOB (Extended) Data request
 * queued by read_local_ssp_oob_req().  Packs the returned hash/
 * randomizer values into EIR format, completes the pending mgmt
 * command and, on success, broadcasts the updated OOB data to other
 * interested mgmt sockets.
 */
6917 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6918 u16 opcode, struct sk_buff *skb)
6920 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6921 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6922 u8 *h192, *r192, *h256, *r256;
6923 struct mgmt_pending_cmd *cmd;
6927 bt_dev_dbg(hdev, "status %u", status);
6929 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6933 mgmt_cp = cmd->param;
6936 status = mgmt_status(status);
/* Legacy variant: only P-192 hash and randomizer are available */
6943 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6944 struct hci_rp_read_local_oob_data *rp;
6946 if (skb->len != sizeof(*rp)) {
6947 status = MGMT_STATUS_FAILED;
6950 status = MGMT_STATUS_SUCCESS;
6951 rp = (void *)skb->data;
/* class-of-device (5) + hash192 (18) + rand192 (18) EIR bytes */
6953 eir_len = 5 + 18 + 18;
/* Extended variant: P-192 and P-256 values */
6960 struct hci_rp_read_local_oob_ext_data *rp;
6962 if (skb->len != sizeof(*rp)) {
6963 status = MGMT_STATUS_FAILED;
6966 status = MGMT_STATUS_SUCCESS;
6967 rp = (void *)skb->data;
/* In SC-only mode only the P-256 values are exposed */
6969 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6970 eir_len = 5 + 18 + 18;
6974 eir_len = 5 + 18 + 18 + 18 + 18;
6984 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6991 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6992 hdev->dev_class, 3);
6995 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6996 EIR_SSP_HASH_C192, h192, 16);
6997 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6998 EIR_SSP_RAND_R192, r192, 16);
7002 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7003 EIR_SSP_HASH_C256, h256, 16);
7004 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7005 EIR_SSP_RAND_R256, r256, 16);
7009 mgmt_rp->type = mgmt_cp->type;
7010 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7012 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7013 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7014 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7015 if (err < 0 || status)
/* Mark the requester as interested in future OOB data updates and
 * notify every other socket that already opted in.
 */
7018 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7020 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7021 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7022 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7025 mgmt_pending_remove(cmd);
/* Queue the HCI request that fetches local SSP OOB data for a pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command. Uses the extended (P-256
 * capable) HCI command when BR/EDR Secure Connections is enabled,
 * otherwise the legacy P-192 command; completion is handled by
 * read_local_oob_ext_data_complete().
 */
7028 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7029 struct mgmt_cp_read_local_oob_ext_data *cp)
7031 struct mgmt_pending_cmd *cmd;
7032 struct hci_request req;
7035 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7040 hci_req_init(&req, hdev);
7042 if (bredr_sc_enabled(hdev))
7043 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7045 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7047 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
/* Request never ran; drop the pending command we just added. */
7049 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA. For BR/EDR with SSP it
 * defers to read_local_ssp_oob_req() (asynchronous HCI round-trip); for
 * LE it synchronously builds the EIR blob (address, role, optional SC
 * confirmation/random values, flags) and completes the command, then
 * emits MGMT_EV_LOCAL_OOB_DATA_UPDATED to other sockets.
 * NOTE(review): extraction is missing lines (switch statement heads,
 * some branches); comments describe only the visible code.
 */
7056 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7057 void *data, u16 data_len)
7059 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7060 struct mgmt_rp_read_local_oob_ext_data *rp;
7063 u8 status, flags, role, addr[7], hash[16], rand[16];
7066 bt_dev_dbg(hdev, "sock %p", sk);
/* Validate the requested transport type against what is supported. */
7068 if (hdev_is_powered(hdev)) {
7070 case BIT(BDADDR_BREDR):
7071 status = mgmt_bredr_support(hdev);
7077 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7078 status = mgmt_le_support(hdev);
/* LE EIR: addr(9) + role(3) + hash(18) + rand(18) + flags(3). */
7082 eir_len = 9 + 3 + 18 + 18 + 3;
7085 status = MGMT_STATUS_INVALID_PARAMS;
7090 status = MGMT_STATUS_NOT_POWERED;
7094 rp_len = sizeof(*rp) + eir_len;
7095 rp = kmalloc(rp_len, GFP_ATOMIC);
7106 case BIT(BDADDR_BREDR):
7107 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* SSP data must come from the controller; go asynchronous. */
7108 err = read_local_ssp_oob_req(hdev, sk, cp);
7109 hci_dev_unlock(hdev);
7113 status = MGMT_STATUS_FAILED;
7116 eir_len = eir_append_data(rp->eir, eir_len,
7118 hdev->dev_class, 3);
7121 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7122 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7123 smp_generate_oob(hdev, hash, rand) < 0) {
7124 hci_dev_unlock(hdev);
7125 status = MGMT_STATUS_FAILED;
7129 /* This should return the active RPA, but since the RPA
7130 * is only programmed on demand, it is really hard to fill
7131 * this in at the moment. For now disallow retrieving
7132 * local out-of-band data when privacy is in use.
7134 * Returning the identity address will not help here since
7135 * pairing happens before the identity resolving key is
7136 * known and thus the connection establishment happens
7137 * based on the RPA and not the identity address.
7139 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7140 hci_dev_unlock(hdev);
7141 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address using the same rules the LE
 * address setup uses elsewhere in this file.
 */
7145 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7146 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7147 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7148 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7149 memcpy(addr, &hdev->static_addr, 6);
7152 memcpy(addr, &hdev->bdaddr, 6);
7156 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7157 addr, sizeof(addr));
7159 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7164 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7165 &role, sizeof(role));
/* SC confirmation and random values only when SC is enabled. */
7167 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7168 eir_len = eir_append_data(rp->eir, eir_len,
7170 hash, sizeof(hash));
7172 eir_len = eir_append_data(rp->eir, eir_len,
7174 rand, sizeof(rand));
7177 flags = mgmt_get_adv_discov_flags(hdev);
7179 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7180 flags |= LE_AD_NO_BREDR;
7182 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7183 &flags, sizeof(flags));
7187 hci_dev_unlock(hdev);
7189 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7191 status = MGMT_STATUS_SUCCESS;
7194 rp->type = cp->type;
7195 rp->eir_len = cpu_to_le16(eir_len);
7197 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7198 status, rp, sizeof(*rp) + eir_len);
7199 if (err < 0 || status)
7202 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7203 rp, sizeof(*rp) + eir_len,
7204 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT_ADV_FLAG_* values this controller supports,
 * as advertised in the Read Advertising Features reply and validated
 * against client-supplied flags in add_advertising()/get_adv_size_info().
 */
7212 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7216 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7217 flags |= MGMT_ADV_FLAG_DISCOV;
7218 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7219 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7220 flags |= MGMT_ADV_FLAG_APPEARANCE;
7221 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7223 /* In extended adv TX_POWER returned from Set Adv Param
7224 * will be always valid.
7226 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7227 ext_adv_capable(hdev))
7228 flags |= MGMT_ADV_FLAG_TX_POWER;
/* Extended advertising adds PHY selection and HW offload support. */
7230 if (ext_adv_capable(hdev)) {
7231 flags |= MGMT_ADV_FLAG_SEC_1M;
7232 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7233 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7235 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7236 flags |= MGMT_ADV_FLAG_SEC_2M;
7238 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7239 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Handler for MGMT_OP_READ_ADV_FEATURES: report the supported advertising
 * flags, maximum advertising/scan-response data lengths, the instance
 * limit, and the identifiers of all currently registered instances
 * (one u8 appended per instance after the fixed reply).
 */
7245 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7246 void *data, u16 data_len)
7248 struct mgmt_rp_read_adv_features *rp;
7251 struct adv_info *adv_instance;
7252 u32 supported_flags;
7255 bt_dev_dbg(hdev, "sock %p", sk);
7257 if (!lmp_le_capable(hdev))
7258 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7259 MGMT_STATUS_REJECTED);
7261 /* Enabling the experimental LL Privay support disables support for
7264 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
/* Fix: a command-status response must echo the opcode of the command
 * being handled. The previous MGMT_OP_SET_ADVERTISING here was a
 * copy/paste error that prevented userspace from matching the reply
 * to its Read Advertising Features request.
 */
7265 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7266 MGMT_STATUS_NOT_SUPPORTED);
/* Reply carries one extra byte per registered advertising instance. */
7270 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7271 rp = kmalloc(rp_len, GFP_ATOMIC);
7273 hci_dev_unlock(hdev);
7277 supported_flags = get_supported_adv_flags(hdev);
7279 rp->supported_flags = cpu_to_le32(supported_flags);
7280 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7281 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7282 rp->max_instances = hdev->le_num_of_adv_sets;
7283 rp->num_instances = hdev->adv_instance_cnt;
7285 instance = rp->instance;
7286 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7287 *instance = adv_instance->instance;
7291 hci_dev_unlock(hdev);
7293 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7294 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Return the number of EIR bytes the local name currently occupies, by
 * rendering it into a scratch buffer (short name max + 3-byte EIR header).
 */
7301 static u8 calculate_name_len(struct hci_dev *hdev)
7303 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7305 return append_local_name(hdev, buf, 0);
/* Compute how many bytes of caller-supplied TLV data fit into an
 * advertising (or scan-response) PDU after reserving room for every
 * field the kernel will insert itself based on the instance flags.
 */
7308 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7311 u8 max_len = HCI_MAX_AD_LENGTH;
/* Kernel-managed Flags field consumes part of the budget. */
7314 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7315 MGMT_ADV_FLAG_LIMITED_DISCOV |
7316 MGMT_ADV_FLAG_MANAGED_FLAGS))
7319 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7322 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7323 max_len -= calculate_name_len(hdev);
7325 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True when the kernel owns the EIR Flags field for this instance, so
 * user-supplied data must not contain its own EIR_FLAGS entry.
 */
7332 static bool flags_managed(u32 adv_flags)
7334 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7335 MGMT_ADV_FLAG_LIMITED_DISCOV |
7336 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True when the kernel inserts the TX power EIR field itself. */
7339 static bool tx_power_managed(u32 adv_flags)
7341 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True when the kernel inserts the local name EIR field itself. */
7344 static bool name_managed(u32 adv_flags)
7346 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True when the kernel inserts the appearance EIR field itself. */
7349 static bool appearance_managed(u32 adv_flags)
7351 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising/scan-response TLV data: it must fit
 * the remaining length budget, be a well-formed length-prefixed sequence,
 * and must not duplicate any EIR field the kernel manages for this
 * instance (flags, TX power, name, appearance).
 */
7354 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7355 u8 len, bool is_adv_data)
7360 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7365 /* Make sure that the data is correctly formatted. */
7366 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* Reject fields the kernel generates itself. */
7372 if (data[i + 1] == EIR_FLAGS &&
7373 (!is_adv_data || flags_managed(adv_flags)))
7376 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7379 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7382 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7385 if (data[i + 1] == EIR_APPEARANCE &&
7386 appearance_managed(adv_flags))
7389 /* If the current field length would exceed the total data
7390 * length, then it's invalid.
7392 if (i + cur_len >= len)
/* HCI request completion for add_advertising(): on failure, tear down any
 * instance that was still flagged as pending, then answer the pending
 * MGMT_OP_ADD_ADVERTISING command with status or the instance number.
 */
7399 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
7402 struct mgmt_pending_cmd *cmd;
7403 struct mgmt_cp_add_advertising *cp;
7404 struct mgmt_rp_add_advertising rp;
7405 struct adv_info *adv_instance, *n;
7408 bt_dev_dbg(hdev, "status %d", status);
7412 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
/* Walk all instances still marked pending (safe variant: entries may
 * be removed while iterating).
 */
7414 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
7415 if (!adv_instance->pending)
7419 adv_instance->pending = false;
7423 instance = adv_instance->instance;
7425 if (hdev->cur_adv_instance == instance)
7426 cancel_adv_timeout(hdev);
7428 hci_remove_adv_instance(hdev, instance);
7429 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
7436 rp.instance = cp->instance;
/* Failure -> command status; success -> full command complete. */
7439 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7440 mgmt_status(status));
7442 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7443 mgmt_status(status), &rp, sizeof(rp));
7445 mgmt_pending_remove(cmd);
7448 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_ADVERTISING: validate the request (instance
 * number, data length, flag mask, mutually-exclusive PHY flags, TLV
 * contents), register or replace the instance, and schedule it for
 * advertising if appropriate. Completion is reported either here
 * synchronously or via add_advertising_complete().
 * NOTE(review): extraction is missing lines (hci_dev_lock, some status
 * arguments, goto labels); comments cover only the visible code.
 */
7451 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7452 void *data, u16 data_len)
7454 struct mgmt_cp_add_advertising *cp = data;
7455 struct mgmt_rp_add_advertising rp;
7457 u32 supported_flags, phy_flags;
7459 u16 timeout, duration;
7460 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7461 u8 schedule_instance = 0;
7462 struct adv_info *next_instance;
7464 struct mgmt_pending_cmd *cmd;
7465 struct hci_request req;
7467 bt_dev_dbg(hdev, "sock %p", sk);
7469 status = mgmt_le_support(hdev);
7471 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7474 /* Enabling the experimental LL Privay support disables support for
7477 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7478 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7479 MGMT_STATUS_NOT_SUPPORTED);
/* Instance numbers are 1-based and bounded by the controller. */
7481 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7482 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7483 MGMT_STATUS_INVALID_PARAMS);
7485 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7486 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7487 MGMT_STATUS_INVALID_PARAMS);
7489 flags = __le32_to_cpu(cp->flags);
7490 timeout = __le16_to_cpu(cp->timeout);
7491 duration = __le16_to_cpu(cp->duration);
7493 /* The current implementation only supports a subset of the specified
7494 * flags. Also need to check mutual exclusiveness of sec flags.
7496 supported_flags = get_supported_adv_flags(hdev);
7497 phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
/* (phy_flags ^ (phy_flags & -phy_flags)) is nonzero when more than
 * one SEC_* PHY bit is set, which is invalid.
 */
7498 if (flags & ~supported_flags ||
7499 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7500 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7501 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs the timer machinery, which requires power. */
7505 if (timeout && !hdev_is_powered(hdev)) {
7506 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7507 MGMT_STATUS_REJECTED);
/* Serialize against other advertising/LE state changes in flight. */
7511 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7512 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7513 pending_find(MGMT_OP_SET_LE, hdev)) {
7514 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7519 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7520 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7521 cp->scan_rsp_len, false)) {
7522 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7523 MGMT_STATUS_INVALID_PARAMS);
7527 err = hci_add_adv_instance(hdev, cp->instance, flags,
7528 cp->adv_data_len, cp->data,
7530 cp->data + cp->adv_data_len,
7533 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7534 MGMT_STATUS_FAILED);
7538 /* Only trigger an advertising added event if a new instance was
7541 if (hdev->adv_instance_cnt > prev_instance_cnt)
7542 mgmt_advertising_added(sk, hdev, cp->instance);
7544 if (hdev->cur_adv_instance == cp->instance) {
7545 /* If the currently advertised instance is being changed then
7546 * cancel the current advertising and schedule the next
7547 * instance. If there is only one instance then the overridden
7548 * advertising data will be visible right away.
7550 cancel_adv_timeout(hdev);
7552 next_instance = hci_get_next_instance(hdev, cp->instance);
7554 schedule_instance = next_instance->instance;
7555 } else if (!hdev->adv_instance_timeout) {
7556 /* Immediately advertise the new instance if no other
7557 * instance is currently being advertised.
7559 schedule_instance = cp->instance;
7562 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
7563 * there is no instance to be advertised then we have no HCI
7564 * communication to make. Simply return.
7566 if (!hdev_is_powered(hdev) ||
7567 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7568 !schedule_instance) {
7569 rp.instance = cp->instance;
7570 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7571 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7575 /* We're good to go, update advertising data, parameters, and start
7578 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7585 hci_req_init(&req, hdev);
7587 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7590 err = hci_req_run(&req, add_advertising_complete);
7593 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7594 MGMT_STATUS_FAILED);
7595 mgmt_pending_remove(cmd);
7599 hci_dev_unlock(hdev);
/* HCI request completion for remove_advertising(). The instance itself
 * has already been removed by the time this runs, so the pending command
 * is always completed with success (see comment below).
 */
7604 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7607 struct mgmt_pending_cmd *cmd;
7608 struct mgmt_cp_remove_advertising *cp;
7609 struct mgmt_rp_remove_advertising rp;
7611 bt_dev_dbg(hdev, "status %d", status);
7615 /* A failure status here only means that we failed to disable
7616 * advertising. Otherwise, the advertising instance has been removed,
7617 * so report success.
7619 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7624 rp.instance = cp->instance;
7626 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7628 mgmt_pending_remove(cmd);
7631 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING: remove one advertising instance
 * (or all, when cp->instance is 0), disabling advertising on the
 * controller when no instances remain. Completes synchronously when no
 * HCI traffic is needed, otherwise via remove_advertising_complete().
 */
7634 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7635 void *data, u16 data_len)
7637 struct mgmt_cp_remove_advertising *cp = data;
7638 struct mgmt_rp_remove_advertising rp;
7639 struct mgmt_pending_cmd *cmd;
7640 struct hci_request req;
7643 bt_dev_dbg(hdev, "sock %p", sk);
7645 /* Enabling the experimental LL Privay support disables support for
7648 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
/* Fix: a command-status response must echo the opcode of the command
 * being handled. The previous MGMT_OP_SET_ADVERTISING here was a
 * copy/paste error that prevented userspace from matching the reply
 * to its Remove Advertising request.
 */
7649 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7650 MGMT_STATUS_NOT_SUPPORTED);
/* A non-zero instance must refer to an existing instance. */
7654 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
7655 err = mgmt_cmd_status(sk, hdev->id,
7656 MGMT_OP_REMOVE_ADVERTISING,
7657 MGMT_STATUS_INVALID_PARAMS);
/* Serialize against other advertising/LE state changes in flight. */
7661 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7662 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7663 pending_find(MGMT_OP_SET_LE, hdev)) {
7664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7669 if (list_empty(&hdev->adv_instances)) {
7670 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7671 MGMT_STATUS_INVALID_PARAMS);
7675 hci_req_init(&req, hdev);
7677 /* If we use extended advertising, instance is disabled and removed */
7678 if (ext_adv_capable(hdev)) {
7679 __hci_req_disable_ext_adv_instance(&req, cp->instance);
7680 __hci_req_remove_ext_adv_instance(&req, cp->instance);
7683 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
7685 if (list_empty(&hdev->adv_instances))
7686 __hci_req_disable_advertising(&req);
7688 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
7689 * flag is set or the device isn't powered then we have no HCI
7690 * communication to make. Simply return.
7692 if (skb_queue_empty(&req.cmd_q) ||
7693 !hdev_is_powered(hdev) ||
7694 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7695 hci_req_purge(&req);
7696 rp.instance = cp->instance;
7697 err = mgmt_cmd_complete(sk, hdev->id,
7698 MGMT_OP_REMOVE_ADVERTISING,
7699 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7703 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
7710 err = hci_req_run(&req, remove_advertising_complete);
7712 mgmt_pending_remove(cmd);
7715 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: given a prospective instance and
 * flag set, report how many bytes of advertising data and scan-response
 * data would remain available after the kernel-managed fields.
 */
7720 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
7721 void *data, u16 data_len)
7723 struct mgmt_cp_get_adv_size_info *cp = data;
7724 struct mgmt_rp_get_adv_size_info rp;
7725 u32 flags, supported_flags;
7728 bt_dev_dbg(hdev, "sock %p", sk);
7730 if (!lmp_le_capable(hdev))
7731 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7732 MGMT_STATUS_REJECTED);
7734 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7735 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7736 MGMT_STATUS_INVALID_PARAMS);
7738 flags = __le32_to_cpu(cp->flags);
7740 /* The current implementation only supports a subset of the specified
7743 supported_flags = get_supported_adv_flags(hdev);
7744 if (flags & ~supported_flags)
7745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7746 MGMT_STATUS_INVALID_PARAMS);
7748 rp.instance = cp->instance;
7749 rp.flags = cp->flags;
7750 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7751 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7753 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7754 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for management commands, indexed by opcode. Each entry
 * holds the handler, the expected (minimum) parameter size, and optional
 * HCI_MGMT_* flags: UNTRUSTED entries are callable without CAP_NET_ADMIN,
 * UNCONFIGURED entries are allowed on not-yet-configured controllers, and
 * HDEV_OPTIONAL entries may be invoked without a controller index.
 * NOTE(review): the extraction is missing some lines (a few flag
 * continuations and the closing brace of the table).
 */
7759 static const struct hci_mgmt_handler mgmt_handlers[] = {
7760 { NULL }, /* 0x0000 (no command) */
7761 { read_version, MGMT_READ_VERSION_SIZE,
7763 HCI_MGMT_UNTRUSTED },
7764 { read_commands, MGMT_READ_COMMANDS_SIZE,
7766 HCI_MGMT_UNTRUSTED },
7767 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
7769 HCI_MGMT_UNTRUSTED },
7770 { read_controller_info, MGMT_READ_INFO_SIZE,
7771 HCI_MGMT_UNTRUSTED },
7772 { set_powered, MGMT_SETTING_SIZE },
7773 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
7774 { set_connectable, MGMT_SETTING_SIZE },
7775 { set_fast_connectable, MGMT_SETTING_SIZE },
7776 { set_bondable, MGMT_SETTING_SIZE },
7777 { set_link_security, MGMT_SETTING_SIZE },
7778 { set_ssp, MGMT_SETTING_SIZE },
7779 { set_hs, MGMT_SETTING_SIZE },
7780 { set_le, MGMT_SETTING_SIZE },
7781 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
7782 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
7783 { add_uuid, MGMT_ADD_UUID_SIZE },
7784 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
7785 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
7787 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
7789 { disconnect, MGMT_DISCONNECT_SIZE },
7790 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
7791 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
7792 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
7793 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
7794 { pair_device, MGMT_PAIR_DEVICE_SIZE },
7795 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
7796 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
7797 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
7798 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
7799 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
7800 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
7801 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
7802 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
7804 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
7805 { start_discovery, MGMT_START_DISCOVERY_SIZE },
7806 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
7807 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
7808 { block_device, MGMT_BLOCK_DEVICE_SIZE },
7809 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
7810 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
7811 { set_advertising, MGMT_SETTING_SIZE },
7812 { set_bredr, MGMT_SETTING_SIZE },
7813 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
7814 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
7815 { set_secure_conn, MGMT_SETTING_SIZE },
7816 { set_debug_keys, MGMT_SETTING_SIZE },
7817 { set_privacy, MGMT_SET_PRIVACY_SIZE },
7818 { load_irks, MGMT_LOAD_IRKS_SIZE,
7820 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
7821 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
7822 { add_device, MGMT_ADD_DEVICE_SIZE },
7823 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
7824 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
7826 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
7828 HCI_MGMT_UNTRUSTED },
7829 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
7830 HCI_MGMT_UNCONFIGURED |
7831 HCI_MGMT_UNTRUSTED },
7832 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
7833 HCI_MGMT_UNCONFIGURED },
7834 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
7835 HCI_MGMT_UNCONFIGURED },
7836 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
7838 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
7839 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
7841 HCI_MGMT_UNTRUSTED },
7842 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
7843 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
7845 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
7846 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
7847 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
7848 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
7849 HCI_MGMT_UNTRUSTED },
7850 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
7851 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
7852 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
7853 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
7855 { set_wideband_speech, MGMT_SETTING_SIZE },
7856 { read_security_info, MGMT_READ_SECURITY_INFO_SIZE,
7857 HCI_MGMT_UNTRUSTED },
7858 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
7859 HCI_MGMT_UNTRUSTED |
7860 HCI_MGMT_HDEV_OPTIONAL },
7861 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
7863 HCI_MGMT_HDEV_OPTIONAL },
7864 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
7865 HCI_MGMT_UNTRUSTED },
7866 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
7868 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
7869 HCI_MGMT_UNTRUSTED },
7870 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
7872 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
7873 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
7874 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
7875 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
7877 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
/* Announce a newly registered controller to management sockets: emits
 * (UNCONF_)INDEX_ADDED to legacy listeners and EXT_INDEX_ADDED to
 * extended-index listeners. Raw-only devices are never exposed.
 */
7880 void mgmt_index_added(struct hci_dev *hdev)
7882 struct mgmt_ev_ext_index ev;
7884 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7887 switch (hdev->dev_type) {
7889 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7890 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7891 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7894 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7895 HCI_MGMT_INDEX_EVENTS);
7908 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7909 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal: fails all pending commands with
 * INVALID_INDEX, then emits (UNCONF_)INDEX_REMOVED and EXT_INDEX_REMOVED
 * to the respective listener classes. Raw-only devices are skipped.
 */
7912 void mgmt_index_removed(struct hci_dev *hdev)
7914 struct mgmt_ev_ext_index ev;
7915 u8 status = MGMT_STATUS_INVALID_INDEX;
7917 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7920 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command on this controller. */
7922 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7924 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7925 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7926 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7929 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7930 HCI_MGMT_INDEX_EVENTS);
7943 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7944 HCI_MGMT_EXT_INDEX_EVENTS);
/* This function requires the caller holds hdev->lock */
/* Re-populate the pending LE connect/report lists from the stored
 * per-device auto-connect policies after a (possibly fake) power cycle.
 */
7948 static void restart_le_actions(struct hci_dev *hdev)
7950 struct hci_conn_params *p;
7952 list_for_each_entry(p, &hdev->le_conn_params, list) {
7953 /* Needed for AUTO_OFF case where might not "really"
7954 * have been powered off.
7956 list_del_init(&p->action);
7958 switch (p->auto_connect) {
7959 case HCI_AUTO_CONN_DIRECT:
7960 case HCI_AUTO_CONN_ALWAYS:
7961 list_add(&p->action, &hdev->pend_le_conns);
7963 case HCI_AUTO_CONN_REPORT:
7964 list_add(&p->action, &hdev->pend_le_reports);
/* Called when controller power-on finishes: restore LE auto-connect
 * actions, restart background scanning, answer all pending
 * MGMT_OP_SET_POWERED commands and broadcast the new settings.
 */
7972 void mgmt_power_on(struct hci_dev *hdev, int err)
7974 struct cmd_lookup match = { NULL, hdev };
7976 bt_dev_dbg(hdev, "err %d", err);
7981 restart_le_actions(hdev);
7982 hci_update_background_scan(hdev);
7985 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7987 new_settings(hdev, match.sk);
7992 hci_dev_unlock(hdev);
/* Called on controller power-off: answer pending SET_POWERED commands,
 * fail all other pending commands with an appropriate status, announce a
 * zeroed Class of Device if one was set, and broadcast the new settings.
 */
7995 void __mgmt_power_off(struct hci_dev *hdev)
7997 struct cmd_lookup match = { NULL, hdev };
7998 u8 status, zero_cod[] = { 0, 0, 0 };
8000 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8002 /* If the power off is because of hdev unregistration let
8003 * use the appropriate INVALID_INDEX status. Otherwise use
8004 * NOT_POWERED. We cover both scenarios here since later in
8005 * mgmt_index_removed() any hci_conn callbacks will have already
8006 * been triggered, potentially causing misleading DISCONNECTED
8009 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8010 status = MGMT_STATUS_INVALID_INDEX;
8012 status = MGMT_STATUS_NOT_POWERED;
8014 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8016 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8017 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8018 zero_cod, sizeof(zero_cod),
8019 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8020 ext_info_changed(hdev, NULL);
8023 new_settings(hdev, match.sk);
/* Fail a pending MGMT_OP_SET_POWERED command; -ERFKILL is translated to
 * the dedicated RFKILLED status, anything else to a generic failure.
 */
8029 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8031 struct mgmt_pending_cmd *cmd;
8034 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8038 if (err == -ERFKILL)
8039 status = MGMT_STATUS_RFKILLED;
8041 status = MGMT_STATUS_FAILED;
8043 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8045 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key; the
 * store_hint tells userspace whether the key should be persisted.
 */
8048 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8051 struct mgmt_ev_new_link_key ev;
8053 memset(&ev, 0, sizeof(ev));
8055 ev.store_hint = persistent;
8056 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8057 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
8058 ev.key.type = key->type;
8059 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8060 ev.key.pin_len = key->pin_len;
8062 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK type plus its authentication level onto the MGMT_LTK_*
 * value reported in the New Long Term Key event.
 */
8065 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8067 switch (ltk->type) {
8069 case SMP_LTK_RESPONDER:
8070 if (ltk->authenticated)
8071 return MGMT_LTK_AUTHENTICATED;
8072 return MGMT_LTK_UNAUTHENTICATED;
8074 if (ltk->authenticated)
8075 return MGMT_LTK_P256_AUTH;
8076 return MGMT_LTK_P256_UNAUTH;
8077 case SMP_LTK_P256_DEBUG:
8078 return MGMT_LTK_P256_DEBUG;
/* Conservative fallback for unrecognized types. */
8081 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new SMP long term key. The store
 * hint is forced to zero for non-identity random addresses (RPA/NRPA),
 * since those keys cannot be reliably re-associated later.
 */
8084 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8086 struct mgmt_ev_new_long_term_key ev;
8088 memset(&ev, 0, sizeof(ev));
8090 /* Devices using resolvable or non-resolvable random addresses
8091 * without providing an identity resolving key don't require
8092 * to store long term keys. Their addresses will change the
8095 * Only when a remote device provides an identity address
8096 * make sure the long term key is stored. If the remote
8097 * identity is known, the long term keys are internally
8098 * mapped to the identity address. So allow static random
8099 * and public addresses here.
/* Top two address bits == 11 marks a static random (identity) address. */
8101 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8102 (key->bdaddr.b[5] & 0xc0) != 0xc0)
8103 ev.store_hint = 0x00;
8105 ev.store_hint = persistent;
8107 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8108 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
8109 ev.key.type = mgmt_ltk_type(key);
8110 ev.key.enc_size = key->enc_size;
8111 ev.key.ediv = key->ediv;
8112 ev.key.rand = key->rand;
8114 if (key->type == SMP_LTK)
8115 ev.key.initiator = 1;
8117 /* Make sure we copy only the significant bytes based on the
8118 * encryption key size, and set the rest of the value to zeroes.
8120 memcpy(ev.key.val, key->val, key->enc_size);
8121 memset(ev.key.val + key->enc_size, 0,
8122 sizeof(ev.key.val) - key->enc_size);
8124 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a newly distributed identity resolving key,
 * together with the RPA currently associated with the device.
 */
8127 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8129 struct mgmt_ev_new_irk ev;
8131 memset(&ev, 0, sizeof(ev));
8133 ev.store_hint = persistent;
8135 bacpy(&ev.rpa, &irk->rpa);
8136 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8137 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
8138 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8140 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key. As with LTKs,
 * the store hint is suppressed for non-identity random addresses.
 */
8143 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8146 struct mgmt_ev_new_csrk ev;
8148 memset(&ev, 0, sizeof(ev));
8150 /* Devices using resolvable or non-resolvable random addresses
8151 * without providing an identity resolving key don't require
8152 * to store signature resolving keys. Their addresses will change
8153 * the next time around.
8155 * Only when a remote device provides an identity address
8156 * make sure the signature resolving key is stored. So allow
8157 * static random and public addresses here.
8159 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8160 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8161 ev.store_hint = 0x00;
8163 ev.store_hint = persistent;
8165 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8166 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
8167 ev.key.type = csrk->type;
8168 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8170 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with updated LE connection parameters.
 * Only identity addresses are reported; RPAs are silently skipped since
 * userspace cannot persist parameters against a changing address.
 */
8173 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8174 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8175 u16 max_interval, u16 latency, u16 timeout)
8177 struct mgmt_ev_new_conn_param ev;
8179 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8182 memset(&ev, 0, sizeof(ev));
8183 bacpy(&ev.addr.bdaddr, bdaddr);
8184 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8185 ev.store_hint = store_hint;
8186 ev.min_interval = cpu_to_le16(min_interval);
8187 ev.max_interval = cpu_to_le16(max_interval);
8188 ev.latency = cpu_to_le16(latency);
8189 ev.timeout = cpu_to_le16(timeout);
8191 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED with an EIR blob describing the peer:
 * either the raw LE advertising data, or (for BR/EDR) the remote name
 * plus Class of Device when available.
 * NOTE(review): the declaration of `buf` is among the lines missing
 * from this extraction.
 */
8194 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
8195 u32 flags, u8 *name, u8 name_len)
8198 struct mgmt_ev_device_connected *ev = (void *) buf;
8201 bacpy(&ev->addr.bdaddr, &conn->dst);
8202 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8204 ev->flags = __cpu_to_le32(flags);
8206 /* We must ensure that the EIR Data fields are ordered and
8207 * unique. Keep it simple for now and avoid the problem by not
8208 * adding any BR/EDR data to the LE adv.
8210 if (conn->le_adv_data_len > 0) {
8211 memcpy(&ev->eir[eir_len],
8212 conn->le_adv_data, conn->le_adv_data_len);
8213 eir_len = conn->le_adv_data_len;
8216 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Skip the all-zero (unset) Class of Device. */
8219 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
8220 eir_len = eir_append_data(ev->eir, eir_len,
8222 conn->dev_class, 3);
8225 ev->eir_len = cpu_to_le16(eir_len);
8227 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
8228 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending
 * MGMT_OP_DISCONNECT command and hand its socket back to the caller
 * via the void* cookie.
 */
8231 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8233 struct sock **sk = data;
8235 cmd->cmd_complete(cmd, 0);
8240 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: emit Device Unpaired and complete a
 * pending MGMT_OP_UNPAIR_DEVICE command; `data` is the hci_dev.
 */
8243 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8245 struct hci_dev *hdev = data;
8246 struct mgmt_cp_unpair_device *cp = cmd->param;
8248 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8250 cmd->cmd_complete(cmd, 0);
8251 mgmt_pending_remove(cmd);
8254 bool mgmt_powering_down(struct hci_dev *hdev)
8256 struct mgmt_pending_cmd *cmd;
8257 struct mgmt_mode *cp;
8259 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8270 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
8271 u8 link_type, u8 addr_type, u8 reason,
8272 bool mgmt_connected)
8274 struct mgmt_ev_device_disconnected ev;
8275 struct sock *sk = NULL;
8277 /* The connection is still in hci_conn_hash so test for 1
8278 * instead of 0 to know if this is the last one.
8280 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8281 cancel_delayed_work(&hdev->power_off);
8282 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8285 if (!mgmt_connected)
8288 if (link_type != ACL_LINK && link_type != LE_LINK)
8291 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
8293 bacpy(&ev.addr.bdaddr, bdaddr);
8294 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8297 /* Report disconnects due to suspend */
8298 if (hdev->suspended)
8299 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
8301 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
8306 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8310 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8311 u8 link_type, u8 addr_type, u8 status)
8313 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8314 struct mgmt_cp_disconnect *cp;
8315 struct mgmt_pending_cmd *cmd;
8317 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8320 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8326 if (bacmp(bdaddr, &cp->addr.bdaddr))
8329 if (cp->addr.type != bdaddr_type)
8332 cmd->cmd_complete(cmd, mgmt_status(status));
8333 mgmt_pending_remove(cmd);
8336 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8337 u8 addr_type, u8 status)
8339 struct mgmt_ev_connect_failed ev;
8341 /* The connection is still in hci_conn_hash so test for 1
8342 * instead of 0 to know if this is the last one.
8344 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8345 cancel_delayed_work(&hdev->power_off);
8346 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8349 bacpy(&ev.addr.bdaddr, bdaddr);
8350 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8351 ev.status = mgmt_status(status);
8353 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
8356 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8358 struct mgmt_ev_pin_code_request ev;
8360 bacpy(&ev.addr.bdaddr, bdaddr);
8361 ev.addr.type = BDADDR_BREDR;
8364 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8367 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8370 struct mgmt_pending_cmd *cmd;
8372 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8376 cmd->cmd_complete(cmd, mgmt_status(status));
8377 mgmt_pending_remove(cmd);
8380 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8383 struct mgmt_pending_cmd *cmd;
8385 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8389 cmd->cmd_complete(cmd, mgmt_status(status));
8390 mgmt_pending_remove(cmd);
8393 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8394 u8 link_type, u8 addr_type, u32 value,
8397 struct mgmt_ev_user_confirm_request ev;
8399 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8401 bacpy(&ev.addr.bdaddr, bdaddr);
8402 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8403 ev.confirm_hint = confirm_hint;
8404 ev.value = cpu_to_le32(value);
8406 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
8410 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8411 u8 link_type, u8 addr_type)
8413 struct mgmt_ev_user_passkey_request ev;
8415 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8417 bacpy(&ev.addr.bdaddr, bdaddr);
8418 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8420 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
8424 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8425 u8 link_type, u8 addr_type, u8 status,
8428 struct mgmt_pending_cmd *cmd;
8430 cmd = pending_find(opcode, hdev);
8434 cmd->cmd_complete(cmd, mgmt_status(status));
8435 mgmt_pending_remove(cmd);
8440 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8441 u8 link_type, u8 addr_type, u8 status)
8443 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8444 status, MGMT_OP_USER_CONFIRM_REPLY);
8447 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8448 u8 link_type, u8 addr_type, u8 status)
8450 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8452 MGMT_OP_USER_CONFIRM_NEG_REPLY);
8455 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8456 u8 link_type, u8 addr_type, u8 status)
8458 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8459 status, MGMT_OP_USER_PASSKEY_REPLY);
8462 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8463 u8 link_type, u8 addr_type, u8 status)
8465 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8467 MGMT_OP_USER_PASSKEY_NEG_REPLY);
8470 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
8471 u8 link_type, u8 addr_type, u32 passkey,
8474 struct mgmt_ev_passkey_notify ev;
8476 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8478 bacpy(&ev.addr.bdaddr, bdaddr);
8479 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8480 ev.passkey = __cpu_to_le32(passkey);
8481 ev.entered = entered;
8483 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
8486 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
8488 struct mgmt_ev_auth_failed ev;
8489 struct mgmt_pending_cmd *cmd;
8490 u8 status = mgmt_status(hci_status);
8492 bacpy(&ev.addr.bdaddr, &conn->dst);
8493 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8496 cmd = find_pairing(conn);
8498 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
8499 cmd ? cmd->sk : NULL);
8502 cmd->cmd_complete(cmd, status);
8503 mgmt_pending_remove(cmd);
8507 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
8509 struct cmd_lookup match = { NULL, hdev };
8513 u8 mgmt_err = mgmt_status(status);
8514 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
8515 cmd_status_rsp, &mgmt_err);
8519 if (test_bit(HCI_AUTH, &hdev->flags))
8520 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
8522 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
8524 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
8528 new_settings(hdev, match.sk);
8534 static void clear_eir(struct hci_request *req)
8536 struct hci_dev *hdev = req->hdev;
8537 struct hci_cp_write_eir cp;
8539 if (!lmp_ext_inq_capable(hdev))
8542 memset(hdev->eir, 0, sizeof(hdev->eir));
8544 memset(&cp, 0, sizeof(cp));
8546 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8549 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
8551 struct cmd_lookup match = { NULL, hdev };
8552 struct hci_request req;
8553 bool changed = false;
8556 u8 mgmt_err = mgmt_status(status);
8558 if (enable && hci_dev_test_and_clear_flag(hdev,
8560 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8561 new_settings(hdev, NULL);
8564 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
8570 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
8572 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
8574 changed = hci_dev_test_and_clear_flag(hdev,
8577 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8580 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
8583 new_settings(hdev, match.sk);
8588 hci_req_init(&req, hdev);
8590 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8591 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
8592 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
8593 sizeof(enable), &enable);
8594 __hci_req_update_eir(&req);
8599 hci_req_run(&req, NULL);
8602 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8604 struct cmd_lookup *match = data;
8606 if (match->sk == NULL) {
8607 match->sk = cmd->sk;
8608 sock_hold(match->sk);
8612 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
8615 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
8617 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
8618 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
8619 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
8622 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
8623 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8624 ext_info_changed(hdev, NULL);
8631 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
8633 struct mgmt_cp_set_local_name ev;
8634 struct mgmt_pending_cmd *cmd;
8639 memset(&ev, 0, sizeof(ev));
8640 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
8641 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
8643 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
8645 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
8647 /* If this is a HCI command related to powering on the
8648 * HCI dev don't send any mgmt signals.
8650 if (pending_find(MGMT_OP_SET_POWERED, hdev))
8654 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
8655 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
8656 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
8659 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8663 for (i = 0; i < uuid_count; i++) {
8664 if (!memcmp(uuid, uuids[i], 16))
8671 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
8675 while (parsed < eir_len) {
8676 u8 field_len = eir[0];
8683 if (eir_len - parsed < field_len + 1)
8687 case EIR_UUID16_ALL:
8688 case EIR_UUID16_SOME:
8689 for (i = 0; i + 3 <= field_len; i += 2) {
8690 memcpy(uuid, bluetooth_base_uuid, 16);
8691 uuid[13] = eir[i + 3];
8692 uuid[12] = eir[i + 2];
8693 if (has_uuid(uuid, uuid_count, uuids))
8697 case EIR_UUID32_ALL:
8698 case EIR_UUID32_SOME:
8699 for (i = 0; i + 5 <= field_len; i += 4) {
8700 memcpy(uuid, bluetooth_base_uuid, 16);
8701 uuid[15] = eir[i + 5];
8702 uuid[14] = eir[i + 4];
8703 uuid[13] = eir[i + 3];
8704 uuid[12] = eir[i + 2];
8705 if (has_uuid(uuid, uuid_count, uuids))
8709 case EIR_UUID128_ALL:
8710 case EIR_UUID128_SOME:
8711 for (i = 0; i + 17 <= field_len; i += 16) {
8712 memcpy(uuid, eir + i + 2, 16);
8713 if (has_uuid(uuid, uuid_count, uuids))
8719 parsed += field_len + 1;
8720 eir += field_len + 1;
8726 static void restart_le_scan(struct hci_dev *hdev)
8728 /* If controller is not scanning we are done. */
8729 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8732 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8733 hdev->discovery.scan_start +
8734 hdev->discovery.scan_duration))
8737 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
8738 DISCOV_LE_RESTART_DELAY);
8741 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8742 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8744 /* If a RSSI threshold has been specified, and
8745 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8746 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8747 * is set, let it through for further processing, as we might need to
8750 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8751 * the results are also dropped.
8753 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8754 (rssi == HCI_RSSI_INVALID ||
8755 (rssi < hdev->discovery.rssi &&
8756 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8759 if (hdev->discovery.uuid_count != 0) {
8760 /* If a list of UUIDs is provided in filter, results with no
8761 * matching UUID should be dropped.
8763 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8764 hdev->discovery.uuids) &&
8765 !eir_has_uuids(scan_rsp, scan_rsp_len,
8766 hdev->discovery.uuid_count,
8767 hdev->discovery.uuids))
8771 /* If duplicate filtering does not report RSSI changes, then restart
8772 * scanning to ensure updated result with updated RSSI values.
8774 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8775 restart_le_scan(hdev);
8777 /* Validate RSSI value against the RSSI threshold once more. */
8778 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8779 rssi < hdev->discovery.rssi)
8786 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8787 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
8788 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8791 struct mgmt_ev_device_found *ev = (void *)buf;
8794 /* Don't send events for a non-kernel initiated discovery. With
8795 * LE one exception is if we have pend_le_reports > 0 in which
8796 * case we're doing passive scanning and want these events.
8798 if (!hci_discovery_active(hdev)) {
8799 if (link_type == ACL_LINK)
8801 if (link_type == LE_LINK &&
8802 list_empty(&hdev->pend_le_reports) &&
8803 !hci_is_adv_monitoring(hdev)) {
8808 if (hdev->discovery.result_filtering) {
8809 /* We are using service discovery */
8810 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8815 if (hdev->discovery.limited) {
8816 /* Check for limited discoverable bit */
8818 if (!(dev_class[1] & 0x20))
8821 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
8822 if (!flags || !(flags[0] & LE_AD_LIMITED))
8827 /* Make sure that the buffer is big enough. The 5 extra bytes
8828 * are for the potential CoD field.
8830 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8833 memset(buf, 0, sizeof(buf));
8835 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8836 * RSSI value was reported as 0 when not available. This behavior
8837 * is kept when using device discovery. This is required for full
8838 * backwards compatibility with the API.
8840 * However when using service discovery, the value 127 will be
8841 * returned when the RSSI is not available.
8843 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8844 link_type == ACL_LINK)
8847 bacpy(&ev->addr.bdaddr, bdaddr);
8848 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8850 ev->flags = cpu_to_le32(flags);
8853 /* Copy EIR or advertising data into event */
8854 memcpy(ev->eir, eir, eir_len);
8856 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8858 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8861 if (scan_rsp_len > 0)
8862 /* Append scan response data to event */
8863 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8865 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8866 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8868 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
8871 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8872 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8874 struct mgmt_ev_device_found *ev;
8875 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8878 ev = (struct mgmt_ev_device_found *) buf;
8880 memset(buf, 0, sizeof(buf));
8882 bacpy(&ev->addr.bdaddr, bdaddr);
8883 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8886 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8889 ev->eir_len = cpu_to_le16(eir_len);
8891 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8894 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8896 struct mgmt_ev_discovering ev;
8898 bt_dev_dbg(hdev, "discovering %u", discovering);
8900 memset(&ev, 0, sizeof(ev));
8901 ev.type = hdev->discovery.type;
8902 ev.discovering = discovering;
8904 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8907 void mgmt_suspending(struct hci_dev *hdev, u8 state)
8909 struct mgmt_ev_controller_suspend ev;
8911 ev.suspend_state = state;
8912 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
8915 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
8918 struct mgmt_ev_controller_resume ev;
8920 ev.wake_reason = reason;
8922 bacpy(&ev.addr.bdaddr, bdaddr);
8923 ev.addr.type = addr_type;
8925 memset(&ev.addr, 0, sizeof(ev.addr));
8928 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
8931 static struct hci_mgmt_chan chan = {
8932 .channel = HCI_CHANNEL_CONTROL,
8933 .handler_count = ARRAY_SIZE(mgmt_handlers),
8934 .handlers = mgmt_handlers,
8935 .hdev_init = mgmt_init_hdev,
8940 return hci_mgmt_chan_register(&chan);
8943 void mgmt_exit(void)
8945 hci_mgmt_chan_unregister(&chan);