   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 18
/* Opcodes a trusted (privileged) management socket may issue; advertised to
 * user space via MGMT_OP_READ_COMMANDS.
 * NOTE(review): several entries (and the closing brace) appear to have been
 * lost in extraction — the list is sparser than the upstream one.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
/* Events a trusted management socket may receive; advertised via
 * MGMT_OP_READ_COMMANDS.  NOTE(review): some entries and the closing brace
 * were lost in extraction.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
/* Read-only opcodes that even an untrusted (unprivileged) management socket
 * may issue.  NOTE(review): closing brace lost in extraction.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Events delivered to untrusted management sockets — index/configuration
 * notifications only, nothing security-sensitive.
 * NOTE(review): closing brace lost in extraction.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
200 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
202 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
203 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* HCI to MGMT error code conversion table, indexed by HCI status code.
 * NOTE(review): the index-0x00 entry (MGMT_STATUS_SUCCESS) and closing brace
 * appear to have been lost in extraction — restore before compiling.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
273 static u8 mgmt_status(u8 hci_status)
275 if (hci_status < ARRAY_SIZE(mgmt_status_table))
276 return mgmt_status_table[hci_status];
278 return MGMT_STATUS_FAILED;
281 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
284 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
288 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
289 u16 len, int flag, struct sock *skip_sk)
291 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
295 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
296 struct sock *skip_sk)
298 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
299 HCI_SOCK_TRUSTED, skip_sk);
302 static u8 le_addr_type(u8 mgmt_addr_type)
304 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
305 return ADDR_LE_DEV_PUBLIC;
307 return ADDR_LE_DEV_RANDOM;
310 void mgmt_fill_version_info(void *ver)
312 struct mgmt_rp_read_version *rp = ver;
314 rp->version = MGMT_VERSION;
315 rp->revision = cpu_to_le16(MGMT_REVISION);
318 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
321 struct mgmt_rp_read_version rp;
323 bt_dev_dbg(hdev, "sock %p", sk);
325 mgmt_fill_version_info(&rp);
327 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
331 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
334 struct mgmt_rp_read_commands *rp;
335 u16 num_commands, num_events;
339 bt_dev_dbg(hdev, "sock %p", sk);
341 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
342 num_commands = ARRAY_SIZE(mgmt_commands);
343 num_events = ARRAY_SIZE(mgmt_events);
345 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
346 num_events = ARRAY_SIZE(mgmt_untrusted_events);
349 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
351 rp = kmalloc(rp_size, GFP_KERNEL);
355 rp->num_commands = cpu_to_le16(num_commands);
356 rp->num_events = cpu_to_le16(num_events);
358 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
359 __le16 *opcode = rp->opcodes;
361 for (i = 0; i < num_commands; i++, opcode++)
362 put_unaligned_le16(mgmt_commands[i], opcode);
364 for (i = 0; i < num_events; i++, opcode++)
365 put_unaligned_le16(mgmt_events[i], opcode);
367 __le16 *opcode = rp->opcodes;
369 for (i = 0; i < num_commands; i++, opcode++)
370 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
372 for (i = 0; i < num_events; i++, opcode++)
373 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
376 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: return the ids of all configured
 * HCI_PRIMARY controllers.  Walks hci_dev_list twice under the read lock:
 * once to size the reply, once to fill it.
 * NOTE(review): structural lines (braces, `count = 0;`, -ENOMEM return,
 * kfree, `return err;`) were lost in extraction — restore from upstream.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_rp_read_index_list *rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count eligible (configured primary) controllers. */
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);	/* GFP_ATOMIC: under read_lock */
		read_unlock(&hci_dev_list_lock);

	/* Second pass: fill the index array, skipping controllers that are
	 * still in setup/config or bound to a user channel.
	 */
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);

	/* Recompute length: the second pass may have skipped entries. */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass walk as
 * read_index_list(), but selecting primary controllers that ARE still
 * unconfigured (HCI_UNCONFIGURED set).
 * NOTE(review): structural lines (braces, `count = 0;`, -ENOMEM return,
 * kfree, `return err;`) were lost in extraction — restore from upstream.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
	struct mgmt_rp_read_unconf_index_list *rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count unconfigured primary controllers. */
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);	/* GFP_ATOMIC: under read_lock */
		read_unlock(&hci_dev_list_lock);

	/* Second pass: fill indices, skipping setup/config/user-channel. */
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list primary and AMP controllers,
 * each entry carrying a type (0x00 configured primary, 0x01 unconfigured
 * primary, 0x02 AMP), bus and index.  Calling this once permanently
 * switches the socket over to extended index events.
 * NOTE(review): structural lines (braces, `count = 0;`, -ENOMEM return,
 * an `else`, kfree, `return err;`) were lost in extraction.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
	struct mgmt_rp_read_ext_index_list *rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count primary + AMP controllers. */
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
		read_unlock(&hci_dev_list_lock);

	/* Second pass: fill entries, skipping setup/config/user-channel. */
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
				rp->entry[count].type = 0x00;	/* configured; NOTE(review): `else` lost */
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));
577 static bool is_configured(struct hci_dev *hdev)
579 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
580 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
583 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
584 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
585 !bacmp(&hdev->public_addr, BDADDR_ANY))
591 static __le32 get_missing_options(struct hci_dev *hdev)
595 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
596 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
597 options |= MGMT_OPTION_EXTERNAL_CONFIG;
599 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
600 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
601 !bacmp(&hdev->public_addr, BDADDR_ANY))
602 options |= MGMT_OPTION_PUBLIC_ADDRESS;
604 return cpu_to_le32(options);
607 static int new_options(struct hci_dev *hdev, struct sock *skip)
609 __le32 options = get_missing_options(hdev);
611 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
612 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
615 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
617 __le32 options = get_missing_options(hdev);
619 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
623 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
624 void *data, u16 data_len)
626 struct mgmt_rp_read_config_info rp;
629 bt_dev_dbg(hdev, "sock %p", sk);
633 memset(&rp, 0, sizeof(rp));
634 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
636 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
637 options |= MGMT_OPTION_EXTERNAL_CONFIG;
639 if (hdev->set_bdaddr)
640 options |= MGMT_OPTION_PUBLIC_ADDRESS;
642 rp.supported_options = cpu_to_le32(options);
643 rp.missing_options = get_missing_options(hdev);
645 hci_dev_unlock(hdev);
647 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the MGMT_PHY_* bitmask of PHYs this controller supports, derived
 * from the BR/EDR LMP feature bits and the LE feature page.
 * NOTE(review): several closing braces and the `}` pairs of the nested
 * capability blocks were lost in extraction.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is mandatory for any BR/EDR controller. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;

	return supported_phys;
/* Build the MGMT_PHY_* bitmask of PHYs currently selected.  For BR/EDR the
 * EDR packet-type bits in hdev->pkt_type are "disabled" flags, hence the
 * inverted tests; for LE the default TX/RX PHY preferences are consulted.
 * NOTE(review): several closing braces were lost in extraction.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* pkt_type EDR bits are set to DISABLE the type. */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;

	return selected_phys;
766 static u32 get_configurable_phys(struct hci_dev *hdev)
768 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
769 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the MGMT_SETTING_* bitmask of settings this controller could
 * support, derived from its LMP/LE capabilities and quirks.
 * NOTE(review): the `u32 settings = 0;` declaration, several braces and
 * the final `return settings;` were lost in extraction.
 */
static u32 get_supported_settings(struct hci_dev *hdev)

	/* Settings every controller supports. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the MGMT_SETTING_* bitmask of settings currently active, taken
 * from hdev's dev flags.  NOTE(review): the `u32 settings = 0;`
 * declaration, a closing brace and the final `return settings;` were lost
 * in extraction.
 */
static u32 get_current_settings(struct hci_dev *hdev)

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;
895 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
897 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
900 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
901 struct hci_dev *hdev,
904 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Return the advertising-data Flags discoverability bits (LE_AD_GENERAL or
 * LE_AD_LIMITED, else 0) matching the current — or still pending —
 * discoverable state.  NOTE(review): the `if (cmd) {` guard, the
 * `if (cp->val == 0x01)` test, the trailing `return 0;` and closing braces
 * were lost in extraction.
 */
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
		struct mgmt_mode *cp = cmd->param;
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		return LE_AD_LIMITED;
	else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return LE_AD_GENERAL;
/* Report whether the controller should currently be connectable, preferring
 * the value carried by a pending SET_CONNECTABLE command over the flag.
 * NOTE(review): the `if (cmd) {` guard, `return cp->val;` and closing
 * braces were lost in extraction.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
		struct mgmt_mode *cp = cmd->param;

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Delayed work: when the service-cache period ends, clear HCI_SERVICE_CACHE
 * and push fresh EIR data and class-of-device to the controller.
 * NOTE(review): the container_of() continuation line, hci_dev_lock(), an
 * early return and braces were lost in extraction.
 */
static void service_cache_off(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,
	struct hci_request req;

	/* Only act if the cache flag was actually set. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))

	hci_req_init(&req, hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
/* Delayed work: mark the resolvable private address (RPA) as expired so the
 * next advertising (re)start generates and programs a fresh one.
 * NOTE(review): the container_of() continuation, an early return and an
 * `else` before __hci_req_enable_advertising() were lost in extraction.
 */
static void rpa_expired(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh unless advertising is active. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
/* One-shot mgmt attach for a controller: the first caller to set HCI_MGMT
 * initialises the delayed works and clears the implicit bondable flag;
 * repeat calls return early.  NOTE(review): the early `return;` and braces
 * were lost in extraction.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: snapshot the controller's address, HCI
 * version, manufacturer, class, names and settings into one reply.
 * NOTE(review): the matching hci_dev_lock() and the final sizeof(rp)
 * argument were lost in extraction.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Append class-of-device (BR/EDR only), appearance (LE only) and both
 * device names to @eir, returning the total length written.
 * NOTE(review): the eir_len/name_len declarations, the appearance value
 * argument and the final `return eir_len;` were lost in extraction.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like READ_INFO but packs class, appearance
 * and names into an EIR blob.  Calling it once permanently switches the
 * socket from class/name events to extended-info events.
 * NOTE(review): the stack buffer declaration, eir_len declaration,
 * hci_dev_lock() and some braces were lost in extraction.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
	struct mgmt_rp_read_ext_info *rp = (void *)buf;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (EIR-packed controller info) to
 * sockets subscribed to extended-info events, skipping @skip.
 * NOTE(review): the stack buffer and eir_len declarations were lost in
 * extraction.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
1120 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1122 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1124 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1128 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1130 bt_dev_dbg(hdev, "status 0x%02x", status);
1132 if (hci_conn_count(hdev) == 0) {
1133 cancel_delayed_work(&hdev->power_off);
1134 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1138 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1140 struct mgmt_ev_advertising_added ev;
1142 ev.instance = instance;
1144 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1147 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1150 struct mgmt_ev_advertising_removed ev;
1152 ev.instance = instance;
1154 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1157 static void cancel_adv_timeout(struct hci_dev *hdev)
1159 if (hdev->adv_instance_timeout) {
1160 hdev->adv_instance_timeout = 0;
1161 cancel_delayed_work(&hdev->adv_instance_expire);
/* Queue the HCI commands that quiesce a controller before power-off:
 * disable page/inquiry scan, clear advertising instances, stop advertising
 * and discovery, and abort every connection with reason 0x15 (power off).
 * Returns the hci_req_run() result.  NOTE(review): the `u8 scan = 0x00;`
 * declaration, some braces and the final `return err;` were lost in
 * extraction.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: power the controller up (queue power_on
 * work) or down (quiesce via clean_up_hci_state(), then delayed power_off).
 * Rejects values other than 0x00/0x01 and refuses concurrent SET_POWERED
 * commands.  NOTE(review): signature continuation, hci_dev_lock(), several
 * goto/brace/else lines and `return err;` were lost in extraction.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only one power transition may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,

	/* Already in the requested state: just echo the settings back. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);

		queue_work(hdev->req_workqueue, &hdev->power_on);

	/* Disconnect connections, stop scans, etc */
	err = clean_up_hci_state(hdev);

	queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
			   HCI_POWER_OFF_TIMEOUT);

	/* ENODATA means there were no HCI commands queued */
	if (err == -ENODATA) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_SETTINGS with the current settings bitmask to all
 * sockets subscribed to setting events, except @skip.
 */
1254 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1256 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1258 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1259 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast new settings to everyone (no skip socket). */
1262 int mgmt_new_settings(struct hci_dev *hdev)
1264 return new_settings(hdev, NULL);
1269 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer @cmd with the current settings,
 * remember the first responder's socket in the cmd_lookup match (held
 * so the caller can later skip it when broadcasting), then free @cmd.
 */
1273 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1275 struct cmd_lookup *match = data;
1277 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1279 list_del(&cmd->list);
1281 if (match->sk == NULL) {
1282 match->sk = cmd->sk;
1283 sock_hold(match->sk);
1286 mgmt_pending_free(cmd);
/* Foreach callback: reply to @cmd with the status pointed to by @data
 * and remove the pending command.
 * NOTE(review): the 'status' local declaration is sampled out of view.
 */
1289 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1293 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1294 mgmt_pending_remove(cmd);
/* Foreach callback: prefer the per-command cmd_complete hook when one is
 * installed; otherwise fall back to a plain command-status reply.
 */
1297 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1299 if (cmd->cmd_complete) {
1302 cmd->cmd_complete(cmd, *status);
1303 mgmt_pending_remove(cmd);
1308 cmd_status_rsp(cmd, data);
/* Generic completion: echo the command's stored parameter blob back. */
1311 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1313 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1314 cmd->param, cmd->param_len);
/* Completion for address-based commands: reply with only the leading
 * mgmt_addr_info portion of the stored parameters.
 */
1317 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1319 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1320 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR capability/enablement to an mgmt status: NOT_SUPPORTED if
 * the controller lacks BR/EDR, REJECTED if BR/EDR is disabled, else OK.
 */
1323 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1325 if (!lmp_bredr_capable(hdev))
1326 return MGMT_STATUS_NOT_SUPPORTED;
1327 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1328 return MGMT_STATUS_REJECTED;
1330 return MGMT_STATUS_SUCCESS;
/* LE counterpart of mgmt_bredr_support(): same status mapping for LE. */
1333 static u8 mgmt_le_support(struct hci_dev *hdev)
1335 if (!lmp_le_capable(hdev))
1336 return MGMT_STATUS_NOT_SUPPORTED;
1337 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1338 return MGMT_STATUS_REJECTED;
1340 return MGMT_STATUS_SUCCESS;
/* HCI completion hook for Set Discoverable: on error, report the mapped
 * status and drop HCI_LIMITED_DISCOVERABLE; on success, (re)arm the
 * discoverable timeout when one is configured, answer the pending
 * command, and broadcast the new settings.
 */
1343 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1345 struct mgmt_pending_cmd *cmd;
1347 bt_dev_dbg(hdev, "status 0x%02x", status);
1351 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1356 u8 mgmt_err = mgmt_status(status);
1357 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1358 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1362 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1363 hdev->discov_timeout > 0) {
1364 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1365 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1368 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1369 new_settings(hdev, cmd->sk);
1372 mgmt_pending_remove(cmd);
1375 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Validates mode (0=off, 1=general,
 * 2=limited) and the timeout/mode combination, handles the powered-off
 * case by flipping flags only, short-circuits when just the timeout
 * changes, and otherwise records a pending command, updates the
 * DISCOVERABLE/LIMITED_DISCOVERABLE flags, and queues the
 * discoverable_update work. The timeout is armed by the complete
 * handler (mgmt_set_discoverable_complete above).
 */
1378 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1381 struct mgmt_cp_set_discoverable *cp = data;
1382 struct mgmt_pending_cmd *cmd;
1386 bt_dev_dbg(hdev, "sock %p", sk);
1388 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1389 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1390 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1391 MGMT_STATUS_REJECTED);
1393 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1394 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1395 MGMT_STATUS_INVALID_PARAMS);
1397 timeout = __le16_to_cpu(cp->timeout);
1399 /* Disabling discoverable requires that no timeout is set,
1400 * and enabling limited discoverable requires a timeout.
1402 if ((cp->val == 0x00 && timeout > 0) ||
1403 (cp->val == 0x02 && timeout == 0))
1404 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1405 MGMT_STATUS_INVALID_PARAMS);
1409 if (!hdev_is_powered(hdev) && timeout > 0) {
1410 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1411 MGMT_STATUS_NOT_POWERED);
1415 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1416 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1417 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1422 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1423 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1424 MGMT_STATUS_REJECTED);
1428 if (hdev->advertising_paused) {
1429 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1434 if (!hdev_is_powered(hdev)) {
1435 bool changed = false;
1437 /* Setting limited discoverable when powered off is
1438 * not a valid operation since it requires a timeout
1439 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1441 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1442 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1446 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1451 err = new_settings(hdev, sk);
1456 /* If the current mode is the same, then just update the timeout
1457 * value with the new value. And if only the timeout gets updated,
1458 * then no need for any HCI transactions.
1460 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1461 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1462 HCI_LIMITED_DISCOVERABLE)) {
1463 cancel_delayed_work(&hdev->discov_off);
1464 hdev->discov_timeout = timeout;
1466 if (cp->val && hdev->discov_timeout > 0) {
1467 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1468 queue_delayed_work(hdev->req_workqueue,
1469 &hdev->discov_off, to);
1472 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1476 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1482 /* Cancel any potential discoverable timeout that might be
1483 * still active and store new timeout value. The arming of
1484 * the timeout happens in the complete handler.
1486 cancel_delayed_work(&hdev->discov_off);
1487 hdev->discov_timeout = timeout;
1490 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1492 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1494 /* Limited discoverable mode */
1495 if (cp->val == 0x02)
1496 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1498 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1500 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1504 hci_dev_unlock(hdev);
/* HCI completion hook for Set Connectable: report mapped error status,
 * or on success answer the pending command and broadcast new settings.
 */
1508 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1510 struct mgmt_pending_cmd *cmd;
1512 bt_dev_dbg(hdev, "status 0x%02x", status);
1516 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1521 u8 mgmt_err = mgmt_status(status);
1522 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1526 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1527 new_settings(hdev, cmd->sk);
1530 mgmt_pending_remove(cmd);
1533 hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable (used when no HCI traffic is
 * needed): toggle HCI_CONNECTABLE (clearing DISCOVERABLE when turning
 * connectable off), reply, and if changed refresh scan state and
 * background scanning before broadcasting the new settings.
 */
1536 static int set_connectable_update_settings(struct hci_dev *hdev,
1537 struct sock *sk, u8 val)
1539 bool changed = false;
1542 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1546 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1548 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1549 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1552 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1557 hci_req_update_scan(hdev);
1558 hci_update_background_scan(hdev);
1559 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: validate, take the flag-only path
 * when powered off, reject if a discoverable/connectable command is
 * already pending, otherwise record a pending command, adjust the
 * CONNECTABLE (and on disable, DISCOVERABLE) flags, cancel any
 * discoverable timeout, and queue the connectable_update work.
 */
1565 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1568 struct mgmt_mode *cp = data;
1569 struct mgmt_pending_cmd *cmd;
1572 bt_dev_dbg(hdev, "sock %p", sk);
1574 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1575 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1577 MGMT_STATUS_REJECTED);
1579 if (cp->val != 0x00 && cp->val != 0x01)
1580 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1581 MGMT_STATUS_INVALID_PARAMS);
1585 if (!hdev_is_powered(hdev)) {
1586 err = set_connectable_update_settings(hdev, sk, cp->val);
1590 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1591 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1597 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1604 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1606 if (hdev->discov_timeout > 0)
1607 cancel_delayed_work(&hdev->discov_off);
1609 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1610 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1611 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1614 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1618 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle HCI_BONDABLE and, when the flag
 * actually changed and limited privacy is in use while advertising and
 * discoverable, queue discoverable_update since the change may affect
 * the local advertising address; then broadcast new settings.
 */
1622 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1625 struct mgmt_mode *cp = data;
1629 bt_dev_dbg(hdev, "sock %p", sk);
1631 if (cp->val != 0x00 && cp->val != 0x01)
1632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1633 MGMT_STATUS_INVALID_PARAMS);
1638 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1640 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1642 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1647 /* In limited privacy mode the change of bondable mode
1648 * may affect the local advertising address.
1650 if (hdev_is_powered(hdev) &&
1651 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1652 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1653 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1654 queue_work(hdev->req_workqueue,
1655 &hdev->discoverable_update);
1657 err = new_settings(hdev, sk);
1661 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR support; powered
 * off just flips HCI_LINK_SECURITY; powered on sends
 * HCI_OP_WRITE_AUTH_ENABLE unless the HCI_AUTH bit already matches,
 * tracking the request as a pending command.
 */
1665 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1668 struct mgmt_mode *cp = data;
1669 struct mgmt_pending_cmd *cmd;
1673 bt_dev_dbg(hdev, "sock %p", sk);
1675 status = mgmt_bredr_support(hdev);
1677 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1680 if (cp->val != 0x00 && cp->val != 0x01)
1681 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1682 MGMT_STATUS_INVALID_PARAMS);
1686 if (!hdev_is_powered(hdev)) {
1687 bool changed = false;
1689 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1690 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1694 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1699 err = new_settings(hdev, sk);
1704 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1705 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1712 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1713 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1717 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1723 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1725 mgmt_pending_remove(cmd);
1730 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP capability. Powered
 * off only toggles the SSP_ENABLED flag (disabling also clears
 * HS_ENABLED). Powered on sends HCI_OP_WRITE_SSP_MODE — first turning
 * off SSP debug mode if debug keys were in use — as a pending command.
 */
1734 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1736 struct mgmt_mode *cp = data;
1737 struct mgmt_pending_cmd *cmd;
1741 bt_dev_dbg(hdev, "sock %p", sk);
1743 status = mgmt_bredr_support(hdev);
1745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1747 if (!lmp_ssp_capable(hdev))
1748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1749 MGMT_STATUS_NOT_SUPPORTED);
1751 if (cp->val != 0x00 && cp->val != 0x01)
1752 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1753 MGMT_STATUS_INVALID_PARAMS);
1757 if (!hdev_is_powered(hdev)) {
1761 changed = !hci_dev_test_and_set_flag(hdev,
1764 changed = hci_dev_test_and_clear_flag(hdev,
1767 changed = hci_dev_test_and_clear_flag(hdev,
1770 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1773 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1778 err = new_settings(hdev, sk);
1783 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1789 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1790 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1794 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1800 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1801 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1802 sizeof(cp->val), &cp->val)
/* MGMT_OP_SET_HS (High Speed) handler: only valid when CONFIG_BT_HS is
 * enabled, BR/EDR + SSP supported and SSP enabled. Toggles the
 * HS_ENABLED flag (disabling while powered is rejected) and broadcasts
 * new settings when the flag changed.
 */
1815 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1817 struct mgmt_mode *cp = data;
1822 bt_dev_dbg(hdev, "sock %p", sk);
1824 if (!IS_ENABLED(CONFIG_BT_HS))
1825 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1826 MGMT_STATUS_NOT_SUPPORTED);
1828 status = mgmt_bredr_support(hdev);
1830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1832 if (!lmp_ssp_capable(hdev))
1833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1834 MGMT_STATUS_NOT_SUPPORTED);
1836 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1837 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1838 MGMT_STATUS_REJECTED);
1840 if (cp->val != 0x00 && cp->val != 0x01)
1841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1842 MGMT_STATUS_INVALID_PARAMS);
1846 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1847 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1853 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1855 if (hdev_is_powered(hdev)) {
1856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1857 MGMT_STATUS_REJECTED);
1861 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1864 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1869 err = new_settings(hdev, sk);
1872 hci_dev_unlock(hdev);
/* HCI completion for Set LE: on error answer all pending SET_LE
 * commands with the mapped status; on success answer them with current
 * settings and broadcast. If LE ended up enabled, refresh the default
 * advertising/scan-response data (ext-adv aware) and kick background
 * scanning.
 */
1876 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1878 struct cmd_lookup match = { NULL, hdev };
1883 u8 mgmt_err = mgmt_status(status);
1885 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1890 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1892 new_settings(hdev, match.sk);
1897 /* Make sure the controller has a good default for
1898 * advertising data. Restrict the update to when LE
1899 * has actually been enabled. During power on, the
1900 * update in powered_update_hci will take care of it.
1902 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1903 struct hci_request req;
1904 hci_req_init(&req, hdev);
1905 if (ext_adv_capable(hdev)) {
1908 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1910 __hci_req_update_scan_rsp_data(&req, 0x00);
1912 __hci_req_update_adv_data(&req, 0x00);
1913 __hci_req_update_scan_rsp_data(&req, 0x00);
1915 hci_req_run(&req, NULL);
1916 hci_update_background_scan(hdev);
1920 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler. LE-only controllers may not switch LE off
 * (enable is acked gracefully, disable rejected). When powered off or
 * the host-LE state already matches, only the LE_ENABLED/ADVERTISING
 * flags change. Otherwise a Write LE Host Supported request is built
 * (disabling advertising / clearing ext-adv sets first when turning LE
 * off) and run with le_enable_complete as the callback.
 */
1923 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1925 struct mgmt_mode *cp = data;
1926 struct hci_cp_write_le_host_supported hci_cp;
1927 struct mgmt_pending_cmd *cmd;
1928 struct hci_request req;
1932 bt_dev_dbg(hdev, "sock %p", sk);
1934 if (!lmp_le_capable(hdev))
1935 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1936 MGMT_STATUS_NOT_SUPPORTED);
1938 if (cp->val != 0x00 && cp->val != 0x01)
1939 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1940 MGMT_STATUS_INVALID_PARAMS);
1942 /* Bluetooth single mode LE only controllers or dual-mode
1943 * controllers configured as LE only devices, do not allow
1944 * switching LE off. These have either LE enabled explicitly
1945 * or BR/EDR has been previously switched off.
1947 * When trying to enable an already enabled LE, then gracefully
1948 * send a positive response. Trying to disable it however will
1949 * result into rejection.
1951 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1952 if (cp->val == 0x01)
1953 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1955 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1956 MGMT_STATUS_REJECTED);
1962 enabled = lmp_host_le_capable(hdev);
1965 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1967 if (!hdev_is_powered(hdev) || val == enabled) {
1968 bool changed = false;
1970 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1971 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1975 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1976 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1980 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1985 err = new_settings(hdev, sk);
1990 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1991 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1992 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1997 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2003 hci_req_init(&req, hdev);
2005 memset(&hci_cp, 0, sizeof(hci_cp));
2009 hci_cp.simul = 0x00;
2011 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2012 __hci_req_disable_advertising(&req);
2014 if (ext_adv_capable(hdev))
2015 __hci_req_clear_ext_adv_sets(&req);
2018 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2021 err = hci_req_run(&req, le_enable_complete);
2023 mgmt_pending_remove(cmd);
2026 hci_dev_unlock(hdev);
2030 /* This is a helper function to test for pending mgmt commands that can
2031 * cause CoD or EIR HCI commands. We can only allow one such pending
2032 * mgmt command at a time since otherwise we cannot easily track what
2033 * the current values are, will be, and based on that calculate if a new
2034 * HCI command needs to be sent and if yes with what value.
2036 static bool pending_eir_or_class(struct hci_dev *hdev)
2038 struct mgmt_pending_cmd *cmd;
2040 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2041 switch (cmd->opcode) {
2042 case MGMT_OP_ADD_UUID:
2043 case MGMT_OP_REMOVE_UUID:
2044 case MGMT_OP_SET_DEV_CLASS:
2045 case MGMT_OP_SET_POWERED:
/* NOTE(review): sampled excerpt — the matching return statements are
 * not visible here; only the pending-list scan over EIR/CoD-affecting
 * opcodes is shown.
 */
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used by get_uuid_size() to detect 16/32-bit
 * short-form UUIDs.
 */
2053 static const u8 bluetooth_base_uuid[] = {
2054 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2055 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes don't match the base
 * UUID it is a true 128-bit UUID; otherwise the 32-bit value at offset
 * 12 decides between 16- and 32-bit short forms.
 * NOTE(review): the return statements are sampled out of view.
 */
2058 static u8 get_uuid_size(const u8 *uuid)
2062 if (memcmp(uuid, bluetooth_base_uuid, 12))
2065 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for the CoD/EIR command family: find the pending
 * command for @mgmt_op and complete it with the mapped status plus the
 * current 3-byte device class.
 */
2072 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2074 struct mgmt_pending_cmd *cmd;
2078 cmd = pending_find(mgmt_op, hdev);
2082 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2083 mgmt_status(status), hdev->dev_class, 3);
2085 mgmt_pending_remove(cmd);
2088 hci_dev_unlock(hdev);
/* HCI request callback for Add UUID — delegate to the shared class
 * completion handler.
 */
2091 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2093 bt_dev_dbg(hdev, "status 0x%02x", status);
2095 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: reject if another EIR/CoD command is in
 * flight, append the UUID to hdev->uuids, and run a request updating
 * class + EIR. -ENODATA (nothing to send) completes immediately with
 * the current device class; otherwise a pending command is added.
 */
2098 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2100 struct mgmt_cp_add_uuid *cp = data;
2101 struct mgmt_pending_cmd *cmd;
2102 struct hci_request req;
2103 struct bt_uuid *uuid;
2106 bt_dev_dbg(hdev, "sock %p", sk);
2110 if (pending_eir_or_class(hdev)) {
2111 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2116 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2122 memcpy(uuid->uuid, cp->uuid, 16);
2123 uuid->svc_hint = cp->svc_hint;
2124 uuid->size = get_uuid_size(cp->uuid);
2126 list_add_tail(&uuid->list, &hdev->uuids);
2128 hci_req_init(&req, hdev);
2130 __hci_req_update_class(&req);
2131 __hci_req_update_eir(&req);
2133 err = hci_req_run(&req, add_uuid_complete);
2135 if (err != -ENODATA)
2138 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2139 hdev->dev_class, 3);
2143 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2152 hci_dev_unlock(hdev);
/* Arm the service-cache: only meaningful while powered; sets
 * HCI_SERVICE_CACHE and, if it was not already set, schedules the
 * service_cache delayed work.
 */
2156 static bool enable_service_cache(struct hci_dev *hdev)
2158 if (!hdev_is_powered(hdev))
2161 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2162 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for Remove UUID — shared class completion. */
2170 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2172 bt_dev_dbg(hdev, "status 0x%02x", status);
2174 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: the all-zero UUID clears the whole list
 * (possibly just arming the service cache instead of touching HCI);
 * otherwise remove every matching entry, failing with INVALID_PARAMS if
 * none matched, then update class + EIR like add_uuid().
 */
2177 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2180 struct mgmt_cp_remove_uuid *cp = data;
2181 struct mgmt_pending_cmd *cmd;
2182 struct bt_uuid *match, *tmp;
2183 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2184 struct hci_request req;
2187 bt_dev_dbg(hdev, "sock %p", sk);
2191 if (pending_eir_or_class(hdev)) {
2192 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2197 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2198 hci_uuids_clear(hdev);
2200 if (enable_service_cache(hdev)) {
2201 err = mgmt_cmd_complete(sk, hdev->id,
2202 MGMT_OP_REMOVE_UUID,
2203 0, hdev->dev_class, 3);
2212 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2213 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2216 list_del(&match->list);
2222 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2223 MGMT_STATUS_INVALID_PARAMS);
2228 hci_req_init(&req, hdev);
2230 __hci_req_update_class(&req);
2231 __hci_req_update_eir(&req);
2233 err = hci_req_run(&req, remove_uuid_complete);
2235 if (err != -ENODATA)
2238 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2239 hdev->dev_class, 3);
2243 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2252 hci_dev_unlock(hdev);
/* HCI request callback for Set Device Class — shared class completion. */
2256 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2258 bt_dev_dbg(hdev, "status 0x%02x", status);
2260 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: validate that the reserved bits of
 * minor (low 2) and major (high 3) are zero, store the new classes,
 * and when powered flush a pending service cache (EIR refresh) before
 * writing the Class of Device. -ENODATA completes immediately.
 */
2263 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2266 struct mgmt_cp_set_dev_class *cp = data;
2267 struct mgmt_pending_cmd *cmd;
2268 struct hci_request req;
2271 bt_dev_dbg(hdev, "sock %p", sk);
2273 if (!lmp_bredr_capable(hdev))
2274 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2275 MGMT_STATUS_NOT_SUPPORTED);
2279 if (pending_eir_or_class(hdev)) {
2280 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2285 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2286 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2287 MGMT_STATUS_INVALID_PARAMS);
2291 hdev->major_class = cp->major;
2292 hdev->minor_class = cp->minor;
2294 if (!hdev_is_powered(hdev)) {
2295 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2296 hdev->dev_class, 3);
2300 hci_req_init(&req, hdev);
2302 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2303 hci_dev_unlock(hdev);
2304 cancel_delayed_work_sync(&hdev->service_cache);
2306 __hci_req_update_eir(&req);
2309 __hci_req_update_class(&req);
2311 err = hci_req_run(&req, set_class_complete);
2313 if (err != -ENODATA)
2316 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2317 hdev->dev_class, 3);
2321 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2330 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: strictly validate key_count against
 * both an overflow-safe maximum and the exact struct_size() of the
 * payload, validate each key's address type and key type, then replace
 * the stored link keys — skipping blocked keys and debug-combination
 * keys (those always require a fresh pairing).
 */
2334 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2337 struct mgmt_cp_load_link_keys *cp = data;
2338 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2339 sizeof(struct mgmt_link_key_info));
2340 u16 key_count, expected_len;
2344 bt_dev_dbg(hdev, "sock %p", sk);
2346 if (!lmp_bredr_capable(hdev))
2347 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2348 MGMT_STATUS_NOT_SUPPORTED);
2350 key_count = __le16_to_cpu(cp->key_count);
2351 if (key_count > max_key_count) {
2352 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2355 MGMT_STATUS_INVALID_PARAMS);
2358 expected_len = struct_size(cp, keys, key_count);
2359 if (expected_len != len) {
2360 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2363 MGMT_STATUS_INVALID_PARAMS);
2366 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2367 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2368 MGMT_STATUS_INVALID_PARAMS);
2370 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2373 for (i = 0; i < key_count; i++) {
2374 struct mgmt_link_key_info *key = &cp->keys[i];
2376 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2377 return mgmt_cmd_status(sk, hdev->id,
2378 MGMT_OP_LOAD_LINK_KEYS,
2379 MGMT_STATUS_INVALID_PARAMS);
2384 hci_link_keys_clear(hdev);
2387 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2389 changed = hci_dev_test_and_clear_flag(hdev,
2390 HCI_KEEP_DEBUG_KEYS);
2393 new_settings(hdev, NULL);
2395 for (i = 0; i < key_count; i++) {
2396 struct mgmt_link_key_info *key = &cp->keys[i];
2398 if (hci_is_blocked_key(hdev,
2399 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2401 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2406 /* Always ignore debug keys and require a new pairing if
2407 * the user wants to use them.
2409 if (key->type == HCI_LK_DEBUG_COMBINATION)
2412 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2413 key->type, key->pin_len, NULL);
2416 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2418 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for the given address, skipping the
 * socket that initiated the unpair.
 */
2423 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2424 u8 addr_type, struct sock *skip_sk)
2426 struct mgmt_ev_device_unpaired ev;
2428 bacpy(&ev.addr.bdaddr, bdaddr);
2429 ev.addr.type = addr_type;
2431 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler. BR/EDR: remove the link key (NOT_PAIRED
 * if none). LE: cancel any ongoing SMP pairing (removing LTK/IRK), defer
 * clearing conn params until close (repairing may keep them), and
 * disable auto-connect. If disconnection was requested and the device is
 * connected, abort the link as a pending command; otherwise complete
 * immediately and emit the unpaired event.
 */
2435 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2438 struct mgmt_cp_unpair_device *cp = data;
2439 struct mgmt_rp_unpair_device rp;
2440 struct hci_conn_params *params;
2441 struct mgmt_pending_cmd *cmd;
2442 struct hci_conn *conn;
2446 memset(&rp, 0, sizeof(rp));
2447 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2448 rp.addr.type = cp->addr.type;
2450 if (!bdaddr_type_is_valid(cp->addr.type))
2451 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2452 MGMT_STATUS_INVALID_PARAMS,
2455 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2456 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2457 MGMT_STATUS_INVALID_PARAMS,
2462 if (!hdev_is_powered(hdev)) {
2463 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2464 MGMT_STATUS_NOT_POWERED, &rp,
2469 if (cp->addr.type == BDADDR_BREDR) {
2470 /* If disconnection is requested, then look up the
2471 * connection. If the remote device is connected, it
2472 * will be later used to terminate the link.
2474 * Setting it to NULL explicitly will cause no
2475 * termination of the link.
2478 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2483 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2485 err = mgmt_cmd_complete(sk, hdev->id,
2486 MGMT_OP_UNPAIR_DEVICE,
2487 MGMT_STATUS_NOT_PAIRED, &rp,
2495 /* LE address type */
2496 addr_type = le_addr_type(cp->addr.type);
2498 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2499 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2501 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2502 MGMT_STATUS_NOT_PAIRED, &rp,
2507 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2509 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2514 /* Defer clearing up the connection parameters until closing to
2515 * give a chance of keeping them if a repairing happens.
2517 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2519 /* Disable auto-connection parameters if present */
2520 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2522 if (params->explicit_connect)
2523 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2525 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2528 /* If disconnection is not requested, then clear the connection
2529 * variable so that the link is not terminated.
2531 if (!cp->disconnect)
2535 /* If the connection variable is set, then termination of the
2536 * link is requested.
2539 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2541 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2545 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2552 cmd->cmd_complete = addr_cmd_complete;
2554 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2556 mgmt_pending_remove(cmd);
2559 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validate the address type, require the
 * adapter to be up and no other disconnect pending, look up the BR/EDR
 * or LE connection, and issue hci_disconnect() with Remote User
 * Terminated, tracked as a pending command that echoes its parameters.
 */
2563 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2566 struct mgmt_cp_disconnect *cp = data;
2567 struct mgmt_rp_disconnect rp;
2568 struct mgmt_pending_cmd *cmd;
2569 struct hci_conn *conn;
2572 bt_dev_dbg(hdev, "sock %p", sk);
2574 memset(&rp, 0, sizeof(rp));
2575 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2576 rp.addr.type = cp->addr.type;
2578 if (!bdaddr_type_is_valid(cp->addr.type))
2579 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2580 MGMT_STATUS_INVALID_PARAMS,
2585 if (!test_bit(HCI_UP, &hdev->flags)) {
2586 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2587 MGMT_STATUS_NOT_POWERED, &rp,
2592 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2593 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2594 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2598 if (cp->addr.type == BDADDR_BREDR)
2599 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2602 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2603 le_addr_type(cp->addr.type));
2605 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2606 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2607 MGMT_STATUS_NOT_CONNECTED, &rp,
2612 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2618 cmd->cmd_complete = generic_cmd_complete;
2620 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2622 mgmt_pending_remove(cmd);
2625 hci_dev_unlock(hdev);
/* Translate an HCI link/address type pair into the mgmt BDADDR_* type:
 * LE public/random for LE links, BR/EDR otherwise.
 * NOTE(review): the case labels for the link-type switch are sampled
 * out of view.
 */
2629 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2631 switch (link_type) {
2633 switch (addr_type) {
2634 case ADDR_LE_DEV_PUBLIC:
2635 return BDADDR_LE_PUBLIC;
2638 /* Fallback to LE Random address type */
2639 return BDADDR_LE_RANDOM;
2643 /* Fallback to BR/EDR type */
2644 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: count mgmt-visible connections,
 * allocate the response with struct_size(), fill in address/type per
 * connection while skipping SCO/eSCO links, and complete with the
 * length recomputed for the possibly smaller filtered count.
 */
2648 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2651 struct mgmt_rp_get_connections *rp;
2656 bt_dev_dbg(hdev, "sock %p", sk);
2660 if (!hdev_is_powered(hdev)) {
2661 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2662 MGMT_STATUS_NOT_POWERED);
2667 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2668 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2672 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2679 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2680 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2682 bacpy(&rp->addr[i].bdaddr, &c->dst);
2683 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2684 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2689 rp->conn_count = cpu_to_le16(i);
2691 /* Recalculate length in case of filtered SCO connections, etc */
2692 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2693 struct_size(rp, addr, i));
2698 hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply to the controller, tracked as a
 * pending PIN_CODE_NEG_REPLY command with address-based completion.
 */
2702 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2703 struct mgmt_cp_pin_code_neg_reply *cp)
2705 struct mgmt_pending_cmd *cmd;
2708 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2713 cmd->cmd_complete = addr_cmd_complete;
2715 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2716 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2718 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: require power and an ACL connection.
 * If the pending security level demands a 16-byte PIN and the supplied
 * one is shorter, convert the reply into a negative reply. Otherwise
 * forward the PIN to the controller as a pending command.
 */
2723 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2726 struct hci_conn *conn;
2727 struct mgmt_cp_pin_code_reply *cp = data;
2728 struct hci_cp_pin_code_reply reply;
2729 struct mgmt_pending_cmd *cmd;
2732 bt_dev_dbg(hdev, "sock %p", sk);
2736 if (!hdev_is_powered(hdev)) {
2737 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2738 MGMT_STATUS_NOT_POWERED);
2742 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2744 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2745 MGMT_STATUS_NOT_CONNECTED);
2749 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2750 struct mgmt_cp_pin_code_neg_reply ncp;
2752 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2754 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2756 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2758 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2759 MGMT_STATUS_INVALID_PARAMS);
2764 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2770 cmd->cmd_complete = addr_cmd_complete;
2772 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2773 reply.pin_len = cp->pin_len;
2774 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2776 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2778 mgmt_pending_remove(cmd);
2781 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the adapter-wide IO
 * capability used when pairing. (excerpt: lock and trailing response
 * arguments are elided)
 */
2785 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2788 struct mgmt_cp_set_io_capability *cp = data;
2790 bt_dev_dbg(hdev, "sock %p", sk);
/* SMP_IO_KEYBOARD_DISPLAY is the highest defined IO capability value. */
2792 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2793 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2794 MGMT_STATUS_INVALID_PARAMS);
2798 hdev->io_capability = cp->io_capability;
2800 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2802 hci_dev_unlock(hdev);
2804 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command associated with @conn,
 * if any. (excerpt: the continue/return statements are elided)
 */
2808 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2810 struct hci_dev *hdev = conn->hdev;
2811 struct mgmt_pending_cmd *cmd;
2813 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2814 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
/* cmd->user_data holds the hci_conn the pairing was started on. */
2817 if (cmd->user_data != conn)
/* Complete a pending Pair Device command with @status, detach the
 * pairing callbacks from the connection and drop our reference to it.
 */
2826 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2828 struct mgmt_rp_pair_device rp;
2829 struct hci_conn *conn = cmd->user_data;
/* Response carries the peer address as seen by mgmt. */
2832 bacpy(&rp.addr.bdaddr, &conn->dst);
2833 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2835 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2836 status, &rp, sizeof(rp));
2838 /* So we don't get further callbacks for this connection */
2839 conn->connect_cfm_cb = NULL;
2840 conn->security_cfm_cb = NULL;
2841 conn->disconn_cfm_cb = NULL;
/* Balances the hci_conn_get()/hold taken when pairing started. */
2843 hci_conn_drop(conn);
2845 /* The device is paired so there is no need to remove
2846 * its connection parameters anymore.
2848 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* SMP pairing finished: complete any pending Pair Device command with
 * success or failure accordingly.
 * NOTE(review): the NULL check on the find_pairing() result is elided
 * in this excerpt.
 */
2855 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2857 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2858 struct mgmt_pending_cmd *cmd;
2860 cmd = find_pairing(conn);
2862 cmd->cmd_complete(cmd, status);
2863 mgmt_pending_remove(cmd);
/* hci_conn callback for BR/EDR pairing events: complete the pending
 * Pair Device command with the translated HCI status.
 */
2867 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2869 struct mgmt_pending_cmd *cmd;
2871 BT_DBG("status %u", status);
2873 cmd = find_pairing(conn);
/* No matching pending command: nothing to complete (return elided). */
2875 BT_DBG("Unable to find a pending command");
2879 cmd->cmd_complete(cmd, mgmt_status(status));
2880 mgmt_pending_remove(cmd);
/* hci_conn callback for LE pairing. NOTE(review): an early return is
 * elided in this excerpt (lines 2888-2890); presumably success is
 * reported via mgmt_smp_complete() instead — confirm against the full
 * source.
 */
2883 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2885 struct mgmt_pending_cmd *cmd;
2887 BT_DBG("status %u", status);
2892 cmd = find_pairing(conn);
/* No matching pending command: nothing to complete (return elided). */
2894 BT_DBG("Unable to find a pending command");
2898 cmd->cmd_complete(cmd, mgmt_status(status));
2899 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PAIR_DEVICE: establish a BR/EDR or LE connection to
 * the given address and initiate pairing on it. The command completes
 * asynchronously via the pairing callbacks unless security is already
 * satisfied.
 * NOTE(review): braces, locking and several error-path lines are elided
 * in this excerpt.
 */
2902 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2905 struct mgmt_cp_pair_device *cp = data;
2906 struct mgmt_rp_pair_device rp;
2907 struct mgmt_pending_cmd *cmd;
2908 u8 sec_level, auth_type;
2909 struct hci_conn *conn;
2912 bt_dev_dbg(hdev, "sock %p", sk);
/* The response always echoes the requested address. */
2914 memset(&rp, 0, sizeof(rp));
2915 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2916 rp.addr.type = cp->addr.type;
2918 if (!bdaddr_type_is_valid(cp->addr.type))
2919 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2920 MGMT_STATUS_INVALID_PARAMS,
/* IO capability must be one of the values SMP defines. */
2923 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2924 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2925 MGMT_STATUS_INVALID_PARAMS,
2930 if (!hdev_is_powered(hdev)) {
2931 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2932 MGMT_STATUS_NOT_POWERED, &rp,
/* Refuse to pair with an already-paired device. */
2937 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2938 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2939 MGMT_STATUS_ALREADY_PAIRED, &rp,
2944 sec_level = BT_SECURITY_MEDIUM;
2945 auth_type = HCI_AT_DEDICATED_BONDING;
2947 if (cp->addr.type == BDADDR_BREDR) {
2948 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2949 auth_type, CONN_REASON_PAIR_DEVICE);
2951 u8 addr_type = le_addr_type(cp->addr.type);
2952 struct hci_conn_params *p;
2954 /* When pairing a new device, it is expected to remember
2955 * this device for future connections. Adding the connection
2956 * parameter information ahead of time allows tracking
2957 * of the slave preferred values and will speed up any
2958 * further connection establishment.
2960 * If connection parameters already exist, then they
2961 * will be kept and this function does nothing.
2963 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
/* An explicit-connect entry would race with the pairing connection;
 * disable it.
 */
2965 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2966 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2968 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2969 sec_level, HCI_LE_CONN_TIMEOUT,
2970 CONN_REASON_PAIR_DEVICE);
/* Map connect errors (IS_ERR path) to mgmt status codes. */
2976 if (PTR_ERR(conn) == -EBUSY)
2977 status = MGMT_STATUS_BUSY;
2978 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2979 status = MGMT_STATUS_NOT_SUPPORTED;
2980 else if (PTR_ERR(conn) == -ECONNREFUSED)
2981 status = MGMT_STATUS_REJECTED;
2983 status = MGMT_STATUS_CONNECT_FAILED;
2985 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2986 status, &rp, sizeof(rp));
/* A connect_cfm_cb already set means another pairing is in progress. */
2990 if (conn->connect_cfm_cb) {
2991 hci_conn_drop(conn);
2992 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2993 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2997 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3000 hci_conn_drop(conn);
3004 cmd->cmd_complete = pairing_complete;
3006 /* For LE, just connecting isn't a proof that the pairing finished */
3007 if (cp->addr.type == BDADDR_BREDR) {
3008 conn->connect_cfm_cb = pairing_complete_cb;
3009 conn->security_cfm_cb = pairing_complete_cb;
3010 conn->disconn_cfm_cb = pairing_complete_cb;
3012 conn->connect_cfm_cb = le_pairing_complete_cb;
3013 conn->security_cfm_cb = le_pairing_complete_cb;
3014 conn->disconn_cfm_cb = le_pairing_complete_cb;
3017 conn->io_capability = cp->io_cap;
/* Reference dropped later by pairing_complete(). */
3018 cmd->user_data = hci_conn_get(conn);
/* Already connected and security satisfiable now: complete at once. */
3020 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3021 hci_conn_security(conn, sec_level, auth_type, true)) {
3022 cmd->cmd_complete(cmd, 0);
3023 mgmt_pending_remove(cmd);
3029 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort a pending Pair Device
 * command for the given address, cancel any ongoing pairing and, if the
 * link was created for pairing, tear it down.
 * NOTE(review): braces/locking lines are elided in this excerpt.
 */
3033 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3036 struct mgmt_addr_info *addr = data;
3037 struct mgmt_pending_cmd *cmd;
3038 struct hci_conn *conn;
3041 bt_dev_dbg(hdev, "sock %p", sk);
3045 if (!hdev_is_powered(hdev)) {
3046 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3047 MGMT_STATUS_NOT_POWERED);
/* There must be a pending Pair Device command to cancel. */
3051 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3053 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3054 MGMT_STATUS_INVALID_PARAMS);
3058 conn = cmd->user_data;
/* The address must match the one the pairing was started for. */
3060 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3061 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3062 MGMT_STATUS_INVALID_PARAMS);
3066 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3067 mgmt_pending_remove(cmd);
3069 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3070 addr, sizeof(*addr));
3072 /* Since user doesn't want to proceed with the connection, abort any
3073 * ongoing pairing and then terminate the link if it was created
3074 * because of the pair device action.
3076 if (addr->type == BDADDR_BREDR)
3077 hci_remove_link_key(hdev, &addr->bdaddr);
3079 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3080 le_addr_type(addr->type));
3082 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3083 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3086 hci_dev_unlock(hdev);
/* Common helper for the user confirmation / passkey / PIN negative
 * replies: routes the response either to SMP (for LE address types) or
 * to the controller via @hci_op (for BR/EDR).
 * NOTE(review): braces, locking and some error-path lines are elided in
 * this excerpt.
 */
3090 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3091 struct mgmt_addr_info *addr, u16 mgmt_op,
3092 u16 hci_op, __le32 passkey)
3094 struct mgmt_pending_cmd *cmd;
3095 struct hci_conn *conn;
3100 if (!hdev_is_powered(hdev)) {
3101 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3102 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the connection the response belongs to. */
3107 if (addr->type == BDADDR_BREDR)
3108 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3110 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3111 le_addr_type(addr->type));
3114 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3115 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE responses are handled entirely by SMP; no HCI command needed. */
3120 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3121 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3123 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3124 MGMT_STATUS_SUCCESS, addr,
3127 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3128 MGMT_STATUS_FAILED, addr,
3134 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3140 cmd->cmd_complete = addr_cmd_complete;
3142 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey; all other ops only the bdaddr. */
3143 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3144 struct hci_cp_user_passkey_reply cp;
3146 bacpy(&cp.bdaddr, &addr->bdaddr);
3147 cp.passkey = passkey;
3148 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3150 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* On send failure the pending command is dropped (guard elided). */
3154 mgmt_pending_remove(cmd);
3157 hci_dev_unlock(hdev);
3161 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3162 void *data, u16 len)
3164 struct mgmt_cp_pin_code_neg_reply *cp = data;
3166 bt_dev_dbg(hdev, "sock %p", sk);
3168 return user_pairing_resp(sk, hdev, &cp->addr,
3169 MGMT_OP_PIN_CODE_NEG_REPLY,
3170 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3173 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3176 struct mgmt_cp_user_confirm_reply *cp = data;
3178 bt_dev_dbg(hdev, "sock %p", sk);
3180 if (len != sizeof(*cp))
3181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3182 MGMT_STATUS_INVALID_PARAMS);
3184 return user_pairing_resp(sk, hdev, &cp->addr,
3185 MGMT_OP_USER_CONFIRM_REPLY,
3186 HCI_OP_USER_CONFIRM_REPLY, 0);
3189 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3190 void *data, u16 len)
3192 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3194 bt_dev_dbg(hdev, "sock %p", sk);
3196 return user_pairing_resp(sk, hdev, &cp->addr,
3197 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3198 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3201 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3204 struct mgmt_cp_user_passkey_reply *cp = data;
3206 bt_dev_dbg(hdev, "sock %p", sk);
3208 return user_pairing_resp(sk, hdev, &cp->addr,
3209 MGMT_OP_USER_PASSKEY_REPLY,
3210 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3213 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3214 void *data, u16 len)
3216 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3218 bt_dev_dbg(hdev, "sock %p", sk);
3220 return user_pairing_resp(sk, hdev, &cp->addr,
3221 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3222 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Refresh advertising after a change that affects instance data
 * selected by @flags (e.g. local name, appearance): if the current
 * advertising instance carries any of those flags, cancel its timeout
 * and reschedule it so the updated data is advertised.
 * NOTE(review): NULL checks and early returns are elided in this
 * excerpt.
 */
3225 static void adv_expire(struct hci_dev *hdev, u32 flags)
3227 struct adv_info *adv_instance;
3228 struct hci_request req;
3231 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3235 /* stop if current instance doesn't need to be changed */
3236 if (!(adv_instance->flags & flags))
3239 cancel_adv_timeout(hdev);
3241 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3245 hci_req_init(&req, hdev);
3246 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3251 hci_req_run(&req, NULL);
/* HCI request completion handler for Set Local Name: report the result
 * to the pending mgmt command and, when advertising, refresh instances
 * carrying the local name.
 * NOTE(review): braces/locking and the complete-path condition are
 * elided in this excerpt.
 */
3254 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3256 struct mgmt_cp_set_local_name *cp;
3257 struct mgmt_pending_cmd *cmd;
3259 bt_dev_dbg(hdev, "status 0x%02x", status);
3263 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* Failure: translate the HCI status into a mgmt status. */
3270 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3271 mgmt_status(status));
3273 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Advertising instances embedding the local name must be refreshed. */
3276 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3277 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3280 mgmt_pending_remove(cmd);
3283 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device name (and short
 * name) and push it to the controller when powered.
 * NOTE(review): braces, locking and some error paths are elided in this
 * excerpt.
 */
3286 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3289 struct mgmt_cp_set_local_name *cp = data;
3290 struct mgmt_pending_cmd *cmd;
3291 struct hci_request req;
3294 bt_dev_dbg(hdev, "sock %p", sk);
3298 /* If the old values are the same as the new ones just return a
3299 * direct command complete event.
3301 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3302 !memcmp(hdev->short_name, cp->short_name,
3303 sizeof(hdev->short_name))) {
3304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is stored unconditionally; it never hits the wire here. */
3309 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: just store and notify; no HCI traffic required. */
3311 if (!hdev_is_powered(hdev)) {
3312 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3314 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3319 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3320 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3321 ext_info_changed(hdev, sk);
3326 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3332 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3334 hci_req_init(&req, hdev);
/* BR/EDR carries the name in the EIR and the dedicated name command. */
3336 if (lmp_bredr_capable(hdev)) {
3337 __hci_req_update_name(&req);
3338 __hci_req_update_eir(&req);
3341 /* The name is stored in the scan response data and so
3342 * no need to udpate the advertising data here.
3344 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3345 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3347 err = hci_req_run(&req, set_name_complete);
3349 mgmt_pending_remove(cmd);
3352 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_APPEARANCE: store the LE appearance value and
 * refresh advertising instances that embed it. LE-only command.
 * (excerpt: braces/locking lines are elided)
 */
3356 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3359 struct mgmt_cp_set_appearance *cp = data;
3363 bt_dev_dbg(hdev, "sock %p", sk);
3365 if (!lmp_le_capable(hdev))
3366 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3367 MGMT_STATUS_NOT_SUPPORTED);
3369 appearance = le16_to_cpu(cp->appearance);
/* Only act when the value actually changes. */
3373 if (hdev->appearance != appearance) {
3374 hdev->appearance = appearance;
/* Advertising instances embedding the appearance must be refreshed. */
3376 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3377 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3379 ext_info_changed(hdev, sk);
3382 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3385 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report supported, selected and
 * configurable PHYs. (The struct tag "confguration" matches the
 * declaration in mgmt.h.)
 */
3390 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3391 void *data, u16 len)
3393 struct mgmt_rp_get_phy_confguration rp;
3395 bt_dev_dbg(hdev, "sock %p", sk);
3399 memset(&rp, 0, sizeof(rp));
3401 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3402 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3403 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3405 hci_dev_unlock(hdev);
3407 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast MGMT_EV_PHY_CONFIGURATION_CHANGED with the currently
 * selected PHYs. (excerpt: the trailing mgmt_event() arguments are
 * elided)
 */
3411 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3413 struct mgmt_ev_phy_configuration_changed ev;
3415 memset(&ev, 0, sizeof(ev));
3417 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3419 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI request completion handler for Set PHY Configuration: report the
 * result to the pending mgmt command and broadcast the change on
 * success. (excerpt: braces/locking and the success-path condition are
 * elided)
 */
3423 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3424 u16 opcode, struct sk_buff *skb)
3426 struct mgmt_pending_cmd *cmd;
3428 bt_dev_dbg(hdev, "status 0x%02x", status);
3432 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
/* Failure: translate the HCI status into a mgmt status. */
3437 mgmt_cmd_status(cmd->sk, hdev->id,
3438 MGMT_OP_SET_PHY_CONFIGURATION,
3439 mgmt_status(status));
3441 mgmt_cmd_complete(cmd->sk, hdev->id,
3442 MGMT_OP_SET_PHY_CONFIGURATION, 0,
/* Notify everyone except the originating socket. */
3445 mgmt_phy_configuration_changed(hdev, cmd->sk);
3448 mgmt_pending_remove(cmd);
3451 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: apply the requested BR/EDR
 * packet types directly and, for LE PHY changes, issue an HCI LE Set
 * Default PHY command. (The struct tag "confguration" matches mgmt.h.)
 * NOTE(review): braces, locking and some error-path lines are elided in
 * this excerpt.
 */
3454 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3455 void *data, u16 len)
3457 struct mgmt_cp_set_phy_confguration *cp = data;
3458 struct hci_cp_le_set_default_phy cp_phy;
3459 struct mgmt_pending_cmd *cmd;
3460 struct hci_request req;
3461 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3462 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3463 bool changed = false;
3466 bt_dev_dbg(hdev, "sock %p", sk);
3468 configurable_phys = get_configurable_phys(hdev);
3469 supported_phys = get_supported_phys(hdev);
3470 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Selecting anything outside the supported set is invalid. */
3472 if (selected_phys & ~supported_phys)
3473 return mgmt_cmd_status(sk, hdev->id,
3474 MGMT_OP_SET_PHY_CONFIGURATION,
3475 MGMT_STATUS_INVALID_PARAMS);
3477 unconfigure_phys = supported_phys & ~configurable_phys;
/* Mandatory (non-configurable) PHYs must always remain selected. */
3479 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3480 return mgmt_cmd_status(sk, hdev->id,
3481 MGMT_OP_SET_PHY_CONFIGURATION,
3482 MGMT_STATUS_INVALID_PARAMS);
/* No change requested: complete immediately. */
3484 if (selected_phys == get_selected_phys(hdev))
3485 return mgmt_cmd_complete(sk, hdev->id,
3486 MGMT_OP_SET_PHY_CONFIGURATION,
3491 if (!hdev_is_powered(hdev)) {
3492 err = mgmt_cmd_status(sk, hdev->id,
3493 MGMT_OP_SET_PHY_CONFIGURATION,
3494 MGMT_STATUS_REJECTED);
/* Only one Set PHY Configuration may be in flight at a time. */
3498 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3499 err = mgmt_cmd_status(sk, hdev->id,
3500 MGMT_OP_SET_PHY_CONFIGURATION,
/* Translate BR/EDR PHY selections into HCI packet-type bits. Note
 * that the EDR bits are "disable" bits: clearing HCI_2DHx/HCI_3DHx
 * enables the corresponding EDR packet type.
 */
3505 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3506 pkt_type |= (HCI_DH3 | HCI_DM3);
3508 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3510 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3511 pkt_type |= (HCI_DH5 | HCI_DM5);
3513 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3515 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3516 pkt_type &= ~HCI_2DH1;
3518 pkt_type |= HCI_2DH1;
3520 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3521 pkt_type &= ~HCI_2DH3;
3523 pkt_type |= HCI_2DH3;
3525 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3526 pkt_type &= ~HCI_2DH5;
3528 pkt_type |= HCI_2DH5;
3530 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3531 pkt_type &= ~HCI_3DH1;
3533 pkt_type |= HCI_3DH1;
3535 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3536 pkt_type &= ~HCI_3DH3;
3538 pkt_type |= HCI_3DH3;
3540 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3541 pkt_type &= ~HCI_3DH5;
3543 pkt_type |= HCI_3DH5;
3545 if (pkt_type != hdev->pkt_type) {
3546 hdev->pkt_type = pkt_type;
/* If the LE part is unchanged, no HCI command is needed; only report
 * the BR/EDR change (if any).
 */
3550 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3551 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3553 mgmt_phy_configuration_changed(hdev, sk);
3555 err = mgmt_cmd_complete(sk, hdev->id,
3556 MGMT_OP_SET_PHY_CONFIGURATION,
3562 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3569 hci_req_init(&req, hdev);
3571 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller we have no TX/RX preference. */
3573 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3574 cp_phy.all_phys |= 0x01;
3576 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3577 cp_phy.all_phys |= 0x02;
3579 if (selected_phys & MGMT_PHY_LE_1M_TX)
3580 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3582 if (selected_phys & MGMT_PHY_LE_2M_TX)
3583 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3585 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3586 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3588 if (selected_phys & MGMT_PHY_LE_1M_RX)
3589 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3591 if (selected_phys & MGMT_PHY_LE_2M_RX)
3592 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3594 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3595 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3597 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3599 err = hci_req_run_skb(&req, set_default_phy_complete);
3601 mgmt_pending_remove(cmd);
3604 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BLOCKED_KEYS: replace the list of keys the kernel
 * must refuse to use. The payload is validated against the declared
 * key_count before the existing list is cleared.
 * NOTE(review): braces, locking and the allocation-failure break are
 * elided in this excerpt.
 */
3609 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3612 int err = MGMT_STATUS_SUCCESS;
3613 struct mgmt_cp_set_blocked_keys *keys = data;
/* Upper bound so expected_len below cannot overflow u16. */
3614 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3615 sizeof(struct mgmt_blocked_key_info));
3616 u16 key_count, expected_len;
3619 bt_dev_dbg(hdev, "sock %p", sk);
3621 key_count = __le16_to_cpu(keys->key_count);
3622 if (key_count > max_key_count) {
3623 bt_dev_err(hdev, "too big key_count value %u", key_count);
3624 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3625 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload length exactly. */
3628 expected_len = struct_size(keys, keys, key_count);
3629 if (expected_len != len) {
3630 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3633 MGMT_STATUS_INVALID_PARAMS);
/* This is a full replace operation: drop the old list first. */
3638 hci_blocked_keys_clear(hdev);
3640 for (i = 0; i < keys->key_count; ++i) {
3641 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
/* Allocation failure (check elided): report NO_RESOURCES. */
3644 err = MGMT_STATUS_NO_RESOURCES;
3648 b->type = keys->keys[i].type;
3649 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3650 list_add_rcu(&b->list, &hdev->blocked_keys);
3652 hci_dev_unlock(hdev);
3654 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband-speech
 * setting. Only allowed while the controller cannot be affected (i.e.
 * powered off, or the value already matches).
 * NOTE(review): braces and some condition lines are elided in this
 * excerpt.
 */
3658 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3659 void *data, u16 len)
3661 struct mgmt_mode *cp = data;
3663 bool changed = false;
3665 bt_dev_dbg(hdev, "sock %p", sk);
/* Only controllers that declared the quirk support this feature. */
3667 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3668 return mgmt_cmd_status(sk, hdev->id,
3669 MGMT_OP_SET_WIDEBAND_SPEECH,
3670 MGMT_STATUS_NOT_SUPPORTED);
3672 if (cp->val != 0x00 && cp->val != 0x01)
3673 return mgmt_cmd_status(sk, hdev->id,
3674 MGMT_OP_SET_WIDEBAND_SPEECH,
3675 MGMT_STATUS_INVALID_PARAMS);
3679 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3680 err = mgmt_cmd_status(sk, hdev->id,
3681 MGMT_OP_SET_WIDEBAND_SPEECH,
/* Changing the setting while powered would desync the controller. */
3686 if (hdev_is_powered(hdev) &&
3687 !!cp->val != hci_dev_test_flag(hdev,
3688 HCI_WIDEBAND_SPEECH_ENABLED)) {
3689 err = mgmt_cmd_status(sk, hdev->id,
3690 MGMT_OP_SET_WIDEBAND_SPEECH,
3691 MGMT_STATUS_REJECTED);
3696 changed = !hci_dev_test_and_set_flag(hdev,
3697 HCI_WIDEBAND_SPEECH_ENABLED);
3699 changed = hci_dev_test_and_clear_flag(hdev,
3700 HCI_WIDEBAND_SPEECH_ENABLED);
3702 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
/* Only broadcast New Settings when something actually changed. */
3707 err = new_settings(hdev, sk);
3710 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_SECURITY_INFO: build an EIR-encoded list of
 * security capabilities derived from the controller's supported HCI
 * commands. (excerpt: the buf declaration and some locals are elided)
 */
3714 static int read_security_info(struct sock *sk, struct hci_dev *hdev,
3715 void *data, u16 data_len)
3718 struct mgmt_rp_read_security_info *rp = (void *)buf;
3722 bt_dev_dbg(hdev, "sock %p", sk);
3724 memset(&buf, 0, sizeof(buf));
3728 /* When the Read Simple Pairing Options command is supported, then
3729 * the remote public key validation is supported.
/* commands[41] bit 0x08 = Read Local Simple Pairing Options. */
3731 if (hdev->commands[41] & 0x08)
3732 flags |= 0x01; /* Remote public key validation (BR/EDR) */
/* LE (SMP) always validates remote public keys. */
3734 flags |= 0x02; /* Remote public key validation (LE) */
3736 /* When the Read Encryption Key Size command is supported, then the
3737 * encryption key size is enforced.
/* commands[20] bit 0x10 = Read Encryption Key Size. */
3739 if (hdev->commands[20] & 0x10)
3740 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3742 flags |= 0x08; /* Encryption key size enforcement (LE) */
3744 sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);
3746 /* When the Read Simple Pairing Options command is supported, then
3747 * also max encryption key size information is provided.
3749 if (hdev->commands[41] & 0x08)
3750 sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
3751 hdev->max_enc_key_size);
3753 sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);
3755 rp->sec_len = cpu_to_le16(sec_len);
3757 hci_dev_unlock(hdev);
3759 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
3760 rp, sizeof(*rp) + sec_len);
3763 #ifdef CONFIG_BT_FEATURE_DEBUG
3764 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
/* Experimental-feature UUIDs are stored in reversed (little-endian)
 * byte order, matching the on-the-wire mgmt representation.
 */
3765 static const u8 debug_uuid[16] = {
3766 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3767 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3771 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3772 static const u8 simult_central_periph_uuid[16] = {
3773 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3774 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3777 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3778 static const u8 rpa_resolution_uuid[16] = {
3779 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3780 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features
 * currently advertised (debug, simultaneous central/peripheral, LL
 * privacy) and their flag state, then subscribe the socket to future
 * feature-change events. @hdev may be NULL (non-controller index).
 * NOTE(review): braces and idx increments are elided in this excerpt.
 */
3783 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3784 void *data, u16 data_len)
3786 char buf[62]; /* Enough space for 3 features */
3787 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3791 bt_dev_dbg(hdev, "sock %p", sk);
3793 memset(&buf, 0, sizeof(buf));
3795 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Debug feature has no controller index (hdev check elided). */
3797 flags = bt_dbg_get() ? BIT(0) : 0;
3799 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3800 rp->features[idx].flags = cpu_to_le32(flags);
/* le_states bits: Central, Peripheral and Simultaneous roles must all
 * be supported for this feature to be reported.
 */
3806 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3807 (hdev->le_states[4] & 0x08) && /* Central */
3808 (hdev->le_states[4] & 0x40) && /* Peripheral */
3809 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3814 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3815 rp->features[idx].flags = cpu_to_le32(flags);
3819 if (hdev && use_ll_privacy(hdev)) {
3820 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3821 flags = BIT(0) | BIT(1);
3825 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3826 rp->features[idx].flags = cpu_to_le32(flags);
3830 rp->feature_count = cpu_to_le16(idx);
3832 /* After reading the experimental features information, enable
3833 * the events to update client on any future change.
3835 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
/* Each feature entry is 20 bytes (16-byte UUID + 4-byte flags). */
3837 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3838 MGMT_OP_READ_EXP_FEATURES_INFO,
3839 0, rp, sizeof(*rp) + (20 * idx));
/* Broadcast an Experimental Feature Changed event for the LL privacy
 * feature. BIT(1) is always set (the feature stays supported); BIT(0)
 * reflects the enabled state. (excerpt: a mgmt_limited_event argument
 * line is elided)
 */
3842 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3845 struct mgmt_ev_exp_feature_changed ev;
3847 memset(&ev, 0, sizeof(ev));
3848 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3849 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3851 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3853 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3857 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Broadcast an Experimental Feature Changed event for the debug
 * feature; sent on the non-controller index (hdev is NULL). (excerpt:
 * a mgmt_limited_event argument line is elided)
 */
3858 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3860 struct mgmt_ev_exp_feature_changed ev;
3862 memset(&ev, 0, sizeof(ev));
3863 memcpy(ev.uuid, debug_uuid, 16);
3864 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3866 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3868 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Handle MGMT_OP_SET_EXP_FEATURE: a zero UUID disables all experimental
 * features; otherwise the UUID selects the feature (debug or LL
 * privacy) to toggle on/off via a single-octet parameter.
 * NOTE(review): braces, index checks and several condition lines are
 * elided in this excerpt.
 */
3872 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3873 void *data, u16 data_len)
3875 struct mgmt_cp_set_exp_feature *cp = data;
3876 struct mgmt_rp_set_exp_feature rp;
3878 bt_dev_dbg(hdev, "sock %p", sk);
/* All-zero UUID: disable every experimental feature at once. */
3880 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3881 memset(rp.uuid, 0, 16);
3882 rp.flags = cpu_to_le32(0);
3884 #ifdef CONFIG_BT_FEATURE_DEBUG
3886 bool changed = bt_dbg_get();
3891 exp_debug_feature_changed(false, sk);
/* LL privacy can only change while powered down. */
3895 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3896 bool changed = hci_dev_test_flag(hdev,
3897 HCI_ENABLE_LL_PRIVACY);
3899 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3902 exp_ll_privacy_feature_changed(false, hdev, sk);
3905 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3907 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3908 MGMT_OP_SET_EXP_FEATURE, 0,
3912 #ifdef CONFIG_BT_FEATURE_DEBUG
3913 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3917 /* Command requires to use the non-controller index */
3919 return mgmt_cmd_status(sk, hdev->id,
3920 MGMT_OP_SET_EXP_FEATURE,
3921 MGMT_STATUS_INVALID_INDEX);
3923 /* Parameters are limited to a single octet */
3924 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3925 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3926 MGMT_OP_SET_EXP_FEATURE,
3927 MGMT_STATUS_INVALID_PARAMS);
3929 /* Only boolean on/off is supported */
3930 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3931 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3932 MGMT_OP_SET_EXP_FEATURE,
3933 MGMT_STATUS_INVALID_PARAMS);
3935 val = !!cp->param[0];
3936 changed = val ? !bt_dbg_get() : bt_dbg_get();
3939 memcpy(rp.uuid, debug_uuid, 16);
3940 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3942 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3944 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3945 MGMT_OP_SET_EXP_FEATURE, 0,
/* Only notify other sockets when the value actually changed. */
3949 exp_debug_feature_changed(val, sk);
3955 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3960 /* Command requires to use the controller index */
3962 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3963 MGMT_OP_SET_EXP_FEATURE,
3964 MGMT_STATUS_INVALID_INDEX);
3966 /* Changes can only be made when controller is powered down */
3967 if (hdev_is_powered(hdev))
3968 return mgmt_cmd_status(sk, hdev->id,
3969 MGMT_OP_SET_EXP_FEATURE,
3970 MGMT_STATUS_NOT_POWERED);
3972 /* Parameters are limited to a single octet */
3973 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3974 return mgmt_cmd_status(sk, hdev->id,
3975 MGMT_OP_SET_EXP_FEATURE,
3976 MGMT_STATUS_INVALID_PARAMS);
3978 /* Only boolean on/off is supported */
3979 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3980 return mgmt_cmd_status(sk, hdev->id,
3981 MGMT_OP_SET_EXP_FEATURE,
3982 MGMT_STATUS_INVALID_PARAMS);
3984 val = !!cp->param[0];
/* Enabling LL privacy is incompatible with software advertising. */
3987 changed = !hci_dev_test_flag(hdev,
3988 HCI_ENABLE_LL_PRIVACY);
3989 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3990 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
3992 /* Enable LL privacy + supported settings changed */
3993 flags = BIT(0) | BIT(1);
3995 changed = hci_dev_test_flag(hdev,
3996 HCI_ENABLE_LL_PRIVACY);
3997 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3999 /* Disable LL privacy + supported settings changed */
4003 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4004 rp.flags = cpu_to_le32(flags);
4006 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4008 err = mgmt_cmd_complete(sk, hdev->id,
4009 MGMT_OP_SET_EXP_FEATURE, 0,
4013 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Unknown UUID: feature not supported. */
4018 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4019 MGMT_OP_SET_EXP_FEATURE,
4020 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask of all device flags currently defined (bits 0..MAX-1). */
4023 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* Handle MGMT_OP_GET_DEVICE_FLAGS: look up the device (accept list for
 * BR/EDR, connection parameters for LE) and report its current and
 * supported flags. (excerpt: NULL checks/braces are elided)
 */
4025 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4028 struct mgmt_cp_get_device_flags *cp = data;
4029 struct mgmt_rp_get_device_flags rp;
4030 struct bdaddr_list_with_flags *br_params;
4031 struct hci_conn_params *params;
4032 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4033 u32 current_flags = 0;
/* Assume failure until a matching device entry is found. */
4034 u8 status = MGMT_STATUS_INVALID_PARAMS;
4036 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4037 &cp->addr.bdaddr, cp->addr.type);
4041 memset(&rp, 0, sizeof(rp));
4043 if (cp->addr.type == BDADDR_BREDR) {
4044 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4050 current_flags = br_params->current_flags;
4052 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4053 le_addr_type(cp->addr.type));
4058 current_flags = params->current_flags;
4061 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4062 rp.addr.type = cp->addr.type;
4063 rp.supported_flags = cpu_to_le32(supported_flags);
4064 rp.current_flags = cpu_to_le32(current_flags);
4066 status = MGMT_STATUS_SUCCESS;
4069 hci_dev_unlock(hdev);
4071 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4075 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4076 bdaddr_t *bdaddr, u8 bdaddr_type,
4077 u32 supported_flags, u32 current_flags)
4079 struct mgmt_ev_device_flags_changed ev;
4081 bacpy(&ev.addr.bdaddr, bdaddr);
4082 ev.addr.type = bdaddr_type;
4083 ev.supported_flags = cpu_to_le32(supported_flags);
4084 ev.current_flags = cpu_to_le32(current_flags);
4086 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* Handle MGMT_OP_SET_DEVICE_FLAGS: validate the requested flags against
 * the supported mask and store them on the matching device entry
 * (accept list for BR/EDR, connection parameters for LE). Broadcasts a
 * Device Flags Changed event on success.
 * NOTE(review): braces and some NULL-check lines are elided in this
 * excerpt.
 */
4089 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4092 struct mgmt_cp_set_device_flags *cp = data;
4093 struct bdaddr_list_with_flags *br_params;
4094 struct hci_conn_params *params;
4095 u8 status = MGMT_STATUS_INVALID_PARAMS;
4096 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4097 u32 current_flags = __le32_to_cpu(cp->current_flags);
4099 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4100 &cp->addr.bdaddr, cp->addr.type,
4101 __le32_to_cpu(current_flags));
/* Any bit outside the supported mask makes the request invalid. */
4103 if ((supported_flags | current_flags) != supported_flags) {
4104 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4105 current_flags, supported_flags);
4111 if (cp->addr.type == BDADDR_BREDR) {
4112 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4117 br_params->current_flags = current_flags;
4118 status = MGMT_STATUS_SUCCESS;
4120 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4121 &cp->addr.bdaddr, cp->addr.type);
4124 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4125 le_addr_type(cp->addr.type));
4127 params->current_flags = current_flags;
4128 status = MGMT_STATUS_SUCCESS;
4130 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4132 le_addr_type(cp->addr.type));
4137 hci_dev_unlock(hdev);
/* Notify other mgmt sockets only when the update was applied. */
4139 if (status == MGMT_STATUS_SUCCESS)
4140 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4141 supported_flags, current_flags);
4143 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4144 &cp->addr, sizeof(cp->addr));
/* Emit MGMT_EV_ADV_MONITOR_ADDED (handle of the new monitor) to all mgmt
 * sockets except the originating @sk.
 */
4147 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4150 struct mgmt_ev_adv_monitor_added ev;
4152 ev.monitor_handle = cpu_to_le16(handle);
4154 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4157 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4160 struct mgmt_ev_adv_monitor_added ev;
4162 ev.monitor_handle = cpu_to_le16(handle);
4164 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported advertisement
 * monitor features and the handles of all currently registered monitors.
 */
4167 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4168 void *data, u16 len)
4170 struct adv_monitor *monitor = NULL;
4171 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4174 __u32 supported = 0;
4175 __u16 num_handles = 0;
4176 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4178 BT_DBG("request for %s", hdev->name);
/* OR-pattern monitors are only offered when the controller's MSFT
 * extension advertises LE advertisement monitoring support.
 */
4182 if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
4183 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Snapshot registered handles; presumably bounded by the add path to at
 * most HCI_MAX_ADV_MONITOR_NUM_HANDLES entries — verify against
 * hci_add_adv_monitor.
 */
4185 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
4186 handles[num_handles++] = monitor->handle;
4189 hci_dev_unlock(hdev);
/* Reply is variable length: fixed header plus one __le16 per handle.
 * NOTE(review): the kmalloc NULL check is in an elided line — confirm.
 */
4191 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4192 rp = kmalloc(rp_size, GFP_KERNEL);
4196 /* Once controller-based monitoring is in place, the enabled_features
4197 * should reflect the use.
4199 rp->supported_features = cpu_to_le32(supported);
4200 rp->enabled_features = 0;
4201 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4202 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4203 rp->num_handles = cpu_to_le16(num_handles);
4205 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4207 err = mgmt_cmd_complete(sk, hdev->id,
4208 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4209 MGMT_STATUS_SUCCESS, rp, rp_size);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: build an adv_monitor from the
 * user-supplied pattern list, register it, and return its handle.
 * NOTE(review): error-path gotos, NULL checks and labels are elided in this
 * view; the visible trailing hci_dev_unlock/hci_free_adv_monitor lines are
 * presumably the shared failure cleanup.
 */
4216 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4217 void *data, u16 len)
4219 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4220 struct mgmt_rp_add_adv_patterns_monitor rp;
4221 struct adv_monitor *m = NULL;
4222 struct adv_pattern *p = NULL;
4223 unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
4224 __u8 cp_ofst = 0, cp_len = 0;
4227 BT_DBG("request for %s", hdev->name);
/* A monitor must carry at least one pattern and the request must be
 * longer than the fixed header.
 */
4229 if (len <= sizeof(*cp) || cp->pattern_count == 0) {
4230 err = mgmt_cmd_status(sk, hdev->id,
4231 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4232 MGMT_STATUS_INVALID_PARAMS);
4236 m = kmalloc(sizeof(*m), GFP_KERNEL);
4242 INIT_LIST_HEAD(&m->patterns);
4245 for (i = 0; i < cp->pattern_count; i++) {
/* Cap the per-monitor pattern count. */
4246 if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
4247 err = mgmt_cmd_status(sk, hdev->id,
4248 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4249 MGMT_STATUS_INVALID_PARAMS);
4253 cp_ofst = cp->patterns[i].offset;
4254 cp_len = cp->patterns[i].length;
/* Offset + length must stay within a single AD structure. */
4255 if (cp_ofst >= HCI_MAX_AD_LENGTH ||
4256 cp_len > HCI_MAX_AD_LENGTH ||
4257 (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
4258 err = mgmt_cmd_status(sk, hdev->id,
4259 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4260 MGMT_STATUS_INVALID_PARAMS);
4264 p = kmalloc(sizeof(*p), GFP_KERNEL);
4270 p->ad_type = cp->patterns[i].ad_type;
4271 p->offset = cp->patterns[i].offset;
4272 p->length = cp->patterns[i].length;
4273 memcpy(p->value, cp->patterns[i].value, p->length);
4275 INIT_LIST_HEAD(&p->list);
4276 list_add(&p->list, &m->patterns);
/* Every advertised pattern must have been consumed. */
4279 if (mp_cnt != cp->pattern_count) {
4280 err = mgmt_cmd_status(sk, hdev->id,
4281 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4282 MGMT_STATUS_INVALID_PARAMS);
4288 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4290 err = hci_add_adv_monitor(hdev, m);
4292 if (err == -ENOSPC) {
4293 mgmt_cmd_status(sk, hdev->id,
4294 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4295 MGMT_STATUS_NO_RESOURCES);
/* A count increase means the monitor was actually registered; tell the
 * other mgmt clients.
 */
4300 if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
4301 mgmt_adv_monitor_added(sk, hdev, m->handle);
4303 hci_dev_unlock(hdev);
4305 rp.monitor_handle = cpu_to_le16(m->handle);
4307 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4308 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4311 hci_dev_unlock(hdev);
/* Failure cleanup: frees the monitor together with its pattern list. */
4314 hci_free_adv_monitor(m);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: delete the monitor identified by the
 * caller-supplied handle and echo the handle back on success.
 */
4318 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4319 void *data, u16 len)
4321 struct mgmt_cp_remove_adv_monitor *cp = data;
4322 struct mgmt_rp_remove_adv_monitor rp;
4323 unsigned int prev_adv_monitors_cnt;
4327 BT_DBG("request for %s", hdev->name);
4331 handle = __le16_to_cpu(cp->monitor_handle);
4332 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4334 err = hci_remove_adv_monitor(hdev, handle);
/* Unknown handle: surface it as INVALID_INDEX to userspace. */
4335 if (err == -ENOENT) {
4336 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4337 MGMT_STATUS_INVALID_INDEX);
/* A count decrease means a monitor was actually removed; notify the
 * other mgmt clients.
 */
4341 if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
4342 mgmt_adv_monitor_removed(sk, hdev, handle);
4344 hci_dev_unlock(hdev);
/* Already little-endian — copy straight from the request. */
4346 rp.monitor_handle = cp->monitor_handle;
4348 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4349 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4352 hci_dev_unlock(hdev);
/* HCI completion callback for Read Local OOB (Extended) Data: translate the
 * controller reply into the pending MGMT_OP_READ_LOCAL_OOB_DATA response.
 */
4356 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4357 u16 opcode, struct sk_buff *skb)
4359 struct mgmt_rp_read_local_oob_data mgmt_rp;
4360 size_t rp_size = sizeof(mgmt_rp);
4361 struct mgmt_pending_cmd *cmd;
4363 bt_dev_dbg(hdev, "status %u", status);
/* Bail out (in an elided line) if no command is pending. */
4365 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4369 if (status || !skb) {
4370 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4371 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4375 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
/* Legacy (P-192 only) variant of the command. */
4377 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4378 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short controller reply before touching fields. */
4380 if (skb->len < sizeof(*rp)) {
4381 mgmt_cmd_status(cmd->sk, hdev->id,
4382 MGMT_OP_READ_LOCAL_OOB_DATA,
4383 MGMT_STATUS_FAILED);
4387 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4388 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* No P-256 values available: shrink the reply so userspace does not
 * see zero-filled 256-bit fields.
 */
4390 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4392 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4394 if (skb->len < sizeof(*rp)) {
4395 mgmt_cmd_status(cmd->sk, hdev->id,
4396 MGMT_OP_READ_LOCAL_OOB_DATA,
4397 MGMT_STATUS_FAILED);
4401 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4402 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4404 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4405 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4408 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4409 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4412 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue the appropriate HCI Read Local
 * OOB command; the reply is delivered via read_local_oob_data_complete().
 */
4415 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4416 void *data, u16 data_len)
4418 struct mgmt_pending_cmd *cmd;
4419 struct hci_request req;
4422 bt_dev_dbg(hdev, "sock %p", sk);
4426 if (!hdev_is_powered(hdev)) {
4427 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4428 MGMT_STATUS_NOT_POWERED);
/* OOB data only exists with Secure Simple Pairing support. */
4432 if (!lmp_ssp_capable(hdev)) {
4433 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4434 MGMT_STATUS_NOT_SUPPORTED);
/* Only one outstanding request per controller. */
4438 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4439 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4444 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4450 hci_req_init(&req, hdev);
/* Prefer the extended (P-192 + P-256) variant when BR/EDR Secure
 * Connections is enabled.
 */
4452 if (bredr_sc_enabled(hdev))
4453 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4455 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4457 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4459 mgmt_pending_remove(cmd);
4462 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote OOB pairing material.
 * The command has two sizes: legacy (P-192 only, BR/EDR) and extended
 * (P-192 + P-256); anything else is rejected as invalid.
 */
4466 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4467 void *data, u16 len)
4469 struct mgmt_addr_info *addr = data;
4472 bt_dev_dbg(hdev, "sock %p", sk);
4474 if (!bdaddr_type_is_valid(addr->type))
4475 return mgmt_cmd_complete(sk, hdev->id,
4476 MGMT_OP_ADD_REMOTE_OOB_DATA,
4477 MGMT_STATUS_INVALID_PARAMS,
4478 addr, sizeof(*addr));
/* Legacy, P-192-only form of the command. */
4482 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4483 struct mgmt_cp_add_remote_oob_data *cp = data;
4486 if (cp->addr.type != BDADDR_BREDR) {
4487 err = mgmt_cmd_complete(sk, hdev->id,
4488 MGMT_OP_ADD_REMOTE_OOB_DATA,
4489 MGMT_STATUS_INVALID_PARAMS,
4490 &cp->addr, sizeof(cp->addr));
4494 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4495 cp->addr.type, cp->hash,
4496 cp->rand, NULL, NULL);
4498 status = MGMT_STATUS_FAILED;
4500 status = MGMT_STATUS_SUCCESS;
4502 err = mgmt_cmd_complete(sk, hdev->id,
4503 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4504 &cp->addr, sizeof(cp->addr));
4505 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4506 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4507 u8 *rand192, *hash192, *rand256, *hash256;
4510 if (bdaddr_type_is_le(cp->addr.type)) {
4511 /* Enforce zero-valued 192-bit parameters as
4512 * long as legacy SMP OOB isn't implemented.
4514 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4515 memcmp(cp->hash192, ZERO_KEY, 16)) {
4516 err = mgmt_cmd_complete(sk, hdev->id,
4517 MGMT_OP_ADD_REMOTE_OOB_DATA,
4518 MGMT_STATUS_INVALID_PARAMS,
4519 addr, sizeof(*addr));
4526 /* In case one of the P-192 values is set to zero,
4527 * then just disable OOB data for P-192.
4529 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4530 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4534 rand192 = cp->rand192;
4535 hash192 = cp->hash192;
4539 /* In case one of the P-256 values is set to zero, then just
4540 * disable OOB data for P-256.
4542 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4543 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4547 rand256 = cp->rand256;
4548 hash256 = cp->hash256;
4551 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4552 cp->addr.type, hash192, rand192,
4555 status = MGMT_STATUS_FAILED;
4557 status = MGMT_STATUS_SUCCESS;
4559 err = mgmt_cmd_complete(sk, hdev->id,
4560 MGMT_OP_ADD_REMOTE_OOB_DATA,
4561 status, &cp->addr, sizeof(cp->addr));
/* Neither recognized size: log and reject. */
4563 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4565 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4566 MGMT_STATUS_INVALID_PARAMS);
4570 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored OOB data for one
 * BR/EDR device, or for all devices when BDADDR_ANY is given.
 */
4574 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4575 void *data, u16 len)
4577 struct mgmt_cp_remove_remote_oob_data *cp = data;
4581 bt_dev_dbg(hdev, "sock %p", sk);
/* Remote OOB data is only kept for BR/EDR addresses. */
4583 if (cp->addr.type != BDADDR_BREDR)
4584 return mgmt_cmd_complete(sk, hdev->id,
4585 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4586 MGMT_STATUS_INVALID_PARAMS,
4587 &cp->addr, sizeof(cp->addr));
/* Wildcard address clears the whole OOB store. */
4591 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4592 hci_remote_oob_data_clear(hdev);
4593 status = MGMT_STATUS_SUCCESS;
4597 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4599 status = MGMT_STATUS_INVALID_PARAMS;
4601 status = MGMT_STATUS_SUCCESS;
4604 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4605 status, &cp->addr, sizeof(cp->addr));
4607 hci_dev_unlock(hdev);
/* Completion hook for the discovery start request: resolve whichever of the
 * three start-discovery variants is pending and handle suspend unpause.
 */
4611 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4613 struct mgmt_pending_cmd *cmd;
4615 bt_dev_dbg(hdev, "status %d", status);
/* Try the three possible pending opcodes in turn. */
4619 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4621 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4624 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4627 cmd->cmd_complete(cmd, mgmt_status(status));
4628 mgmt_pending_remove(cmd);
4631 hci_dev_unlock(hdev);
4633 /* Handle suspend notifier */
/* Wake the suspend machinery if it was waiting for discovery to
 * resume.
 */
4634 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4635 hdev->suspend_tasks)) {
4636 bt_dev_dbg(hdev, "Unpaused discovery");
4637 wake_up(&hdev->suspend_wait_q);
/* Validate a requested discovery type against the controller's transport
 * support; on rejection *mgmt_status is filled with the error to report.
 */
4641 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4642 uint8_t *mgmt_status)
4645 case DISCOV_TYPE_LE:
4646 *mgmt_status = mgmt_le_support(hdev);
/* Interleaved discovery needs LE (BR/EDR check presumably follows in
 * an elided line — confirm).
 */
4650 case DISCOV_TYPE_INTERLEAVED:
4651 *mgmt_status = mgmt_le_support(hdev);
4655 case DISCOV_TYPE_BREDR:
4656 *mgmt_status = mgmt_bredr_support(hdev);
/* Unknown type: always invalid. */
4661 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which): validate state,
 * record discovery parameters and kick off the discov_update work.
 */
4668 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4669 u16 op, void *data, u16 len)
4671 struct mgmt_cp_start_discovery *cp = data;
4672 struct mgmt_pending_cmd *cmd;
4676 bt_dev_dbg(hdev, "sock %p", sk);
4680 if (!hdev_is_powered(hdev)) {
4681 err = mgmt_cmd_complete(sk, hdev->id, op,
4682 MGMT_STATUS_NOT_POWERED,
4683 &cp->type, sizeof(cp->type));
/* Reject if discovery is already running or periodic inquiry active. */
4687 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4688 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4689 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4690 &cp->type, sizeof(cp->type));
4694 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4695 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4696 &cp->type, sizeof(cp->type));
4700 /* Can't start discovery when it is paused */
4701 if (hdev->discovery_paused) {
4702 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4703 &cp->type, sizeof(cp->type));
4707 /* Clear the discovery filter first to free any previously
4708 * allocated memory for the UUID list.
4710 hci_discovery_filter_clear(hdev);
4712 hdev->discovery.type = cp->type;
4713 hdev->discovery.report_invalid_rssi = false;
/* Limited discovery restricts results to limited-discoverable peers. */
4714 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4715 hdev->discovery.limited = true;
4717 hdev->discovery.limited = false;
4719 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4725 cmd->cmd_complete = generic_cmd_complete;
/* Actual scanning is started asynchronously by the request workqueue. */
4727 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4728 queue_work(hdev->req_workqueue, &hdev->discov_update);
4732 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY entry point — thin wrapper around
 * start_discovery_internal().
 */
4736 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4737 void *data, u16 len)
4739 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY entry point — thin wrapper around
 * start_discovery_internal().
 */
4743 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4744 void *data, u16 len)
4746 return start_discovery_internal(sk, hdev,
4747 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete callback for Start Service Discovery: replies with the
 * command's stored parameters (payload argument elided in this view).
 */
4751 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4754 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with an
 * RSSI threshold and optional UUID filter list appended to the command.
 */
4758 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4759 void *data, u16 len)
4761 struct mgmt_cp_start_service_discovery *cp = data;
4762 struct mgmt_pending_cmd *cmd;
/* Upper bound keeps sizeof(*cp) + uuid_count * 16 from overflowing u16. */
4763 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4764 u16 uuid_count, expected_len;
4768 bt_dev_dbg(hdev, "sock %p", sk);
4772 if (!hdev_is_powered(hdev)) {
4773 err = mgmt_cmd_complete(sk, hdev->id,
4774 MGMT_OP_START_SERVICE_DISCOVERY,
4775 MGMT_STATUS_NOT_POWERED,
4776 &cp->type, sizeof(cp->type));
4780 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4781 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4782 err = mgmt_cmd_complete(sk, hdev->id,
4783 MGMT_OP_START_SERVICE_DISCOVERY,
4784 MGMT_STATUS_BUSY, &cp->type,
/* Validate the variable-length UUID list before using it. */
4789 uuid_count = __le16_to_cpu(cp->uuid_count);
4790 if (uuid_count > max_uuid_count) {
4791 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
4793 err = mgmt_cmd_complete(sk, hdev->id,
4794 MGMT_OP_START_SERVICE_DISCOVERY,
4795 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Total length must exactly match header + 16 bytes per UUID. */
4800 expected_len = sizeof(*cp) + uuid_count * 16;
4801 if (expected_len != len) {
4802 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
4804 err = mgmt_cmd_complete(sk, hdev->id,
4805 MGMT_OP_START_SERVICE_DISCOVERY,
4806 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4811 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4812 err = mgmt_cmd_complete(sk, hdev->id,
4813 MGMT_OP_START_SERVICE_DISCOVERY,
4814 status, &cp->type, sizeof(cp->type));
4818 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4825 cmd->cmd_complete = service_discovery_cmd_complete;
4827 /* Clear the discovery filter first to free any previously
4828 * allocated memory for the UUID list.
4830 hci_discovery_filter_clear(hdev);
4832 hdev->discovery.result_filtering = true;
4833 hdev->discovery.type = cp->type;
4834 hdev->discovery.rssi = cp->rssi;
4835 hdev->discovery.uuid_count = uuid_count;
/* Keep our own copy of the UUID filter; freed later by
 * hci_discovery_filter_clear().
 */
4837 if (uuid_count > 0) {
4838 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4840 if (!hdev->discovery.uuids) {
4841 err = mgmt_cmd_complete(sk, hdev->id,
4842 MGMT_OP_START_SERVICE_DISCOVERY,
4844 &cp->type, sizeof(cp->type));
4845 mgmt_pending_remove(cmd);
4850 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4851 queue_work(hdev->req_workqueue, &hdev->discov_update);
4855 hci_dev_unlock(hdev);
/* Completion hook for the discovery stop request: resolve the pending
 * MGMT_OP_STOP_DISCOVERY command and wake the suspend path if waiting.
 */
4859 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4861 struct mgmt_pending_cmd *cmd;
4863 bt_dev_dbg(hdev, "status %d", status);
4867 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4869 cmd->cmd_complete(cmd, mgmt_status(status));
4870 mgmt_pending_remove(cmd);
4873 hci_dev_unlock(hdev);
4875 /* Handle suspend notifier */
/* Suspend flow waits for discovery to be paused before sleeping. */
4876 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
4877 bt_dev_dbg(hdev, "Paused discovery");
4878 wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY handler: validate that discovery of the given type
 * is active, then schedule the asynchronous stop via the workqueue.
 */
4882 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4885 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4886 struct mgmt_pending_cmd *cmd;
4889 bt_dev_dbg(hdev, "sock %p", sk);
4893 if (!hci_discovery_active(hdev)) {
4894 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4895 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4896 sizeof(mgmt_cp->type));
/* The type must match what was started. */
4900 if (hdev->discovery.type != mgmt_cp->type) {
4901 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4902 MGMT_STATUS_INVALID_PARAMS,
4903 &mgmt_cp->type, sizeof(mgmt_cp->type));
4907 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4913 cmd->cmd_complete = generic_cmd_complete;
/* The actual teardown runs from the request workqueue. */
4915 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4916 queue_work(hdev->req_workqueue, &hdev->discov_update);
4920 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether the name of an
 * inquiry result is already known, steering name-resolution during
 * discovery.
 */
4924 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4927 struct mgmt_cp_confirm_name *cp = data;
4928 struct inquiry_entry *e;
4931 bt_dev_dbg(hdev, "sock %p", sk);
/* Only meaningful while discovery is running. */
4935 if (!hci_discovery_active(hdev)) {
4936 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4937 MGMT_STATUS_FAILED, &cp->addr,
4942 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4944 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4945 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4950 if (cp->name_known) {
4951 e->name_state = NAME_KNOWN;
/* Name unknown: mark it so the resolve list will fetch it. */
4954 e->name_state = NAME_NEEDED;
4955 hci_inquiry_cache_update_resolve(hdev, e);
4958 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4959 &cp->addr, sizeof(cp->addr));
4962 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the reject list and emit
 * MGMT_EV_DEVICE_BLOCKED on success.
 */
4966 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4969 struct mgmt_cp_block_device *cp = data;
4973 bt_dev_dbg(hdev, "sock %p", sk);
4975 if (!bdaddr_type_is_valid(cp->addr.type))
4976 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4977 MGMT_STATUS_INVALID_PARAMS,
4978 &cp->addr, sizeof(cp->addr));
4982 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
/* Add failed (e.g. already present per the list API): report FAILED. */
4985 status = MGMT_STATUS_FAILED;
4989 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4991 status = MGMT_STATUS_SUCCESS;
4994 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4995 &cp->addr, sizeof(cp->addr));
4997 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the reject list
 * and emit MGMT_EV_DEVICE_UNBLOCKED on success.
 */
5002 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5005 struct mgmt_cp_unblock_device *cp = data;
5009 bt_dev_dbg(hdev, "sock %p", sk);
5011 if (!bdaddr_type_is_valid(cp->addr.type))
5012 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5013 MGMT_STATUS_INVALID_PARAMS,
5014 &cp->addr, sizeof(cp->addr));
5018 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
/* Address was not on the list: treat as invalid parameters. */
5021 status = MGMT_STATUS_INVALID_PARAMS;
5025 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5027 status = MGMT_STATUS_SUCCESS;
5030 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5031 &cp->addr, sizeof(cp->addr));
5033 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID (DI) record fields and
 * refresh the EIR data so the new identity is advertised.
 */
5038 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5041 struct mgmt_cp_set_device_id *cp = data;
5042 struct hci_request req;
5046 bt_dev_dbg(hdev, "sock %p", sk);
5048 source = __le16_to_cpu(cp->source);
/* Valid sources: 0x0000 disabled, 0x0001 Bluetooth SIG, 0x0002 USB IF. */
5050 if (source > 0x0002)
5051 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5052 MGMT_STATUS_INVALID_PARAMS);
5056 hdev->devid_source = source;
5057 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5058 hdev->devid_product = __le16_to_cpu(cp->product);
5059 hdev->devid_version = __le16_to_cpu(cp->version);
5061 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Push the updated Device ID into the EIR; fire-and-forget request. */
5064 hci_req_init(&req, hdev);
5065 __hci_req_update_eir(&req);
5066 hci_req_run(&req, NULL);
5068 hci_dev_unlock(hdev);
/* Completion callback used when re-enabling instance advertising; only
 * logs the status (request already finished at this point).
 */
5073 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5076 bt_dev_dbg(hdev, "status %d", status);
/* HCI completion callback for Set Advertising: sync the HCI_ADVERTISING
 * flag with the controller state, answer all pending commands, handle
 * suspend pause/unpause, and re-enable instance advertising if needed.
 */
5079 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5082 struct cmd_lookup match = { NULL, hdev };
5083 struct hci_request req;
5085 struct adv_info *adv_instance;
/* Failure: report the mapped error to every pending command. */
5091 u8 mgmt_err = mgmt_status(status);
5093 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5094 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's LE_ADV state into the mgmt-visible flag. */
5098 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5099 hci_dev_set_flag(hdev, HCI_ADVERTISING)
5101 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5103 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5106 new_settings(hdev, match.sk);
5111 /* Handle suspend notifier */
5112 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5113 hdev->suspend_tasks)) {
5114 bt_dev_dbg(hdev, "Paused advertising");
5115 wake_up(&hdev->suspend_wait_q);
5116 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5117 hdev->suspend_tasks)) {
5118 bt_dev_dbg(hdev, "Unpaused advertising");
5119 wake_up(&hdev->suspend_wait_q);
5122 /* If "Set Advertising" was just disabled and instance advertising was
5123 * set up earlier, then re-enable multi-instance advertising.
5125 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5126 list_empty(&hdev->adv_instances))
5129 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one. */
5131 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5132 struct adv_info, list);
5136 instance = adv_instance->instance;
5139 hci_req_init(&req, hdev);
5141 err = __hci_req_schedule_adv_instance(&req, instance, true);
5144 err = hci_req_run(&req, enable_advertising_instance);
5147 bt_dev_err(hdev, "failed to re-configure advertising");
5150 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler. val: 0x00 off, 0x01 on,
 * 0x02 on + connectable. When no HCI traffic is required the flags are
 * toggled directly; otherwise an async request is queued and resolved in
 * set_advertising_complete().
 */
5153 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5156 struct mgmt_mode *cp = data;
5157 struct mgmt_pending_cmd *cmd;
5158 struct hci_request req;
5162 bt_dev_dbg(hdev, "sock %p", sk);
5164 status = mgmt_le_support(hdev);
5166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5169 /* Enabling the experimental LL Privacy support disables support for
5172 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5173 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5174 MGMT_STATUS_NOT_SUPPORTED);
5176 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5177 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5178 MGMT_STATUS_INVALID_PARAMS);
/* Advertising is paused for suspend; refuse changes meanwhile. */
5180 if (hdev->advertising_paused)
5181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5188 /* The following conditions are ones which mean that we should
5189 * not do any HCI communication but directly send a mgmt
5190 * response to user space (after toggling the flag if
5193 if (!hdev_is_powered(hdev) ||
5194 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5195 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5196 hci_conn_num(hdev, LE_LINK) > 0 ||
5197 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5198 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5202 hdev->cur_adv_instance = 0x00;
5203 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5204 if (cp->val == 0x02)
5205 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5207 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5209 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5210 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5213 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Only broadcast New Settings if a flag actually changed. */
5218 err = new_settings(hdev, sk);
/* Serialize against concurrent SET_ADVERTISING / SET_LE requests. */
5223 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5224 pending_find(MGMT_OP_SET_LE, hdev)) {
5225 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5230 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5236 hci_req_init(&req, hdev);
5238 if (cp->val == 0x02)
5239 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5241 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5243 cancel_adv_timeout(hdev);
5246 /* Switch to instance "0" for the Set Advertising setting.
5247 * We cannot use update_[adv|scan_rsp]_data() here as the
5248 * HCI_ADVERTISING flag is not yet set.
5250 hdev->cur_adv_instance = 0x00;
5252 if (ext_adv_capable(hdev)) {
5253 __hci_req_start_ext_adv(&req, 0x00);
5255 __hci_req_update_adv_data(&req, 0x00);
5256 __hci_req_update_scan_rsp_data(&req, 0x00);
5257 __hci_req_enable_advertising(&req);
5260 __hci_req_disable_advertising(&req);
5263 err = hci_req_run(&req, set_advertising_complete);
5265 mgmt_pending_remove(cmd);
5268 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: set the random static address used
 * for LE. Only allowed while powered off; the address must either be
 * BDADDR_ANY (disable) or a valid static address.
 */
5272 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5273 void *data, u16 len)
5275 struct mgmt_cp_set_static_address *cp = data;
5278 bt_dev_dbg(hdev, "sock %p", sk);
5280 if (!lmp_le_capable(hdev))
5281 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5282 MGMT_STATUS_NOT_SUPPORTED);
5284 if (hdev_is_powered(hdev))
5285 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5286 MGMT_STATUS_REJECTED);
5288 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
/* BDADDR_NONE (ff:ff:ff:ff:ff:ff) is never a valid address. */
5289 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5290 return mgmt_cmd_status(sk, hdev->id,
5291 MGMT_OP_SET_STATIC_ADDRESS,
5292 MGMT_STATUS_INVALID_PARAMS);
5294 /* Two most significant bits shall be set */
5295 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5296 return mgmt_cmd_status(sk, hdev->id,
5297 MGMT_OP_SET_STATIC_ADDRESS,
5298 MGMT_STATUS_INVALID_PARAMS);
5303 bacpy(&hdev->static_addr, &cp->bdaddr);
5305 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5309 err = new_settings(hdev, sk);
5312 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: store new LE scan interval/window and
 * restart background scanning so the parameters take effect.
 */
5316 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5317 void *data, u16 len)
5319 struct mgmt_cp_set_scan_params *cp = data;
5320 __u16 interval, window;
5323 bt_dev_dbg(hdev, "sock %p", sk);
5325 if (!lmp_le_capable(hdev))
5326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5327 MGMT_STATUS_NOT_SUPPORTED);
5329 interval = __le16_to_cpu(cp->interval);
/* 0x0004-0x4000 is the HCI-defined range (2.5ms-10.24s in 0.625ms
 * units) for both interval and window.
 */
5331 if (interval < 0x0004 || interval > 0x4000)
5332 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5333 MGMT_STATUS_INVALID_PARAMS);
5335 window = __le16_to_cpu(cp->window);
5337 if (window < 0x0004 || window > 0x4000)
5338 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5339 MGMT_STATUS_INVALID_PARAMS);
/* The scan window must fit inside the interval. */
5341 if (window > interval)
5342 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5343 MGMT_STATUS_INVALID_PARAMS);
5347 hdev->le_scan_interval = interval;
5348 hdev->le_scan_window = window;
5350 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5353 /* If background scan is running, restart it so new parameters are
5356 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5357 hdev->discovery.state == DISCOVERY_STOPPED) {
5358 struct hci_request req;
5360 hci_req_init(&req, hdev);
5362 hci_req_add_le_scan_disable(&req, false);
5363 hci_req_add_le_passive_scan(&req);
5365 hci_req_run(&req, NULL);
5368 hci_dev_unlock(hdev);
/* HCI completion callback for Set Fast Connectable: commit or report the
 * result of the write-page-scan-activity request.
 */
5373 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5376 struct mgmt_pending_cmd *cmd;
5378 bt_dev_dbg(hdev, "status 0x%02x", status);
5382 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5387 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5388 mgmt_status(status));
/* Success: mirror the requested mode into the device flag. */
5390 struct mgmt_mode *cp = cmd->param;
5393 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5395 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5397 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5398 new_settings(hdev, cmd->sk);
5401 mgmt_pending_remove(cmd);
5404 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page scan
 * parameters (BR/EDR, requires Bluetooth >= 1.2).
 */
5407 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5408 void *data, u16 len)
5410 struct mgmt_mode *cp = data;
5411 struct mgmt_pending_cmd *cmd;
5412 struct hci_request req;
5415 bt_dev_dbg(hdev, "sock %p", sk);
5417 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5418 hdev->hci_ver < BLUETOOTH_VER_1_2)
5419 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5420 MGMT_STATUS_NOT_SUPPORTED);
5422 if (cp->val != 0x00 && cp->val != 0x01)
5423 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5424 MGMT_STATUS_INVALID_PARAMS);
5428 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5429 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Already in the requested state: just echo the settings back. */
5434 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5435 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* Powered off: flip the flag only, no HCI traffic possible. */
5440 if (!hdev_is_powered(hdev)) {
5441 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5442 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5444 new_settings(hdev, sk);
5448 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5455 hci_req_init(&req, hdev);
5457 __hci_req_write_fast_connectable(&req, cp->val);
5459 err = hci_req_run(&req, fast_connectable_complete);
5461 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5462 MGMT_STATUS_FAILED);
5463 mgmt_pending_remove(cmd);
5467 hci_dev_unlock(hdev);
/* HCI completion callback for Set BR/EDR: on failure roll the
 * HCI_BREDR_ENABLED flag back (it was flipped optimistically in
 * set_bredr()), otherwise confirm the new settings to the caller.
 */
5472 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5474 struct mgmt_pending_cmd *cmd;
5476 bt_dev_dbg(hdev, "status 0x%02x", status);
5480 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5485 u8 mgmt_err = mgmt_status(status);
5487 /* We need to restore the flag if related HCI commands
5490 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5492 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5494 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5495 new_settings(hdev, cmd->sk);
5498 mgmt_pending_remove(cmd);
5501 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. LE must stay enabled; several configurations (static
 * address, secure connections) forbid turning BR/EDR back on.
 */
5504 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5506 struct mgmt_mode *cp = data;
5507 struct mgmt_pending_cmd *cmd;
5508 struct hci_request req;
5511 bt_dev_dbg(hdev, "sock %p", sk);
5513 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5514 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5515 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR can only be toggled while LE stays on. */
5517 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5518 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5519 MGMT_STATUS_REJECTED);
5521 if (cp->val != 0x00 && cp->val != 0x01)
5522 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5523 MGMT_STATUS_INVALID_PARAMS);
/* No change requested: echo current settings. */
5527 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5528 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5532 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears all BR/EDR-only related flags. */
5534 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5535 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5536 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5537 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5538 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5541 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5543 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5547 err = new_settings(hdev, sk);
5551 /* Reject disabling when powered on */
5553 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5554 MGMT_STATUS_REJECTED);
5557 /* When configuring a dual-mode controller to operate
5558 * with LE only and using a static address, then switching
5559 * BR/EDR back on is not allowed.
5561 * Dual-mode controllers shall operate with the public
5562 * address as its identity address for BR/EDR and LE. So
5563 * reject the attempt to create an invalid configuration.
5565 * The same restrictions applies when secure connections
5566 * has been enabled. For BR/EDR this is a controller feature
5567 * while for LE it is a host stack feature. This means that
5568 * switching BR/EDR back on when secure connections has been
5569 * enabled is not a supported transaction.
5571 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5572 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5573 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5574 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5575 MGMT_STATUS_REJECTED);
5580 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5581 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5586 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5592 /* We need to flip the bit already here so that
5593 * hci_req_update_adv_data generates the correct flags.
5595 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5597 hci_req_init(&req, hdev);
5599 __hci_req_write_fast_connectable(&req, false);
5600 __hci_req_update_scan(&req);
5602 /* Since only the advertising data flags will change, there
5603 * is no need to update the scan response data.
5605 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5607 err = hci_req_run(&req, set_bredr_complete);
5609 mgmt_pending_remove(cmd);
5612 hci_dev_unlock(hdev);
/* HCI request completion callback for Secure Connections enable/disable.
 * On controller failure, reports the mapped mgmt status to the pending
 * SET_SECURE_CONN command; on success, updates HCI_SC_ENABLED/HCI_SC_ONLY
 * according to the requested mode and notifies via settings response plus
 * a New Settings event.
 */
5616 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5618 struct mgmt_pending_cmd *cmd;
5619 struct mgmt_mode *cp;
5621 bt_dev_dbg(hdev, "status %u", status);
5625 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5630 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5631 mgmt_status(status));
/* val 0x00: SC fully off. */
5639 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5640 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x01: SC enabled, but legacy pairing still allowed. */
5643 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5644 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x02: SC-only mode. */
5647 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5648 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5652 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5653 new_settings(hdev, cmd->sk);
5656 mgmt_pending_remove(cmd);
5658 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections support
 * (0x00 = off, 0x01 = on, 0x02 = SC-only). When the controller side is not
 * involved (powered off, no SC LMP feature, or BR/EDR disabled) only host
 * flags are toggled; otherwise HCI_OP_WRITE_SC_SUPPORT is queued and
 * finished in sc_enable_complete().
 */
5661 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5662 void *data, u16 len)
5664 struct mgmt_mode *cp = data;
5665 struct mgmt_pending_cmd *cmd;
5666 struct hci_request req;
5670 bt_dev_dbg(hdev, "sock %p", sk);
/* SC requires either controller SC support or LE being enabled. */
5672 if (!lmp_sc_capable(hdev) &&
5673 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5674 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5675 MGMT_STATUS_NOT_SUPPORTED);
/* For BR/EDR, SC builds on SSP; reject if SSP is not enabled. */
5677 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5678 lmp_sc_capable(hdev) &&
5679 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5680 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5681 MGMT_STATUS_REJECTED);
5683 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5684 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5685 MGMT_STATUS_INVALID_PARAMS);
/* Host-only path: no HCI command needed, just flip the flags. */
5689 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5690 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5694 changed = !hci_dev_test_and_set_flag(hdev,
5696 if (cp->val == 0x02)
5697 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5699 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5701 changed = hci_dev_test_and_clear_flag(hdev,
5703 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5706 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5711 err = new_settings(hdev, sk);
/* Only one SET_SECURE_CONN command may be pending at a time. */
5716 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5717 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state (including SC-only sub-state). */
5724 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5725 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5726 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5730 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5736 hci_req_init(&req, hdev);
5737 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5738 err = hci_req_run(&req, sc_enable_complete);
5740 mgmt_pending_remove(cmd);
5745 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: control retention (HCI_KEEP_DEBUG_KEYS)
 * and active use (HCI_USE_DEBUG_KEYS, val 0x02) of SSP debug keys. When the
 * "use" state changes on a powered, SSP-enabled controller, the change is
 * pushed to the controller via HCI_OP_WRITE_SSP_DEBUG_MODE.
 */
5749 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5750 void *data, u16 len)
5752 struct mgmt_mode *cp = data;
5753 bool changed, use_changed;
5756 bt_dev_dbg(hdev, "sock %p", sk);
5758 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5759 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5760 MGMT_STATUS_INVALID_PARAMS);
/* Any non-zero val keeps debug keys; 0x00 stops keeping them. */
5765 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5767 changed = hci_dev_test_and_clear_flag(hdev,
5768 HCI_KEEP_DEBUG_KEYS);
/* Only val 0x02 additionally enables generation/use of debug keys. */
5770 if (cp->val == 0x02)
5771 use_changed = !hci_dev_test_and_set_flag(hdev,
5772 HCI_USE_DEBUG_KEYS);
5774 use_changed = hci_dev_test_and_clear_flag(hdev,
5775 HCI_USE_DEBUG_KEYS);
5777 if (hdev_is_powered(hdev) && use_changed &&
5778 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5779 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5780 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5781 sizeof(mode), &mode);
5784 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5789 err = new_settings(hdev, sk);
5792 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (0x00 off,
 * 0x01 on, 0x02 limited privacy) and install the local IRK. Only allowed
 * while the controller is powered off; powered devices get REJECTED.
 */
5796 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5799 struct mgmt_cp_set_privacy *cp = cp_data;
5803 bt_dev_dbg(hdev, "sock %p", sk);
5805 if (!lmp_le_capable(hdev))
5806 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5807 MGMT_STATUS_NOT_SUPPORTED);
5809 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
5810 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5811 MGMT_STATUS_INVALID_PARAMS);
/* Privacy configuration may only change while powered off. */
5813 if (hdev_is_powered(hdev))
5814 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5815 MGMT_STATUS_REJECTED);
5819 /* If user space supports this command it is also expected to
5820 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5822 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enabling: store the supplied IRK and force RPA regeneration. */
5825 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5826 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5827 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5828 hci_adv_instances_set_rpa_expired(hdev, true);
5829 if (cp->privacy == 0x02)
5830 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
5832 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disabling: wipe the IRK and clear all privacy-related flags. */
5834 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5835 memset(hdev->irk, 0, sizeof(hdev->irk));
5836 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5837 hci_adv_instances_set_rpa_expired(hdev, false);
5838 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
5841 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5846 err = new_settings(hdev, sk);
5849 hci_dev_unlock(hdev);
/* Validate the address type of an IRK entry supplied via Load IRKs:
 * public LE addresses are always acceptable; random LE addresses must be
 * static (two most significant bits of the address set).
 */
5853 static bool irk_is_valid(struct mgmt_irk_info *irk)
5855 switch (irk->addr.type) {
5856 case BDADDR_LE_PUBLIC:
5859 case BDADDR_LE_RANDOM:
5860 /* Two most significant bits shall be set */
5861 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the kernel's SMP IRK list with the
 * IRKs supplied by user space. Validates the element count against the
 * command length and each entry's address, skips keys on the blocked-key
 * list, and finally marks the controller as doing RPA resolving.
 */
5869 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5872 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound so irk_count can never imply a payload larger than U16_MAX. */
5873 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5874 sizeof(struct mgmt_irk_info));
5875 u16 irk_count, expected_len;
5878 bt_dev_dbg(hdev, "sock %p", sk);
5880 if (!lmp_le_capable(hdev))
5881 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5882 MGMT_STATUS_NOT_SUPPORTED);
5884 irk_count = __le16_to_cpu(cp->irk_count);
5885 if (irk_count > max_irk_count) {
5886 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5888 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5889 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual command payload exactly. */
5892 expected_len = struct_size(cp, irks, irk_count);
5893 if (expected_len != len) {
5894 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5896 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5897 MGMT_STATUS_INVALID_PARAMS);
5900 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate all entries before mutating any state. */
5902 for (i = 0; i < irk_count; i++) {
5903 struct mgmt_irk_info *key = &cp->irks[i];
5905 if (!irk_is_valid(key))
5906 return mgmt_cmd_status(sk, hdev->id,
5908 MGMT_STATUS_INVALID_PARAMS);
/* Atomically replace the stored IRKs with the validated set. */
5913 hci_smp_irks_clear(hdev);
5915 for (i = 0; i < irk_count; i++) {
5916 struct mgmt_irk_info *irk = &cp->irks[i];
5918 if (hci_is_blocked_key(hdev,
5919 HCI_BLOCKED_KEY_TYPE_IRK,
5921 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
5926 hci_add_irk(hdev, &irk->addr.bdaddr,
5927 le_addr_type(irk->addr.type), irk->val,
5931 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5933 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5935 hci_dev_unlock(hdev);
/* Validate one Long Term Key entry from Load Long Term Keys: the master
 * field must be a strict boolean, and random LE addresses must be static
 * (two most significant bits set), same rule as for IRKs.
 */
5940 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5942 if (key->master != 0x00 && key->master != 0x01)
5945 switch (key->addr.type) {
5946 case BDADDR_LE_PUBLIC:
5949 case BDADDR_LE_RANDOM:
5950 /* Two most significant bits shall be set */
5951 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the kernel's SMP LTK store
 * with the keys provided by user space. Performs count/length validation,
 * rejects malformed entries, skips blocked keys, and maps each mgmt key
 * type to the corresponding SMP key type + authenticated flag.
 */
5959 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5960 void *cp_data, u16 len)
5962 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound so key_count can never imply a payload above U16_MAX. */
5963 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5964 sizeof(struct mgmt_ltk_info));
5965 u16 key_count, expected_len;
5968 bt_dev_dbg(hdev, "sock %p", sk);
5970 if (!lmp_le_capable(hdev))
5971 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5972 MGMT_STATUS_NOT_SUPPORTED);
5974 key_count = __le16_to_cpu(cp->key_count);
5975 if (key_count > max_key_count) {
5976 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
5978 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5979 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload length exactly. */
5982 expected_len = struct_size(cp, keys, key_count);
5983 if (expected_len != len) {
5984 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
5986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5987 MGMT_STATUS_INVALID_PARAMS);
5990 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate all entries before clearing the existing key store. */
5992 for (i = 0; i < key_count; i++) {
5993 struct mgmt_ltk_info *key = &cp->keys[i];
5995 if (!ltk_is_valid(key))
5996 return mgmt_cmd_status(sk, hdev->id,
5997 MGMT_OP_LOAD_LONG_TERM_KEYS,
5998 MGMT_STATUS_INVALID_PARAMS);
6003 hci_smp_ltks_clear(hdev);
6005 for (i = 0; i < key_count; i++) {
6006 struct mgmt_ltk_info *key = &cp->keys[i];
6007 u8 type, authenticated;
6009 if (hci_is_blocked_key(hdev,
6010 HCI_BLOCKED_KEY_TYPE_LTK,
6012 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map the mgmt key type onto SMP key type + authentication level;
 * for legacy (non-P256) keys the master bit selects master/slave.
 */
6017 switch (key->type) {
6018 case MGMT_LTK_UNAUTHENTICATED:
6019 authenticated = 0x00;
6020 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6022 case MGMT_LTK_AUTHENTICATED:
6023 authenticated = 0x01;
6024 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6026 case MGMT_LTK_P256_UNAUTH:
6027 authenticated = 0x00;
6028 type = SMP_LTK_P256;
6030 case MGMT_LTK_P256_AUTH:
6031 authenticated = 0x01;
6032 type = SMP_LTK_P256;
6034 case MGMT_LTK_P256_DEBUG:
6035 authenticated = 0x00;
6036 type = SMP_LTK_P256_DEBUG;
6042 hci_add_ltk(hdev, &key->addr.bdaddr,
6043 le_addr_type(key->addr.type), type, authenticated,
6044 key->val, key->enc_size, key->ediv, key->rand);
6047 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6050 hci_dev_unlock(hdev);
/* Completion helper for a pending Get Connection Information command:
 * copies the target address from the stored command parameters, fills in
 * RSSI/TX-power values from the hci_conn on success (or invalid markers on
 * failure), sends the reply, and releases the connection reference taken
 * when the command was queued.
 */
6055 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6057 struct hci_conn *conn = cmd->user_data;
6058 struct mgmt_rp_get_conn_info rp;
6061 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6063 if (status == MGMT_STATUS_SUCCESS) {
6064 rp.rssi = conn->rssi;
6065 rp.tx_power = conn->tx_power;
6066 rp.max_tx_power = conn->max_tx_power;
/* On failure the reply still carries the address, with sentinel values. */
6068 rp.rssi = HCI_RSSI_INVALID;
6069 rp.tx_power = HCI_TX_POWER_INVALID;
6070 rp.max_tx_power = HCI_TX_POWER_INVALID;
6073 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6074 status, &rp, sizeof(rp));
6076 hci_conn_drop(conn);
/* HCI request callback for the RSSI / TX-power refresh issued by
 * get_conn_info(). Recovers the connection handle from whichever command
 * was sent last, looks up the hci_conn and the matching pending mgmt
 * command, and completes it via conn_info_cmd_complete().
 */
6082 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6085 struct hci_cp_read_rssi *cp;
6086 struct mgmt_pending_cmd *cmd;
6087 struct hci_conn *conn;
6091 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
6095 /* Commands sent in request are either Read RSSI or Read Transmit Power
6096 * Level so we check which one was last sent to retrieve connection
6097 * handle. Both commands have handle as first parameter so it's safe to
6098 * cast data on the same command struct.
6100 * First command sent is always Read RSSI and we fail only if it fails.
6101 * In other case we simply override error to indicate success as we
6102 * already remembered if TX power value is actually valid.
6104 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
6106 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
6107 status = MGMT_STATUS_SUCCESS;
6109 status = mgmt_status(hci_status);
6113 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
6117 handle = __le16_to_cpu(cp->handle);
6118 conn = hci_conn_hash_lookup_handle(hdev, handle);
6120 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
6125 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
6129 cmd->cmd_complete(cmd, status);
6130 mgmt_pending_remove(cmd);
6133 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: report RSSI, TX power and max TX power
 * for an existing connection. If the cached values in hci_conn are fresh
 * (younger than a randomized conn_info_min_age..conn_info_max_age window),
 * they are returned directly; otherwise an HCI request refreshes them and
 * the reply is deferred to conn_info_refresh_complete().
 */
6136 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6139 struct mgmt_cp_get_conn_info *cp = data;
6140 struct mgmt_rp_get_conn_info rp;
6141 struct hci_conn *conn;
6142 unsigned long conn_info_age;
6145 bt_dev_dbg(hdev, "sock %p", sk);
6147 memset(&rp, 0, sizeof(rp));
6148 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6149 rp.addr.type = cp->addr.type;
6151 if (!bdaddr_type_is_valid(cp->addr.type))
6152 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6153 MGMT_STATUS_INVALID_PARAMS,
6158 if (!hdev_is_powered(hdev)) {
6159 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6160 MGMT_STATUS_NOT_POWERED, &rp,
/* Pick the hash bucket by transport: ACL for BR/EDR, otherwise LE. */
6165 if (cp->addr.type == BDADDR_BREDR)
6166 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6169 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6171 if (!conn || conn->state != BT_CONNECTED) {
6172 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6173 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one GET_CONN_INFO per connection may be in flight. */
6178 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
6179 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6180 MGMT_STATUS_BUSY, &rp, sizeof(rp));
6184 /* To avoid client trying to guess when to poll again for information we
6185 * calculate conn info age as random value between min/max set in hdev.
6187 conn_info_age = hdev->conn_info_min_age +
6188 prandom_u32_max(hdev->conn_info_max_age -
6189 hdev->conn_info_min_age);
6191 /* Query controller to refresh cached values if they are too old or were
6194 if (time_after(jiffies, conn->conn_info_timestamp +
6195 msecs_to_jiffies(conn_info_age)) ||
6196 !conn->conn_info_timestamp) {
6197 struct hci_request req;
6198 struct hci_cp_read_tx_power req_txp_cp;
6199 struct hci_cp_read_rssi req_rssi_cp;
6200 struct mgmt_pending_cmd *cmd;
6202 hci_req_init(&req, hdev);
6203 req_rssi_cp.handle = cpu_to_le16(conn->handle);
6204 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
6207 /* For LE links TX power does not change thus we don't need to
6208 * query for it once value is known.
6210 if (!bdaddr_type_is_le(cp->addr.type) ||
6211 conn->tx_power == HCI_TX_POWER_INVALID) {
6212 req_txp_cp.handle = cpu_to_le16(conn->handle);
6213 req_txp_cp.type = 0x00;
6214 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6215 sizeof(req_txp_cp), &req_txp_cp);
6218 /* Max TX power needs to be read only once per connection */
6219 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
6220 req_txp_cp.handle = cpu_to_le16(conn->handle);
6221 req_txp_cp.type = 0x01;
6222 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6223 sizeof(req_txp_cp), &req_txp_cp);
6226 err = hci_req_run(&req, conn_info_refresh_complete);
6230 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until conn_info_cmd_complete() drops it. */
6237 hci_conn_hold(conn);
6238 cmd->user_data = hci_conn_get(conn);
6239 cmd->cmd_complete = conn_info_cmd_complete;
6241 conn->conn_info_timestamp = jiffies;
6243 /* Cache is valid, just reply with values cached in hci_conn */
6244 rp.rssi = conn->rssi;
6245 rp.tx_power = conn->tx_power;
6246 rp.max_tx_power = conn->max_tx_power;
6248 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6249 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6253 hci_dev_unlock(hdev);
/* Completion helper for a pending Get Clock Information command: builds
 * the reply from hdev->clock (local clock) and, when a connection was
 * involved, the piconet clock and accuracy cached in the hci_conn; then
 * drops the connection reference taken when the command was queued.
 */
6257 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6259 struct hci_conn *conn = cmd->user_data;
6260 struct mgmt_rp_get_clock_info rp;
6261 struct hci_dev *hdev;
6264 memset(&rp, 0, sizeof(rp));
6265 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
/* cmd->index identifies the controller; re-resolve it to read hdev->clock. */
6270 hdev = hci_dev_get(cmd->index);
6272 rp.local_clock = cpu_to_le32(hdev->clock);
6277 rp.piconet_clock = cpu_to_le32(conn->clock);
6278 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6282 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6286 hci_conn_drop(conn);
/* HCI request callback for Get Clock Information: inspects the last sent
 * HCI_OP_READ_CLOCK command; if it targeted the piconet clock (which != 0)
 * the connection is resolved from its handle, then the matching pending
 * mgmt command is completed via clock_info_cmd_complete().
 */
6293 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6295 struct hci_cp_read_clock *hci_cp;
6296 struct mgmt_pending_cmd *cmd;
6297 struct hci_conn *conn;
6299 bt_dev_dbg(hdev, "status %u", status);
6303 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means piconet clock was requested for a specific handle. */
6307 if (hci_cp->which) {
6308 u16 handle = __le16_to_cpu(hci_cp->handle);
6309 conn = hci_conn_hash_lookup_handle(hdev, handle);
6314 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6318 cmd->cmd_complete(cmd, mgmt_status(status));
6319 mgmt_pending_remove(cmd);
6322 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): queues HCI_OP_READ_CLOCK
 * for the local clock and, when a specific peer address was given and is
 * connected, a second READ_CLOCK for the piconet clock. The reply is sent
 * from get_clock_info_complete()/clock_info_cmd_complete().
 */
6325 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6328 struct mgmt_cp_get_clock_info *cp = data;
6329 struct mgmt_rp_get_clock_info rp;
6330 struct hci_cp_read_clock hci_cp;
6331 struct mgmt_pending_cmd *cmd;
6332 struct hci_request req;
6333 struct hci_conn *conn;
6336 bt_dev_dbg(hdev, "sock %p", sk);
6338 memset(&rp, 0, sizeof(rp));
6339 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6340 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR concept; LE address types are invalid. */
6342 if (cp->addr.type != BDADDR_BREDR)
6343 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6344 MGMT_STATUS_INVALID_PARAMS,
6349 if (!hdev_is_powered(hdev)) {
6350 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6351 MGMT_STATUS_NOT_POWERED, &rp,
/* A non-ANY address asks for a peer's piconet clock as well; the peer
 * must be connected for that to be possible.
 */
6356 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6357 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6359 if (!conn || conn->state != BT_CONNECTED) {
6360 err = mgmt_cmd_complete(sk, hdev->id,
6361 MGMT_OP_GET_CLOCK_INFO,
6362 MGMT_STATUS_NOT_CONNECTED,
6370 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6376 cmd->cmd_complete = clock_info_cmd_complete;
6378 hci_req_init(&req, hdev);
/* First READ_CLOCK with zeroed params reads the local clock. */
6380 memset(&hci_cp, 0, sizeof(hci_cp));
6381 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until clock_info_cmd_complete() drops it. */
6384 hci_conn_hold(conn);
6385 cmd->user_data = hci_conn_get(conn);
6387 hci_cp.handle = cpu_to_le16(conn->handle);
6388 hci_cp.which = 0x01; /* Piconet clock */
6389 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6392 err = hci_req_run(&req, get_clock_info_complete);
6394 mgmt_pending_remove(cmd);
6397 hci_dev_unlock(hdev);
/* Return whether there is an established (BT_CONNECTED) LE connection to
 * the given address with the given destination address type.
 */
6401 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6403 struct hci_conn *conn;
6405 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6409 if (conn->dst_type != type)
6412 if (conn->state != BT_CONNECTED)
6418 /* This function requires the caller holds hdev->lock */
/* Create (or fetch) the connection parameters for addr/addr_type and move
 * them onto the action list (pend_le_conns / pend_le_reports) that matches
 * the requested auto_connect policy. Returns early if the policy is
 * unchanged.
 */
6419 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6420 u8 addr_type, u8 auto_connect)
6422 struct hci_conn_params *params;
6424 params = hci_conn_params_add(hdev, addr, addr_type)
6428 if (params->auto_connect == auto_connect)
/* Detach from any previous action list before re-filing the entry. */
6431 list_del_init(&params->action);
6433 switch (auto_connect) {
6434 case HCI_AUTO_CONN_DISABLED:
6435 case HCI_AUTO_CONN_LINK_LOSS:
6436 /* If auto connect is being disabled when we're trying to
6437 * connect to device, keep connecting.
6439 if (params->explicit_connect)
6440 list_add(&params->action, &hdev->pend_le_conns)
6442 case HCI_AUTO_CONN_REPORT:
6443 if (params->explicit_connect)
6444 list_add(&params->action, &hdev->pend_le_conns)
6446 list_add(&params->action, &hdev->pend_le_reports)
6448 case HCI_AUTO_CONN_DIRECT:
6449 case HCI_AUTO_CONN_ALWAYS:
/* Only schedule a pending connection if none is established yet. */
6450 if (!is_connected(hdev, addr, addr_type))
6451 list_add(&params->action, &hdev->pend_le_conns)
6455 params->auto_connect = auto_connect;
6457 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
6458 addr, addr_type, auto_connect);
/* Emit a MGMT_EV_DEVICE_ADDED event for the given address/type so other
 * mgmt listeners (excluding the originating socket sk) learn about the
 * newly added device.
 */
6463 static void device_added(struct sock *sk, struct hci_dev *hdev,
6464 bdaddr_t *bdaddr, u8 type, u8 action)
6466 struct mgmt_ev_device_added ev;
6468 bacpy(&ev.addr.bdaddr, bdaddr);
6469 ev.addr.type = type;
6472 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler: whitelist a device for incoming BR/EDR
 * connections (accept list) or configure LE auto-connect behaviour via
 * conn params (action 0x00 = report/scan, 0x01 = direct, 0x02 = always).
 * Emits Device Added and Device Flags Changed events on success.
 */
6475 static int add_device(struct sock *sk, struct hci_dev *hdev,
6476 void *data, u16 len)
6478 struct mgmt_cp_add_device *cp = data;
6479 u8 auto_conn, addr_type;
6480 struct hci_conn_params *params;
6482 u32 current_flags = 0;
6484 bt_dev_dbg(hdev, "sock %p", sk);
/* BDADDR_ANY is not a real device and cannot be added. */
6486 if (!bdaddr_type_is_valid(cp->addr.type) ||
6487 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6488 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6489 MGMT_STATUS_INVALID_PARAMS,
6490 &cp->addr, sizeof(cp->addr));
6492 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6493 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6494 MGMT_STATUS_INVALID_PARAMS,
6495 &cp->addr, sizeof(cp->addr));
6499 if (cp->addr.type == BDADDR_BREDR) {
6500 /* Only incoming connections action is supported for now */
6501 if (cp->action != 0x01) {
6502 err = mgmt_cmd_complete(sk, hdev->id,
6504 MGMT_STATUS_INVALID_PARAMS,
6505 &cp->addr, sizeof(cp->addr));
6509 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
6515 hci_req_update_scan(hdev);
/* LE path: map the mgmt action onto the auto-connect policy. */
6520 addr_type = le_addr_type(cp->addr.type);
6522 if (cp->action == 0x02)
6523 auto_conn = HCI_AUTO_CONN_ALWAYS;
6524 else if (cp->action == 0x01)
6525 auto_conn = HCI_AUTO_CONN_DIRECT;
6527 auto_conn = HCI_AUTO_CONN_REPORT;
6529 /* Kernel internally uses conn_params with resolvable private
6530 * address, but Add Device allows only identity addresses.
6531 * Make sure it is enforced before calling
6532 * hci_conn_params_lookup.
6534 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6535 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6536 MGMT_STATUS_INVALID_PARAMS,
6537 &cp->addr, sizeof(cp->addr));
6541 /* If the connection parameters don't exist for this device,
6542 * they will be created and configured with defaults.
6544 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
6546 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6547 MGMT_STATUS_FAILED, &cp->addr,
6551 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6554 current_flags = params->current_flags;
/* Re-evaluate passive scanning now that the target set changed. */
6557 hci_update_background_scan(hdev);
6560 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6561 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
6562 SUPPORTED_DEVICE_FLAGS(), current_flags);
6564 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6565 MGMT_STATUS_SUCCESS, &cp->addr,
6569 hci_dev_unlock(hdev);
/* Emit a MGMT_EV_DEVICE_REMOVED event for the given address/type so other
 * mgmt listeners (excluding the originating socket sk) learn that the
 * device was removed.
 */
6573 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6574 bdaddr_t *bdaddr, u8 type)
6576 struct mgmt_ev_device_removed ev;
6578 bacpy(&ev.addr.bdaddr, bdaddr);
6579 ev.addr.type = type;
6581 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler. With a concrete address: remove a single
 * device from the BR/EDR accept list or delete its LE conn params. With
 * BDADDR_ANY: wipe the whole accept list and all removable LE conn params
 * (disabled and explicit-connect entries are preserved). Emits Device
 * Removed events for each deleted entry.
 */
6584 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6585 void *data, u16 len)
6587 struct mgmt_cp_remove_device *cp = data;
6590 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-ANY address: remove exactly one device. */
6594 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6595 struct hci_conn_params *params;
6598 if (!bdaddr_type_is_valid(cp->addr.type)) {
6599 err = mgmt_cmd_complete(sk, hdev->id,
6600 MGMT_OP_REMOVE_DEVICE,
6601 MGMT_STATUS_INVALID_PARAMS,
6602 &cp->addr, sizeof(cp->addr));
6606 if (cp->addr.type == BDADDR_BREDR) {
6607 err = hci_bdaddr_list_del(&hdev->accept_list,
6611 err = mgmt_cmd_complete(sk, hdev->id,
6612 MGMT_OP_REMOVE_DEVICE,
6613 MGMT_STATUS_INVALID_PARAMS,
6619 hci_req_update_scan(hdev);
6621 device_removed(sk, hdev, &cp->addr.bdaddr,
6626 addr_type = le_addr_type(cp->addr.type);
6628 /* Kernel internally uses conn_params with resolvable private
6629 * address, but Remove Device allows only identity addresses.
6630 * Make sure it is enforced before calling
6631 * hci_conn_params_lookup.
6633 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6634 err = mgmt_cmd_complete(sk, hdev->id,
6635 MGMT_OP_REMOVE_DEVICE,
6636 MGMT_STATUS_INVALID_PARAMS,
6637 &cp->addr, sizeof(cp->addr));
6641 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6644 err = mgmt_cmd_complete(sk, hdev->id,
6645 MGMT_OP_REMOVE_DEVICE,
6646 MGMT_STATUS_INVALID_PARAMS,
6647 &cp->addr, sizeof(cp->addr));
/* Disabled/explicit-connect entries were not added via Add Device,
 * so they may not be removed through this command.
 */
6651 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
6652 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
6653 err = mgmt_cmd_complete(sk, hdev->id,
6654 MGMT_OP_REMOVE_DEVICE,
6655 MGMT_STATUS_INVALID_PARAMS,
6656 &cp->addr, sizeof(cp->addr));
6660 list_del(&params->action);
6661 list_del(&params->list);
6663 hci_update_background_scan(hdev);
6665 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: bulk removal of all devices. */
6667 struct hci_conn_params *p, *tmp;
6668 struct bdaddr_list *b, *btmp;
/* For the wildcard form only addr.type 0 is valid. */
6670 if (cp->addr.type) {
6671 err = mgmt_cmd_complete(sk, hdev->id,
6672 MGMT_OP_REMOVE_DEVICE,
6673 MGMT_STATUS_INVALID_PARAMS,
6674 &cp->addr, sizeof(cp->addr));
6678 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
6679 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6684 hci_req_update_scan(hdev);
6686 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6687 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6689 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep explicit-connect entries alive but demote them. */
6690 if (p->explicit_connect) {
6691 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
6694 list_del(&p->action);
6699 bt_dev_dbg(hdev, "All LE connection parameters were removed");
6701 hci_update_background_scan(hdev);
6705 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
6706 MGMT_STATUS_SUCCESS, &cp->addr,
6709 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replace stored LE connection parameters
 * with the list supplied by user space. Clears disabled entries first,
 * then validates each element (address type, interval/latency/timeout
 * ranges via hci_check_conn_params) and stores the values; invalid entries
 * are logged and skipped rather than failing the whole command.
 */
6713 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6716 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound so param_count can never imply a payload above U16_MAX. */
6717 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6718 sizeof(struct mgmt_conn_param));
6719 u16 param_count, expected_len;
6722 if (!lmp_le_capable(hdev))
6723 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6724 MGMT_STATUS_NOT_SUPPORTED);
6726 param_count = __le16_to_cpu(cp->param_count);
6727 if (param_count > max_param_count) {
6728 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
6730 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6731 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload length exactly. */
6734 expected_len = struct_size(cp, params, param_count);
6735 if (expected_len != len) {
6736 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
6738 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6739 MGMT_STATUS_INVALID_PARAMS);
6742 bt_dev_dbg(hdev, "param_count %u", param_count);
6746 hci_conn_params_clear_disabled(hdev);
6748 for (i = 0; i < param_count; i++) {
6749 struct mgmt_conn_param *param = &cp->params[i];
6750 struct hci_conn_params *hci_param;
6751 u16 min, max, latency, timeout;
6754 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
6757 if (param->addr.type == BDADDR_LE_PUBLIC) {
6758 addr_type = ADDR_LE_DEV_PUBLIC;
6759 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6760 addr_type = ADDR_LE_DEV_RANDOM;
6762 bt_dev_err(hdev, "ignoring invalid connection parameters");
6766 min = le16_to_cpu(param->min_interval);
6767 max = le16_to_cpu(param->max_interval);
6768 latency = le16_to_cpu(param->latency);
6769 timeout = le16_to_cpu(param->timeout);
6771 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6772 min, max, latency, timeout);
6774 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6775 bt_dev_err(hdev, "ignoring invalid connection parameters");
6779 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6782 bt_dev_err(hdev, "failed to add connection parameters");
6786 hci_param->conn_min_interval = min;
6787 hci_param->conn_max_interval = max;
6788 hci_param->conn_latency = latency;
6789 hci_param->supervision_timeout = timeout;
6792 hci_dev_unlock(hdev);
6794 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggle the HCI_EXT_CONFIGURED flag
 * on a controller with the EXTERNAL_CONFIG quirk while it is powered off.
 * If the configured/unconfigured state flips as a result, the mgmt index
 * is re-registered so user space sees the transition.
 */
6798 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6799 void *data, u16 len)
6801 struct mgmt_cp_set_external_config *cp = data;
6805 bt_dev_dbg(hdev, "sock %p", sk);
/* Configuration source may only change while powered off. */
6807 if (hdev_is_powered(hdev))
6808 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6809 MGMT_STATUS_REJECTED);
6811 if (cp->config != 0x00 && cp->config != 0x01)
6812 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6813 MGMT_STATUS_INVALID_PARAMS);
6815 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6816 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6817 MGMT_STATUS_NOT_SUPPORTED);
6822 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6824 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6826 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6833 err = new_options(hdev, sk);
/* If the (un)configured state flipped, move the index between the
 * configured and unconfigured index lists.
 */
6835 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6836 mgmt_index_removed(hdev);
6838 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6839 hci_dev_set_flag(hdev, HCI_CONFIG);
6840 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6842 queue_work(hdev->req_workqueue, &hdev->power_on);
6844 set_bit(HCI_RAW, &hdev->flags);
6845 mgmt_index_added(hdev);
6850 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public address for a powered
 * off controller whose driver provides a set_bdaddr callback. When this
 * completes the device's configuration, the index is re-registered as a
 * configured controller and powered on for the auto-off initialization.
 */
6854 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6855 void *data, u16 len)
6857 struct mgmt_cp_set_public_address *cp = data;
6861 bt_dev_dbg(hdev, "sock %p", sk);
6863 if (hdev_is_powered(hdev))
6864 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6865 MGMT_STATUS_REJECTED);
/* BDADDR_ANY is not a valid public address. */
6867 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6868 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6869 MGMT_STATUS_INVALID_PARAMS);
/* Without driver support there is no way to program the address. */
6871 if (!hdev->set_bdaddr)
6872 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6873 MGMT_STATUS_NOT_SUPPORTED);
6877 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6878 bacpy(&hdev->public_addr, &cp->bdaddr);
6880 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6887 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6888 err = new_options(hdev, sk);
6890 if (is_configured(hdev)) {
6891 mgmt_index_removed(hdev);
6893 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6895 hci_dev_set_flag(hdev, HCI_CONFIG);
6896 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6898 queue_work(hdev->req_workqueue, &hdev->power_on);
6902 hci_dev_unlock(hdev);
/* Completion callback for the Read Local OOB (Extended) Data HCI request:
 * validates the controller reply, assembles an EIR blob containing class
 * of device plus the C192/R192 (and, when available and not in SC-only
 * mode, C256/R256) OOB values, answers the pending
 * READ_LOCAL_OOB_EXT_DATA command and broadcasts the refreshed data as a
 * Local OOB Data Updated event to subscribed sockets.
 */
6906 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6907 u16 opcode, struct sk_buff *skb)
6909 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6910 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6911 u8 *h192, *r192, *h256, *r256;
6912 struct mgmt_pending_cmd *cmd;
6916 bt_dev_dbg(hdev, "status %u", status);
6918 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6922 mgmt_cp = cmd->param;
6925 status = mgmt_status(status);
/* Legacy reply: only the P-192 hash and randomizer are present. */
6932 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6933 struct hci_rp_read_local_oob_data *rp;
6935 if (skb->len != sizeof(*rp)) {
6936 status = MGMT_STATUS_FAILED;
6939 status = MGMT_STATUS_SUCCESS;
6940 rp = (void *)skb->data;
/* EIR budget: type/len headers (5) + 16-byte hash + 16-byte rand TLVs. */
6942 eir_len = 5 + 18 + 18;
/* Extended reply: carries both P-192 and P-256 value pairs. */
6949 struct hci_rp_read_local_oob_ext_data *rp;
6951 if (skb->len != sizeof(*rp)) {
6952 status = MGMT_STATUS_FAILED;
6955 status = MGMT_STATUS_SUCCESS;
6956 rp = (void *)skb->data;
/* In SC-only mode the P-192 values are not usable, so only the
 * P-256 pair is exported.
 */
6958 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6959 eir_len = 5 + 18 + 18;
6963 eir_len = 5 + 18 + 18 + 18 + 18;
6973 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6980 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6981 hdev->dev_class, 3);
6984 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6985 EIR_SSP_HASH_C192, h192, 16);
6986 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6987 EIR_SSP_RAND_R192, r192, 16);
6991 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6992 EIR_SSP_HASH_C256, h256, 16);
6993 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6994 EIR_SSP_RAND_R256, r256, 16);
6998 mgmt_rp->type = mgmt_cp->type;
6999 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7001 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7002 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7003 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7004 if (err < 0 || status)
/* Stop echoing the update back to the requester's own socket. */
7007 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7009 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7010 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7011 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7014 mgmt_pending_remove(cmd);
/* Queue an HCI request for the controller's local SSP OOB data on behalf
 * of a Read Local OOB Extended Data mgmt command.  Uses the extended HCI
 * command when BR/EDR Secure Connections is enabled, the legacy one
 * otherwise; completion is handled by read_local_oob_ext_data_complete().
 */
static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	/* Track the mgmt command so the async completion can answer it. */
	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
	hci_req_init(&req, hdev);
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
		/* Request could not be started: drop the pending command. */
		mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.  For BR/EDR with SSP
 * enabled the data comes asynchronously from the controller (see
 * read_local_ssp_oob_req()); for LE the reply is synthesized here from
 * the local address, role, SC confirm/random values and AD flags.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	bt_dev_dbg(hdev, "sock %p", sk);
	if (hdev_is_powered(hdev)) {
	/* cp->type is a bitmask of requested transports. */
	case BIT(BDADDR_BREDR):
		status = mgmt_bredr_support(hdev);
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		status = mgmt_le_support(hdev);
		/* LE reply: address (9) + role (3) + SC confirm/random
		 * (18 + 18) + flags (3) elements.
		 */
		eir_len = 9 + 3 + 18 + 18 + 3;
		status = MGMT_STATUS_INVALID_PARAMS;
		status = MGMT_STATUS_NOT_POWERED;
	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Asynchronous path; the HCI completion replies. */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
		status = MGMT_STATUS_FAILED;
		eir_len = eir_append_data(rp->eir, eir_len,
					  hdev->dev_class, 3);
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
		/* Pick the static random address when forced, when no
		 * public address exists, or when LE-only with a static
		 * address configured; otherwise use the public address.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			memcpy(addr, &hdev->bdaddr, 6);
		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  hash, sizeof(hash));
			eir_len = eir_append_data(rp->eir, eir_len,
						  rand, sizeof(rand));
		flags = mgmt_get_adv_discov_flags(hdev);
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;
		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
	hci_dev_unlock(hdev);
	/* The requester implicitly opts in to future OOB data updates. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
	status = MGMT_STATUS_SUCCESS;
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
	/* Notify other interested sockets; @sk already got the reply. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT_ADV_FLAG_* advertising flags this controller
 * supports.  A base set is always offered; TX power and the secondary
 * PHY / hardware-offload flags depend on controller capabilities.
 */
static u32 get_supported_adv_flags(struct hci_dev *hdev)
	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
	/* In extended adv TX_POWER returned from Set Adv Param
	 * will be always valid.
	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
	    ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;
	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
		/* 2M and Coded PHYs require the matching LE feature bits. */
		if (hdev->le_features[1] & HCI_LE_PHY_2M)
			flags |= MGMT_ADV_FLAG_SEC_2M;
		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
			flags |= MGMT_ADV_FLAG_SEC_CODED;
7234 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7235 void *data, u16 data_len)
7237 struct mgmt_rp_read_adv_features *rp;
7240 struct adv_info *adv_instance;
7241 u32 supported_flags;
7244 bt_dev_dbg(hdev, "sock %p", sk);
7246 if (!lmp_le_capable(hdev))
7247 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7248 MGMT_STATUS_REJECTED);
7250 /* Enabling the experimental LL Privay support disables support for
7253 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7254 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7255 MGMT_STATUS_NOT_SUPPORTED);
7259 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7260 rp = kmalloc(rp_len, GFP_ATOMIC);
7262 hci_dev_unlock(hdev);
7266 supported_flags = get_supported_adv_flags(hdev);
7268 rp->supported_flags = cpu_to_le32(supported_flags);
7269 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7270 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7271 rp->max_instances = hdev->le_num_of_adv_sets;
7272 rp->num_instances = hdev->adv_instance_cnt;
7274 instance = rp->instance;
7275 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7276 *instance = adv_instance->instance;
7280 hci_dev_unlock(hdev);
7282 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7283 MGMT_STATUS_SUCCESS, rp, rp_len);
7290 static u8 calculate_name_len(struct hci_dev *hdev)
7292 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7294 return append_local_name(hdev, buf, 0);
/* Maximum TLV payload userspace may supply for an advertising instance,
 * starting from HCI_MAX_AD_LENGTH and subtracting room for every element
 * the kernel generates itself based on @adv_flags (flags, TX power,
 * local name, appearance).
 */
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
	u8 max_len = HCI_MAX_AD_LENGTH;
	/* Kernel-managed Flags element reserves space. */
	if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
			 MGMT_ADV_FLAG_LIMITED_DISCOV |
			 MGMT_ADV_FLAG_MANAGED_FLAGS))
	if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
	/* Local name length is computed from the current device name. */
	if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		max_len -= calculate_name_len(hdev);
	if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7321 static bool flags_managed(u32 adv_flags)
7323 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7324 MGMT_ADV_FLAG_LIMITED_DISCOV |
7325 MGMT_ADV_FLAG_MANAGED_FLAGS);
7328 static bool tx_power_managed(u32 adv_flags)
7330 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7333 static bool name_managed(u32 adv_flags)
7335 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7338 static bool appearance_managed(u32 adv_flags)
7340 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising or scan-response TLV data: it must
 * fit in the space left after kernel-managed elements and must not
 * contain any element type the kernel generates itself per @adv_flags.
 * data[i] is the element length, data[i + 1] the element type.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		/* Flags may never appear in adv data; in scan-rsp they are
		 * rejected only when the kernel manages them.
		 */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
/* HCI request completion for Add Advertising.  On failure, all instances
 * still marked pending are rolled back (removed and announced as removed);
 * the pending mgmt command, if any, is answered and released.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	bt_dev_dbg(hdev, "status %d", status);
	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
	/* _safe iteration: instances may be removed while walking. */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
		adv_instance->pending = false;
		instance = adv_instance->instance;
		/* Stop the rotation timer if it targets this instance. */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);
		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	rp.instance = cp->instance;
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_ADVERTISING: validate the request, register
 * (or replace) the advertising instance and, when possible, schedule it
 * on the controller via an HCI request completed by
 * add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 supported_flags, phy_flags;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	bt_dev_dbg(hdev, "sock %p", sk);
	status = mgmt_le_support(hdev);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
	/* Enabling the experimental LL Privacy support disables support for
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Instance identifiers are 1-based, bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);
	/* The trailing blob must exactly hold adv data + scan rsp data. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);
	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);
	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than
	 * one secondary-PHY bit is set (only one may be chosen).
	 */
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);
	/* A timeout needs a running clock, i.e. a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
	/* Serialize against other in-flight advertising/LE changes. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->data + cp->adv_data_len,
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
	/* Only trigger an advertising added event if a new instance was
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);
		next_instance = hci_get_next_instance(hdev, cp->instance);
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	/* We're good to go, update advertising data, parameters, and start
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
		err = hci_req_run(&req, add_advertising_complete);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* HCI request completion for Remove Advertising: answer the pending
 * mgmt command.  Success is reported regardless of @status (see comment
 * below — the instance itself is already gone).
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;
	bt_dev_dbg(hdev, "status %d", status);
	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	rp.instance = cp->instance;
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
7623 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7624 void *data, u16 data_len)
7626 struct mgmt_cp_remove_advertising *cp = data;
7627 struct mgmt_rp_remove_advertising rp;
7628 struct mgmt_pending_cmd *cmd;
7629 struct hci_request req;
7632 bt_dev_dbg(hdev, "sock %p", sk);
7634 /* Enabling the experimental LL Privay support disables support for
7637 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7638 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7639 MGMT_STATUS_NOT_SUPPORTED);
7643 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
7644 err = mgmt_cmd_status(sk, hdev->id,
7645 MGMT_OP_REMOVE_ADVERTISING,
7646 MGMT_STATUS_INVALID_PARAMS);
7650 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7651 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7652 pending_find(MGMT_OP_SET_LE, hdev)) {
7653 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7658 if (list_empty(&hdev->adv_instances)) {
7659 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7660 MGMT_STATUS_INVALID_PARAMS);
7664 hci_req_init(&req, hdev);
7666 /* If we use extended advertising, instance is disabled and removed */
7667 if (ext_adv_capable(hdev)) {
7668 __hci_req_disable_ext_adv_instance(&req, cp->instance);
7669 __hci_req_remove_ext_adv_instance(&req, cp->instance);
7672 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
7674 if (list_empty(&hdev->adv_instances))
7675 __hci_req_disable_advertising(&req);
7677 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
7678 * flag is set or the device isn't powered then we have no HCI
7679 * communication to make. Simply return.
7681 if (skb_queue_empty(&req.cmd_q) ||
7682 !hdev_is_powered(hdev) ||
7683 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7684 hci_req_purge(&req);
7685 rp.instance = cp->instance;
7686 err = mgmt_cmd_complete(sk, hdev->id,
7687 MGMT_OP_REMOVE_ADVERTISING,
7688 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7692 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
7699 err = hci_req_run(&req, remove_advertising_complete);
7701 mgmt_pending_remove(cmd);
7704 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: report, for a prospective
 * instance with the given flags, how many bytes of advertising and
 * scan-response data userspace may still supply.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;
	bt_dev_dbg(hdev, "sock %p", sk);
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);
	/* Instance identifiers are 1-based, bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);
	flags = __le32_to_cpu(cp->flags);
	/* The current implementation only supports a subset of the specified
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);
	rp.instance = cp->instance;
	rp.flags = cp->flags;
	/* Limits depend on which elements the kernel generates itself. */
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for mgmt commands, indexed by opcode.  Each entry holds
 * the handler, the expected parameter size and optional HCI_MGMT_* flags
 * (e.g. untrusted-socket access, availability on unconfigured
 * controllers, no-controller-index commands).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, MGMT_READ_VERSION_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ read_commands, MGMT_READ_COMMANDS_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ read_index_list, MGMT_READ_INDEX_LIST_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ read_controller_info, MGMT_READ_INFO_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_powered, MGMT_SETTING_SIZE },
	{ set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, MGMT_SETTING_SIZE },
	{ set_fast_connectable, MGMT_SETTING_SIZE },
	{ set_bondable, MGMT_SETTING_SIZE },
	{ set_link_security, MGMT_SETTING_SIZE },
	{ set_ssp, MGMT_SETTING_SIZE },
	{ set_hs, MGMT_SETTING_SIZE },
	{ set_le, MGMT_SETTING_SIZE },
	{ set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
	{ load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
	{ disconnect, MGMT_DISCONNECT_SIZE },
	{ get_connections, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
	{ remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, MGMT_SETTING_SIZE },
	{ set_bredr, MGMT_SETTING_SIZE },
	{ set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, MGMT_SETTING_SIZE },
	{ set_debug_keys, MGMT_SETTING_SIZE },
	{ set_privacy, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, MGMT_LOAD_IRKS_SIZE,
	{ get_conn_info, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device, MGMT_ADD_DEVICE_SIZE },
	{ remove_device, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
	{ read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
		HCI_MGMT_UNCONFIGURED |
		HCI_MGMT_UNTRUSTED },
	{ set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
		HCI_MGMT_UNCONFIGURED },
	{ set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
		HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising, MGMT_ADD_ADVERTISING_SIZE,
	{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_appearance, MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
	{ set_wideband_speech, MGMT_SETTING_SIZE },
	{ read_security_info, MGMT_READ_SECURITY_INFO_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
		HCI_MGMT_UNTRUSTED |
		HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
		HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
	{ get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
	{ remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
/* Announce a newly registered controller to mgmt listeners.  Raw-only
 * devices are never exposed; unconfigured controllers raise the
 * "unconfigured index added" variant, and an extended-index event is
 * emitted as well for sockets subscribed to it.
 */
void mgmt_index_added(struct hci_dev *hdev)
	struct mgmt_ev_ext_index ev;
	/* Raw HCI devices are not managed through the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
	switch (hdev->dev_type) {
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce removal of a controller.  All pending mgmt commands for it
 * are completed with MGMT_STATUS_INVALID_INDEX before the index-removed
 * events (regular or unconfigured variant, plus extended) are sent.
 */
void mgmt_index_removed(struct hci_dev *hdev)
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;
	/* Raw HCI devices are not managed through the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
	switch (hdev->dev_type) {
		/* Opcode 0 matches every pending command for this hdev. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
/* This function requires the caller holds hdev->lock */
/* Re-queue every LE connection parameter entry onto the pending
 * auto-connect or report list that matches its auto_connect policy,
 * typically after a power-on.
 */
static void restart_le_actions(struct hci_dev *hdev)
	struct hci_conn_params *p;
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);
		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
/* Power-on completion: restart LE auto-connect actions and background
 * scanning, answer pending Set Powered commands and emit New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
	struct cmd_lookup match = { NULL, hdev };
	bt_dev_dbg(hdev, "err %d", err);
	restart_le_actions(hdev);
	hci_update_background_scan(hdev);
	/* Complete all pending Set Powered commands for this controller. */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	new_settings(hdev, match.sk);
	hci_dev_unlock(hdev);
/* Power-off path: complete pending commands with an appropriate error,
 * announce a zeroed Class of Device if one was set, and emit New
 * Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
		status = MGMT_STATUS_NOT_POWERED;
	/* Opcode 0 matches every remaining pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
	/* Only announce the class change if it was actually non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	new_settings(hdev, match.sk);
/* Fail a pending Set Powered command; -ERFKILL maps to the dedicated
 * RFKILLED status, anything else to a generic failure.
 */
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
	struct mgmt_pending_cmd *cmd;
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
		status = MGMT_STATUS_FAILED;
	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
	mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key so
 * userspace can decide whether to store it (ev.store_hint).
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
	struct mgmt_ev_new_link_key ev;
	memset(&ev, 0, sizeof(ev));
	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	/* Link keys are a BR/EDR concept; address type is fixed. */
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;
	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long term key type plus its authenticated flag onto the
 * MGMT_LTK_* key type reported to userspace.
 */
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
	switch (ltk->type) {
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	/* Fallback for unknown types: treat as unauthenticated. */
	return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long term key.  The store
 * hint is cleared for non-identity random addresses (see comment below).
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
	struct mgmt_ev_new_long_term_key ev;
	memset(&ev, 0, sizeof(ev));
	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	/* 0xc0 top bits identify a static random (identity) address. */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
		ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;
	if (key->type == SMP_LTK)
	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);
	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK announcing a new identity resolving key together
 * with the resolvable private address it was received under.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
	struct mgmt_ev_new_irk ev;
	memset(&ev, 0, sizeof(ev));
	ev.store_hint = persistent;
	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key.  As with
 * LTKs, the store hint is cleared for non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
	struct mgmt_ev_new_csrk ev;
	memset(&ev, 0, sizeof(ev));
	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	/* 0xc0 top bits identify a static random (identity) address. */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
		ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with connection parameters suggested by a
 * remote LE device.  Only identity addresses are reported — parameters
 * tied to an unresolved RPA are not useful to userspace.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
	struct mgmt_ev_new_conn_param ev;
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);
	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED, attaching either the LE advertising
 * data seen from the peer or, for BR/EDR, the remote name and class of
 * device as EIR elements.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
	struct mgmt_ev_device_connected *ev = (void *) buf;
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev->flags = __cpu_to_le32(flags);
	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
		/* Skip an all-zero (unknown) class of device. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  conn->dev_class, 3);
	ev->eir_len = cpu_to_le16(eir_len);
	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending Disconnect command
 * and capture its socket in *sk for the subsequent disconnected event.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
	struct sock **sk = data;
	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: announce the device as unpaired and
 * complete the pending Unpair Device command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
/* Whether a Set Powered (off) command is currently pending, i.e. the
 * controller is in the process of being powered down via mgmt.
 */
bool mgmt_powering_down(struct hci_dev *hdev)
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED for an ACL/LE disconnection, answer
 * any pending Disconnect commands and, when this was the last connection
 * during a power-down, expedite the queued power-off work.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;
	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	if (!mgmt_connected)
	if (link_type != ACL_LINK && link_type != LE_LINK)
	/* disconnect_rsp() stores the requester's socket in sk. */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8299 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8300 u8 link_type, u8 addr_type, u8 status)
8302 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8303 struct mgmt_cp_disconnect *cp;
8304 struct mgmt_pending_cmd *cmd;
8306 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8309 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8315 if (bacmp(bdaddr, &cp->addr.bdaddr))
8318 if (cp->addr.type != bdaddr_type)
8321 cmd->cmd_complete(cmd, mgmt_status(status));
8322 mgmt_pending_remove(cmd);
8325 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8326 u8 addr_type, u8 status)
8328 struct mgmt_ev_connect_failed ev;
8330 /* The connection is still in hci_conn_hash so test for 1
8331 * instead of 0 to know if this is the last one.
8333 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8334 cancel_delayed_work(&hdev->power_off);
8335 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8338 bacpy(&ev.addr.bdaddr, bdaddr);
8339 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8340 ev.status = mgmt_status(status);
8342 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
8345 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8347 struct mgmt_ev_pin_code_request ev;
8349 bacpy(&ev.addr.bdaddr, bdaddr);
8350 ev.addr.type = BDADDR_BREDR;
8353 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8356 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8359 struct mgmt_pending_cmd *cmd;
8361 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8365 cmd->cmd_complete(cmd, mgmt_status(status));
8366 mgmt_pending_remove(cmd);
8369 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8372 struct mgmt_pending_cmd *cmd;
8374 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8378 cmd->cmd_complete(cmd, mgmt_status(status));
8379 mgmt_pending_remove(cmd);
8382 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8383 u8 link_type, u8 addr_type, u32 value,
8386 struct mgmt_ev_user_confirm_request ev;
8388 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8390 bacpy(&ev.addr.bdaddr, bdaddr);
8391 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8392 ev.confirm_hint = confirm_hint;
8393 ev.value = cpu_to_le32(value);
8395 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
8399 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8400 u8 link_type, u8 addr_type)
8402 struct mgmt_ev_user_passkey_request ev;
8404 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8406 bacpy(&ev.addr.bdaddr, bdaddr);
8407 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8409 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
8413 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8414 u8 link_type, u8 addr_type, u8 status,
8417 struct mgmt_pending_cmd *cmd;
8419 cmd = pending_find(opcode, hdev);
8423 cmd->cmd_complete(cmd, mgmt_status(status));
8424 mgmt_pending_remove(cmd);
8429 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8430 u8 link_type, u8 addr_type, u8 status)
8432 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8433 status, MGMT_OP_USER_CONFIRM_REPLY);
8436 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8437 u8 link_type, u8 addr_type, u8 status)
8439 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8441 MGMT_OP_USER_CONFIRM_NEG_REPLY);
8444 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8445 u8 link_type, u8 addr_type, u8 status)
8447 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8448 status, MGMT_OP_USER_PASSKEY_REPLY);
8451 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8452 u8 link_type, u8 addr_type, u8 status)
8454 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8456 MGMT_OP_USER_PASSKEY_NEG_REPLY);
8459 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
8460 u8 link_type, u8 addr_type, u32 passkey,
8463 struct mgmt_ev_passkey_notify ev;
8465 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8467 bacpy(&ev.addr.bdaddr, bdaddr);
8468 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8469 ev.passkey = __cpu_to_le32(passkey);
8470 ev.entered = entered;
8472 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
8475 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
8477 struct mgmt_ev_auth_failed ev;
8478 struct mgmt_pending_cmd *cmd;
8479 u8 status = mgmt_status(hci_status);
8481 bacpy(&ev.addr.bdaddr, &conn->dst);
8482 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8485 cmd = find_pairing(conn);
8487 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
8488 cmd ? cmd->sk : NULL);
8491 cmd->cmd_complete(cmd, status);
8492 mgmt_pending_remove(cmd);
8496 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
8498 struct cmd_lookup match = { NULL, hdev };
8502 u8 mgmt_err = mgmt_status(status);
8503 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
8504 cmd_status_rsp, &mgmt_err);
8508 if (test_bit(HCI_AUTH, &hdev->flags))
8509 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
8511 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
8513 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
8517 new_settings(hdev, match.sk);
8523 static void clear_eir(struct hci_request *req)
8525 struct hci_dev *hdev = req->hdev;
8526 struct hci_cp_write_eir cp;
8528 if (!lmp_ext_inq_capable(hdev))
8531 memset(hdev->eir, 0, sizeof(hdev->eir));
8533 memset(&cp, 0, sizeof(cp));
8535 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8538 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
8540 struct cmd_lookup match = { NULL, hdev };
8541 struct hci_request req;
8542 bool changed = false;
8545 u8 mgmt_err = mgmt_status(status);
8547 if (enable && hci_dev_test_and_clear_flag(hdev,
8549 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8550 new_settings(hdev, NULL);
8553 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
8559 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
8561 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
8563 changed = hci_dev_test_and_clear_flag(hdev,
8566 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8569 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
8572 new_settings(hdev, match.sk);
8577 hci_req_init(&req, hdev);
8579 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8580 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
8581 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
8582 sizeof(enable), &enable);
8583 __hci_req_update_eir(&req);
8588 hci_req_run(&req, NULL);
8591 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8593 struct cmd_lookup *match = data;
8595 if (match->sk == NULL) {
8596 match->sk = cmd->sk;
8597 sock_hold(match->sk);
8601 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
8604 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
8606 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
8607 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
8608 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
8611 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
8612 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8613 ext_info_changed(hdev, NULL);
8620 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
8622 struct mgmt_cp_set_local_name ev;
8623 struct mgmt_pending_cmd *cmd;
8628 memset(&ev, 0, sizeof(ev));
8629 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
8630 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
8632 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
8634 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
8636 /* If this is a HCI command related to powering on the
8637 * HCI dev don't send any mgmt signals.
8639 if (pending_find(MGMT_OP_SET_POWERED, hdev))
8643 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
8644 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
8645 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
8648 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8652 for (i = 0; i < uuid_count; i++) {
8653 if (!memcmp(uuid, uuids[i], 16))
8660 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
8664 while (parsed < eir_len) {
8665 u8 field_len = eir[0];
8672 if (eir_len - parsed < field_len + 1)
8676 case EIR_UUID16_ALL:
8677 case EIR_UUID16_SOME:
8678 for (i = 0; i + 3 <= field_len; i += 2) {
8679 memcpy(uuid, bluetooth_base_uuid, 16);
8680 uuid[13] = eir[i + 3];
8681 uuid[12] = eir[i + 2];
8682 if (has_uuid(uuid, uuid_count, uuids))
8686 case EIR_UUID32_ALL:
8687 case EIR_UUID32_SOME:
8688 for (i = 0; i + 5 <= field_len; i += 4) {
8689 memcpy(uuid, bluetooth_base_uuid, 16);
8690 uuid[15] = eir[i + 5];
8691 uuid[14] = eir[i + 4];
8692 uuid[13] = eir[i + 3];
8693 uuid[12] = eir[i + 2];
8694 if (has_uuid(uuid, uuid_count, uuids))
8698 case EIR_UUID128_ALL:
8699 case EIR_UUID128_SOME:
8700 for (i = 0; i + 17 <= field_len; i += 16) {
8701 memcpy(uuid, eir + i + 2, 16);
8702 if (has_uuid(uuid, uuid_count, uuids))
8708 parsed += field_len + 1;
8709 eir += field_len + 1;
8715 static void restart_le_scan(struct hci_dev *hdev)
8717 /* If controller is not scanning we are done. */
8718 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8721 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8722 hdev->discovery.scan_start +
8723 hdev->discovery.scan_duration))
8726 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
8727 DISCOV_LE_RESTART_DELAY);
8730 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8731 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8733 /* If a RSSI threshold has been specified, and
8734 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8735 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8736 * is set, let it through for further processing, as we might need to
8739 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8740 * the results are also dropped.
8742 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8743 (rssi == HCI_RSSI_INVALID ||
8744 (rssi < hdev->discovery.rssi &&
8745 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8748 if (hdev->discovery.uuid_count != 0) {
8749 /* If a list of UUIDs is provided in filter, results with no
8750 * matching UUID should be dropped.
8752 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8753 hdev->discovery.uuids) &&
8754 !eir_has_uuids(scan_rsp, scan_rsp_len,
8755 hdev->discovery.uuid_count,
8756 hdev->discovery.uuids))
8760 /* If duplicate filtering does not report RSSI changes, then restart
8761 * scanning to ensure updated result with updated RSSI values.
8763 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8764 restart_le_scan(hdev);
8766 /* Validate RSSI value against the RSSI threshold once more. */
8767 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8768 rssi < hdev->discovery.rssi)
8775 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8776 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
8777 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8780 struct mgmt_ev_device_found *ev = (void *)buf;
8783 /* Don't send events for a non-kernel initiated discovery. With
8784 * LE one exception is if we have pend_le_reports > 0 in which
8785 * case we're doing passive scanning and want these events.
8787 if (!hci_discovery_active(hdev)) {
8788 if (link_type == ACL_LINK)
8790 if (link_type == LE_LINK &&
8791 list_empty(&hdev->pend_le_reports) &&
8792 !hci_is_adv_monitoring(hdev)) {
8797 if (hdev->discovery.result_filtering) {
8798 /* We are using service discovery */
8799 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8804 if (hdev->discovery.limited) {
8805 /* Check for limited discoverable bit */
8807 if (!(dev_class[1] & 0x20))
8810 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
8811 if (!flags || !(flags[0] & LE_AD_LIMITED))
8816 /* Make sure that the buffer is big enough. The 5 extra bytes
8817 * are for the potential CoD field.
8819 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8822 memset(buf, 0, sizeof(buf));
8824 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8825 * RSSI value was reported as 0 when not available. This behavior
8826 * is kept when using device discovery. This is required for full
8827 * backwards compatibility with the API.
8829 * However when using service discovery, the value 127 will be
8830 * returned when the RSSI is not available.
8832 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8833 link_type == ACL_LINK)
8836 bacpy(&ev->addr.bdaddr, bdaddr);
8837 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8839 ev->flags = cpu_to_le32(flags);
8842 /* Copy EIR or advertising data into event */
8843 memcpy(ev->eir, eir, eir_len);
8845 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8847 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8850 if (scan_rsp_len > 0)
8851 /* Append scan response data to event */
8852 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8854 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8855 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8857 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
8860 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8861 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8863 struct mgmt_ev_device_found *ev;
8864 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8867 ev = (struct mgmt_ev_device_found *) buf;
8869 memset(buf, 0, sizeof(buf));
8871 bacpy(&ev->addr.bdaddr, bdaddr);
8872 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8875 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8878 ev->eir_len = cpu_to_le16(eir_len);
8880 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8883 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8885 struct mgmt_ev_discovering ev;
8887 bt_dev_dbg(hdev, "discovering %u", discovering);
8889 memset(&ev, 0, sizeof(ev));
8890 ev.type = hdev->discovery.type;
8891 ev.discovering = discovering;
8893 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8896 void mgmt_suspending(struct hci_dev *hdev, u8 state)
8898 struct mgmt_ev_controller_suspend ev;
8900 ev.suspend_state = state;
8901 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
8904 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
8907 struct mgmt_ev_controller_resume ev;
8909 ev.wake_reason = reason;
8911 bacpy(&ev.addr.bdaddr, bdaddr);
8912 ev.addr.type = addr_type;
8914 memset(&ev.addr, 0, sizeof(ev.addr));
8917 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
8920 static struct hci_mgmt_chan chan = {
8921 .channel = HCI_CHANNEL_CONTROL,
8922 .handler_count = ARRAY_SIZE(mgmt_handlers),
8923 .handlers = mgmt_handlers,
8924 .hdev_init = mgmt_init_hdev,
8929 return hci_mgmt_chan_register(&chan);
8932 void mgmt_exit(void)
8934 hci_mgmt_chan_unregister(&chan);