2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 21
45 static const u16 mgmt_commands[] = {
46 MGMT_OP_READ_INDEX_LIST,
49 MGMT_OP_SET_DISCOVERABLE,
50 MGMT_OP_SET_CONNECTABLE,
51 MGMT_OP_SET_FAST_CONNECTABLE,
53 MGMT_OP_SET_LINK_SECURITY,
57 MGMT_OP_SET_DEV_CLASS,
58 MGMT_OP_SET_LOCAL_NAME,
61 MGMT_OP_LOAD_LINK_KEYS,
62 MGMT_OP_LOAD_LONG_TERM_KEYS,
64 MGMT_OP_GET_CONNECTIONS,
65 MGMT_OP_PIN_CODE_REPLY,
66 MGMT_OP_PIN_CODE_NEG_REPLY,
67 MGMT_OP_SET_IO_CAPABILITY,
69 MGMT_OP_CANCEL_PAIR_DEVICE,
70 MGMT_OP_UNPAIR_DEVICE,
71 MGMT_OP_USER_CONFIRM_REPLY,
72 MGMT_OP_USER_CONFIRM_NEG_REPLY,
73 MGMT_OP_USER_PASSKEY_REPLY,
74 MGMT_OP_USER_PASSKEY_NEG_REPLY,
75 MGMT_OP_READ_LOCAL_OOB_DATA,
76 MGMT_OP_ADD_REMOTE_OOB_DATA,
77 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
78 MGMT_OP_START_DISCOVERY,
79 MGMT_OP_STOP_DISCOVERY,
82 MGMT_OP_UNBLOCK_DEVICE,
83 MGMT_OP_SET_DEVICE_ID,
84 MGMT_OP_SET_ADVERTISING,
86 MGMT_OP_SET_STATIC_ADDRESS,
87 MGMT_OP_SET_SCAN_PARAMS,
88 MGMT_OP_SET_SECURE_CONN,
89 MGMT_OP_SET_DEBUG_KEYS,
92 MGMT_OP_GET_CONN_INFO,
93 MGMT_OP_GET_CLOCK_INFO,
95 MGMT_OP_REMOVE_DEVICE,
96 MGMT_OP_LOAD_CONN_PARAM,
97 MGMT_OP_READ_UNCONF_INDEX_LIST,
98 MGMT_OP_READ_CONFIG_INFO,
99 MGMT_OP_SET_EXTERNAL_CONFIG,
100 MGMT_OP_SET_PUBLIC_ADDRESS,
101 MGMT_OP_START_SERVICE_DISCOVERY,
102 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
103 MGMT_OP_READ_EXT_INDEX_LIST,
104 MGMT_OP_READ_ADV_FEATURES,
105 MGMT_OP_ADD_ADVERTISING,
106 MGMT_OP_REMOVE_ADVERTISING,
107 MGMT_OP_GET_ADV_SIZE_INFO,
108 MGMT_OP_START_LIMITED_DISCOVERY,
109 MGMT_OP_READ_EXT_INFO,
110 MGMT_OP_SET_APPEARANCE,
111 MGMT_OP_GET_PHY_CONFIGURATION,
112 MGMT_OP_SET_PHY_CONFIGURATION,
113 MGMT_OP_SET_BLOCKED_KEYS,
114 MGMT_OP_SET_WIDEBAND_SPEECH,
115 MGMT_OP_READ_CONTROLLER_CAP,
116 MGMT_OP_READ_EXP_FEATURES_INFO,
117 MGMT_OP_SET_EXP_FEATURE,
118 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
119 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
120 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
121 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
122 MGMT_OP_GET_DEVICE_FLAGS,
123 MGMT_OP_SET_DEVICE_FLAGS,
124 MGMT_OP_READ_ADV_MONITOR_FEATURES,
125 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
126 MGMT_OP_REMOVE_ADV_MONITOR,
127 MGMT_OP_ADD_EXT_ADV_PARAMS,
128 MGMT_OP_ADD_EXT_ADV_DATA,
129 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 static const u16 mgmt_events[] = {
133 MGMT_EV_CONTROLLER_ERROR,
135 MGMT_EV_INDEX_REMOVED,
136 MGMT_EV_NEW_SETTINGS,
137 MGMT_EV_CLASS_OF_DEV_CHANGED,
138 MGMT_EV_LOCAL_NAME_CHANGED,
139 MGMT_EV_NEW_LINK_KEY,
140 MGMT_EV_NEW_LONG_TERM_KEY,
141 MGMT_EV_DEVICE_CONNECTED,
142 MGMT_EV_DEVICE_DISCONNECTED,
143 MGMT_EV_CONNECT_FAILED,
144 MGMT_EV_PIN_CODE_REQUEST,
145 MGMT_EV_USER_CONFIRM_REQUEST,
146 MGMT_EV_USER_PASSKEY_REQUEST,
148 MGMT_EV_DEVICE_FOUND,
150 MGMT_EV_DEVICE_BLOCKED,
151 MGMT_EV_DEVICE_UNBLOCKED,
152 MGMT_EV_DEVICE_UNPAIRED,
153 MGMT_EV_PASSKEY_NOTIFY,
156 MGMT_EV_DEVICE_ADDED,
157 MGMT_EV_DEVICE_REMOVED,
158 MGMT_EV_NEW_CONN_PARAM,
159 MGMT_EV_UNCONF_INDEX_ADDED,
160 MGMT_EV_UNCONF_INDEX_REMOVED,
161 MGMT_EV_NEW_CONFIG_OPTIONS,
162 MGMT_EV_EXT_INDEX_ADDED,
163 MGMT_EV_EXT_INDEX_REMOVED,
164 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
165 MGMT_EV_ADVERTISING_ADDED,
166 MGMT_EV_ADVERTISING_REMOVED,
167 MGMT_EV_EXT_INFO_CHANGED,
168 MGMT_EV_PHY_CONFIGURATION_CHANGED,
169 MGMT_EV_EXP_FEATURE_CHANGED,
170 MGMT_EV_DEVICE_FLAGS_CHANGED,
171 MGMT_EV_ADV_MONITOR_ADDED,
172 MGMT_EV_ADV_MONITOR_REMOVED,
173 MGMT_EV_CONTROLLER_SUSPEND,
174 MGMT_EV_CONTROLLER_RESUME,
/* Subset of mgmt commands that sockets without HCI_SOCK_TRUSTED may
 * issue; all visible entries are read-only information queries.
 */
177 static const u16 mgmt_untrusted_commands[] = {
178 MGMT_OP_READ_INDEX_LIST,
180 MGMT_OP_READ_UNCONF_INDEX_LIST,
181 MGMT_OP_READ_CONFIG_INFO,
182 MGMT_OP_READ_EXT_INDEX_LIST,
183 MGMT_OP_READ_EXT_INFO,
184 MGMT_OP_READ_CONTROLLER_CAP,
185 MGMT_OP_READ_EXP_FEATURES_INFO,
186 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
187 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Subset of mgmt events that are delivered to sockets without the
 * HCI_SOCK_TRUSTED flag (index/settings/name change notifications).
 */
190 static const u16 mgmt_untrusted_events[] = {
192 MGMT_EV_INDEX_REMOVED,
193 MGMT_EV_NEW_SETTINGS,
194 MGMT_EV_CLASS_OF_DEV_CHANGED,
195 MGMT_EV_LOCAL_NAME_CHANGED,
196 MGMT_EV_UNCONF_INDEX_ADDED,
197 MGMT_EV_UNCONF_INDEX_REMOVED,
198 MGMT_EV_NEW_CONFIG_OPTIONS,
199 MGMT_EV_EXT_INDEX_ADDED,
200 MGMT_EV_EXT_INDEX_REMOVED,
201 MGMT_EV_EXT_INFO_CHANGED,
202 MGMT_EV_EXP_FEATURE_CHANGED,
205 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
207 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
208 "\x00\x00\x00\x00\x00\x00\x00\x00"
210 /* HCI to MGMT error code conversion table */
211 static const u8 mgmt_status_table[] = {
213 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
214 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
215 MGMT_STATUS_FAILED, /* Hardware Failure */
216 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
217 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
218 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
219 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
220 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
221 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
222 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
223 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
224 MGMT_STATUS_BUSY, /* Command Disallowed */
225 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
226 MGMT_STATUS_REJECTED, /* Rejected Security */
227 MGMT_STATUS_REJECTED, /* Rejected Personal */
228 MGMT_STATUS_TIMEOUT, /* Host Timeout */
229 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
230 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
231 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
232 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
233 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
234 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
235 MGMT_STATUS_BUSY, /* Repeated Attempts */
236 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
237 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
238 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
239 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
240 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
241 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
242 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
243 MGMT_STATUS_FAILED, /* Unspecified Error */
244 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
245 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
246 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
247 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
248 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
249 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
250 MGMT_STATUS_FAILED, /* Unit Link Key Used */
251 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
252 MGMT_STATUS_TIMEOUT, /* Instant Passed */
253 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
254 MGMT_STATUS_FAILED, /* Transaction Collision */
255 MGMT_STATUS_FAILED, /* Reserved for future use */
256 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
257 MGMT_STATUS_REJECTED, /* QoS Rejected */
258 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
259 MGMT_STATUS_REJECTED, /* Insufficient Security */
260 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
261 MGMT_STATUS_FAILED, /* Reserved for future use */
262 MGMT_STATUS_BUSY, /* Role Switch Pending */
263 MGMT_STATUS_FAILED, /* Reserved for future use */
264 MGMT_STATUS_FAILED, /* Slot Violation */
265 MGMT_STATUS_FAILED, /* Role Switch Failed */
266 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
267 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
268 MGMT_STATUS_BUSY, /* Host Busy Pairing */
269 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
270 MGMT_STATUS_BUSY, /* Controller Busy */
271 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
272 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
273 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
274 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
275 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
278 static u8 mgmt_status(u8 hci_status)
280 if (hci_status < ARRAY_SIZE(mgmt_status_table))
281 return mgmt_status_table[hci_status];
283 return MGMT_STATUS_FAILED;
286 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
289 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
293 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
294 u16 len, int flag, struct sock *skip_sk)
296 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
300 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
301 struct sock *skip_sk)
303 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
304 HCI_SOCK_TRUSTED, skip_sk);
307 static u8 le_addr_type(u8 mgmt_addr_type)
309 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
310 return ADDR_LE_DEV_PUBLIC;
312 return ADDR_LE_DEV_RANDOM;
315 void mgmt_fill_version_info(void *ver)
317 struct mgmt_rp_read_version *rp = ver;
319 rp->version = MGMT_VERSION;
320 rp->revision = cpu_to_le16(MGMT_REVISION);
323 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
326 struct mgmt_rp_read_version rp;
328 bt_dev_dbg(hdev, "sock %p", sk);
330 mgmt_fill_version_info(&rp);
332 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
336 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
339 struct mgmt_rp_read_commands *rp;
340 u16 num_commands, num_events;
344 bt_dev_dbg(hdev, "sock %p", sk);
346 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
347 num_commands = ARRAY_SIZE(mgmt_commands);
348 num_events = ARRAY_SIZE(mgmt_events);
350 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
351 num_events = ARRAY_SIZE(mgmt_untrusted_events);
354 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
356 rp = kmalloc(rp_size, GFP_KERNEL);
360 rp->num_commands = cpu_to_le16(num_commands);
361 rp->num_events = cpu_to_le16(num_events);
363 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
364 __le16 *opcode = rp->opcodes;
366 for (i = 0; i < num_commands; i++, opcode++)
367 put_unaligned_le16(mgmt_commands[i], opcode);
369 for (i = 0; i < num_events; i++, opcode++)
370 put_unaligned_le16(mgmt_events[i], opcode);
372 __le16 *opcode = rp->opcodes;
374 for (i = 0; i < num_commands; i++, opcode++)
375 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
377 for (i = 0; i < num_events; i++, opcode++)
378 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
381 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
388 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
391 struct mgmt_rp_read_index_list *rp;
397 bt_dev_dbg(hdev, "sock %p", sk);
399 read_lock(&hci_dev_list_lock);
402 list_for_each_entry(d, &hci_dev_list, list) {
403 if (d->dev_type == HCI_PRIMARY &&
404 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
408 rp_len = sizeof(*rp) + (2 * count);
409 rp = kmalloc(rp_len, GFP_ATOMIC);
411 read_unlock(&hci_dev_list_lock);
416 list_for_each_entry(d, &hci_dev_list, list) {
417 if (hci_dev_test_flag(d, HCI_SETUP) ||
418 hci_dev_test_flag(d, HCI_CONFIG) ||
419 hci_dev_test_flag(d, HCI_USER_CHANNEL))
422 /* Devices marked as raw-only are neither configured
423 * nor unconfigured controllers.
425 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
428 if (d->dev_type == HCI_PRIMARY &&
429 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
430 rp->index[count++] = cpu_to_le16(d->id);
431 bt_dev_dbg(hdev, "Added hci%u", d->id);
435 rp->num_controllers = cpu_to_le16(count);
436 rp_len = sizeof(*rp) + (2 * count);
438 read_unlock(&hci_dev_list_lock);
440 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
448 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
449 void *data, u16 data_len)
451 struct mgmt_rp_read_unconf_index_list *rp;
457 bt_dev_dbg(hdev, "sock %p", sk);
459 read_lock(&hci_dev_list_lock);
462 list_for_each_entry(d, &hci_dev_list, list) {
463 if (d->dev_type == HCI_PRIMARY &&
464 hci_dev_test_flag(d, HCI_UNCONFIGURED))
468 rp_len = sizeof(*rp) + (2 * count);
469 rp = kmalloc(rp_len, GFP_ATOMIC);
471 read_unlock(&hci_dev_list_lock);
476 list_for_each_entry(d, &hci_dev_list, list) {
477 if (hci_dev_test_flag(d, HCI_SETUP) ||
478 hci_dev_test_flag(d, HCI_CONFIG) ||
479 hci_dev_test_flag(d, HCI_USER_CHANNEL))
482 /* Devices marked as raw-only are neither configured
483 * nor unconfigured controllers.
485 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
488 if (d->dev_type == HCI_PRIMARY &&
489 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
490 rp->index[count++] = cpu_to_le16(d->id);
491 bt_dev_dbg(hdev, "Added hci%u", d->id);
495 rp->num_controllers = cpu_to_le16(count);
496 rp_len = sizeof(*rp) + (2 * count);
498 read_unlock(&hci_dev_list_lock);
500 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
501 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
508 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
509 void *data, u16 data_len)
511 struct mgmt_rp_read_ext_index_list *rp;
516 bt_dev_dbg(hdev, "sock %p", sk);
518 read_lock(&hci_dev_list_lock);
521 list_for_each_entry(d, &hci_dev_list, list) {
522 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
526 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
528 read_unlock(&hci_dev_list_lock);
533 list_for_each_entry(d, &hci_dev_list, list) {
534 if (hci_dev_test_flag(d, HCI_SETUP) ||
535 hci_dev_test_flag(d, HCI_CONFIG) ||
536 hci_dev_test_flag(d, HCI_USER_CHANNEL))
539 /* Devices marked as raw-only are neither configured
540 * nor unconfigured controllers.
542 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
545 if (d->dev_type == HCI_PRIMARY) {
546 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
547 rp->entry[count].type = 0x01;
549 rp->entry[count].type = 0x00;
550 } else if (d->dev_type == HCI_AMP) {
551 rp->entry[count].type = 0x02;
556 rp->entry[count].bus = d->bus;
557 rp->entry[count++].index = cpu_to_le16(d->id);
558 bt_dev_dbg(hdev, "Added hci%u", d->id);
561 rp->num_controllers = cpu_to_le16(count);
563 read_unlock(&hci_dev_list_lock);
565 /* If this command is called at least once, then all the
566 * default index and unconfigured index events are disabled
567 * and from now on only extended index events are used.
569 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
570 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
571 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
573 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
574 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
575 struct_size(rp, entry, count));
582 static bool is_configured(struct hci_dev *hdev)
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
585 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
588 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
589 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
590 !bacmp(&hdev->public_addr, BDADDR_ANY))
596 static __le32 get_missing_options(struct hci_dev *hdev)
600 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
601 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
602 options |= MGMT_OPTION_EXTERNAL_CONFIG;
604 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
605 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
606 !bacmp(&hdev->public_addr, BDADDR_ANY))
607 options |= MGMT_OPTION_PUBLIC_ADDRESS;
609 return cpu_to_le32(options);
612 static int new_options(struct hci_dev *hdev, struct sock *skip)
614 __le32 options = get_missing_options(hdev);
616 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
617 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
620 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
622 __le32 options = get_missing_options(hdev);
624 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
628 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
629 void *data, u16 data_len)
631 struct mgmt_rp_read_config_info rp;
634 bt_dev_dbg(hdev, "sock %p", sk);
638 memset(&rp, 0, sizeof(rp));
639 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
641 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
642 options |= MGMT_OPTION_EXTERNAL_CONFIG;
644 if (hdev->set_bdaddr)
645 options |= MGMT_OPTION_PUBLIC_ADDRESS;
647 rp.supported_options = cpu_to_le32(options);
648 rp.missing_options = get_missing_options(hdev);
650 hci_dev_unlock(hdev);
652 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
656 static u32 get_supported_phys(struct hci_dev *hdev)
658 u32 supported_phys = 0;
660 if (lmp_bredr_capable(hdev)) {
661 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
663 if (hdev->features[0][0] & LMP_3SLOT)
664 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
666 if (hdev->features[0][0] & LMP_5SLOT)
667 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
669 if (lmp_edr_2m_capable(hdev)) {
670 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
672 if (lmp_edr_3slot_capable(hdev))
673 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
675 if (lmp_edr_5slot_capable(hdev))
676 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
678 if (lmp_edr_3m_capable(hdev)) {
679 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
681 if (lmp_edr_3slot_capable(hdev))
682 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
684 if (lmp_edr_5slot_capable(hdev))
685 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
690 if (lmp_le_capable(hdev)) {
691 supported_phys |= MGMT_PHY_LE_1M_TX;
692 supported_phys |= MGMT_PHY_LE_1M_RX;
694 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
695 supported_phys |= MGMT_PHY_LE_2M_TX;
696 supported_phys |= MGMT_PHY_LE_2M_RX;
699 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
700 supported_phys |= MGMT_PHY_LE_CODED_TX;
701 supported_phys |= MGMT_PHY_LE_CODED_RX;
705 return supported_phys;
708 static u32 get_selected_phys(struct hci_dev *hdev)
710 u32 selected_phys = 0;
712 if (lmp_bredr_capable(hdev)) {
713 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
715 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
716 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
718 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
719 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
721 if (lmp_edr_2m_capable(hdev)) {
722 if (!(hdev->pkt_type & HCI_2DH1))
723 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
725 if (lmp_edr_3slot_capable(hdev) &&
726 !(hdev->pkt_type & HCI_2DH3))
727 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
729 if (lmp_edr_5slot_capable(hdev) &&
730 !(hdev->pkt_type & HCI_2DH5))
731 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
733 if (lmp_edr_3m_capable(hdev)) {
734 if (!(hdev->pkt_type & HCI_3DH1))
735 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
737 if (lmp_edr_3slot_capable(hdev) &&
738 !(hdev->pkt_type & HCI_3DH3))
739 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
741 if (lmp_edr_5slot_capable(hdev) &&
742 !(hdev->pkt_type & HCI_3DH5))
743 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
748 if (lmp_le_capable(hdev)) {
749 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
750 selected_phys |= MGMT_PHY_LE_1M_TX;
752 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
753 selected_phys |= MGMT_PHY_LE_1M_RX;
755 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
756 selected_phys |= MGMT_PHY_LE_2M_TX;
758 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
759 selected_phys |= MGMT_PHY_LE_2M_RX;
761 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
762 selected_phys |= MGMT_PHY_LE_CODED_TX;
764 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
765 selected_phys |= MGMT_PHY_LE_CODED_RX;
768 return selected_phys;
771 static u32 get_configurable_phys(struct hci_dev *hdev)
773 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
774 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
777 static u32 get_supported_settings(struct hci_dev *hdev)
781 settings |= MGMT_SETTING_POWERED;
782 settings |= MGMT_SETTING_BONDABLE;
783 settings |= MGMT_SETTING_DEBUG_KEYS;
784 settings |= MGMT_SETTING_CONNECTABLE;
785 settings |= MGMT_SETTING_DISCOVERABLE;
787 if (lmp_bredr_capable(hdev)) {
788 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
789 settings |= MGMT_SETTING_FAST_CONNECTABLE;
790 settings |= MGMT_SETTING_BREDR;
791 settings |= MGMT_SETTING_LINK_SECURITY;
793 if (lmp_ssp_capable(hdev)) {
794 settings |= MGMT_SETTING_SSP;
795 if (IS_ENABLED(CONFIG_BT_HS))
796 settings |= MGMT_SETTING_HS;
799 if (lmp_sc_capable(hdev))
800 settings |= MGMT_SETTING_SECURE_CONN;
802 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
804 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
807 if (lmp_le_capable(hdev)) {
808 settings |= MGMT_SETTING_LE;
809 settings |= MGMT_SETTING_SECURE_CONN;
810 settings |= MGMT_SETTING_PRIVACY;
811 settings |= MGMT_SETTING_STATIC_ADDRESS;
813 /* When the experimental feature for LL Privacy support is
814 * enabled, then advertising is no longer supported.
816 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
817 settings |= MGMT_SETTING_ADVERTISING;
820 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
822 settings |= MGMT_SETTING_CONFIGURATION;
824 settings |= MGMT_SETTING_PHY_CONFIGURATION;
829 static u32 get_current_settings(struct hci_dev *hdev)
833 if (hdev_is_powered(hdev))
834 settings |= MGMT_SETTING_POWERED;
836 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
837 settings |= MGMT_SETTING_CONNECTABLE;
839 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
840 settings |= MGMT_SETTING_FAST_CONNECTABLE;
842 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
843 settings |= MGMT_SETTING_DISCOVERABLE;
845 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
846 settings |= MGMT_SETTING_BONDABLE;
848 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
849 settings |= MGMT_SETTING_BREDR;
851 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
852 settings |= MGMT_SETTING_LE;
854 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
855 settings |= MGMT_SETTING_LINK_SECURITY;
857 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
858 settings |= MGMT_SETTING_SSP;
860 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
861 settings |= MGMT_SETTING_HS;
863 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
864 settings |= MGMT_SETTING_ADVERTISING;
866 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
867 settings |= MGMT_SETTING_SECURE_CONN;
869 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
870 settings |= MGMT_SETTING_DEBUG_KEYS;
872 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
873 settings |= MGMT_SETTING_PRIVACY;
875 /* The current setting for static address has two purposes. The
876 * first is to indicate if the static address will be used and
877 * the second is to indicate if it is actually set.
879 * This means if the static address is not configured, this flag
880 * will never be set. If the address is configured, then if the
881 * address is actually used decides if the flag is set or not.
883 * For single mode LE only controllers and dual-mode controllers
884 * with BR/EDR disabled, the existence of the static address will
887 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
888 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
889 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
890 if (bacmp(&hdev->static_addr, BDADDR_ANY))
891 settings |= MGMT_SETTING_STATIC_ADDRESS;
894 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
895 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
900 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
902 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
905 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
906 struct hci_dev *hdev,
909 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
912 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
914 struct mgmt_pending_cmd *cmd;
916 /* If there's a pending mgmt command the flags will not yet have
917 * their final values, so check for this first.
919 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
921 struct mgmt_mode *cp = cmd->param;
923 return LE_AD_GENERAL;
924 else if (cp->val == 0x02)
925 return LE_AD_LIMITED;
927 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
928 return LE_AD_LIMITED;
929 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
930 return LE_AD_GENERAL;
936 bool mgmt_get_connectable(struct hci_dev *hdev)
938 struct mgmt_pending_cmd *cmd;
940 /* If there's a pending mgmt command the flag will not yet have
941 * it's final value, so check for this first.
943 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
945 struct mgmt_mode *cp = cmd->param;
950 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
953 static void service_cache_off(struct work_struct *work)
955 struct hci_dev *hdev = container_of(work, struct hci_dev,
957 struct hci_request req;
959 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
962 hci_req_init(&req, hdev);
966 __hci_req_update_eir(&req);
967 __hci_req_update_class(&req);
969 hci_dev_unlock(hdev);
971 hci_req_run(&req, NULL);
974 static void rpa_expired(struct work_struct *work)
976 struct hci_dev *hdev = container_of(work, struct hci_dev,
978 struct hci_request req;
980 bt_dev_dbg(hdev, "");
982 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
984 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
987 /* The generation of a new RPA and programming it into the
988 * controller happens in the hci_req_enable_advertising()
991 hci_req_init(&req, hdev);
992 if (ext_adv_capable(hdev))
993 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
995 __hci_req_enable_advertising(&req);
996 hci_req_run(&req, NULL);
999 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1001 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1004 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1005 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1007 /* Non-mgmt controlled devices get this bit set
1008 * implicitly so that pairing works for them, however
1009 * for mgmt we require user-space to explicitly enable
1012 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1015 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1016 void *data, u16 data_len)
1018 struct mgmt_rp_read_info rp;
1020 bt_dev_dbg(hdev, "sock %p", sk);
1024 memset(&rp, 0, sizeof(rp));
1026 bacpy(&rp.bdaddr, &hdev->bdaddr);
1028 rp.version = hdev->hci_ver;
1029 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1031 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1032 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1034 memcpy(rp.dev_class, hdev->dev_class, 3);
1036 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1037 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1039 hci_dev_unlock(hdev);
1041 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1045 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1050 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1051 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1052 hdev->dev_class, 3);
1054 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1055 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1058 name_len = strlen(hdev->dev_name);
1059 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1060 hdev->dev_name, name_len);
1062 name_len = strlen(hdev->short_name);
1063 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1064 hdev->short_name, name_len);
1069 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1070 void *data, u16 data_len)
1073 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1076 bt_dev_dbg(hdev, "sock %p", sk);
1078 memset(&buf, 0, sizeof(buf));
1082 bacpy(&rp->bdaddr, &hdev->bdaddr);
1084 rp->version = hdev->hci_ver;
1085 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1087 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1088 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1091 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1092 rp->eir_len = cpu_to_le16(eir_len);
1094 hci_dev_unlock(hdev);
1096 /* If this command is called at least once, then the events
1097 * for class of device and local name changes are disabled
1098 * and only the new extended controller information event
1101 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1102 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1103 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1105 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1106 sizeof(*rp) + eir_len);
1109 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1112 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1115 memset(buf, 0, sizeof(buf));
1117 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1118 ev->eir_len = cpu_to_le16(eir_len);
1120 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1121 sizeof(*ev) + eir_len,
1122 HCI_MGMT_EXT_INFO_EVENTS, skip);
1125 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1127 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1129 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1133 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1135 bt_dev_dbg(hdev, "status 0x%02x", status);
1137 if (hci_conn_count(hdev) == 0) {
1138 cancel_delayed_work(&hdev->power_off);
1139 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1143 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1145 struct mgmt_ev_advertising_added ev;
1147 ev.instance = instance;
1149 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1152 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1155 struct mgmt_ev_advertising_removed ev;
1157 ev.instance = instance;
1159 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1162 static void cancel_adv_timeout(struct hci_dev *hdev)
1164 if (hdev->adv_instance_timeout) {
1165 hdev->adv_instance_timeout = 0;
1166 cancel_delayed_work(&hdev->adv_instance_expire);
1170 static int clean_up_hci_state(struct hci_dev *hdev)
1172 struct hci_request req;
1173 struct hci_conn *conn;
1174 bool discov_stopped;
1177 hci_req_init(&req, hdev);
1179 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1180 test_bit(HCI_PSCAN, &hdev->flags)) {
1182 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1185 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1187 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1188 __hci_req_disable_advertising(&req);
1190 discov_stopped = hci_req_stop_discovery(&req);
1192 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1193 /* 0x15 == Terminated due to Power Off */
1194 __hci_abort_conn(&req, conn, 0x15);
1197 err = hci_req_run(&req, clean_up_hci_complete);
1198 if (!err && discov_stopped)
1199 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* Handle the MGMT Set Powered command: power the controller on via the
 * power_on work item, or cleanly shut it down (disconnect, stop scans)
 * before scheduling power_off.  Rejects values other than 0x00/0x01 and
 * a command already in progress.
 */
1204 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1207 struct mgmt_mode *cp = data;
1208 struct mgmt_pending_cmd *cmd;
1211 bt_dev_dbg(hdev, "sock %p", sk);
1213 if (cp->val != 0x00 && cp->val != 0x01)
1214 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1215 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Powered command may be pending at a time */
1219 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1220 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Requested state already matches: just confirm current settings */
1225 if (!!cp->val == hdev_is_powered(hdev)) {
1226 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1230 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1237 queue_work(hdev->req_workqueue, &hdev->power_on);
1240 /* Disconnect connections, stop scans, etc */
1241 err = clean_up_hci_state(hdev);
/* Give the cleanup commands time to complete before forcing power off */
1243 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1244 HCI_POWER_OFF_TIMEOUT);
1246 /* ENODATA means there were no HCI commands queued */
1247 if (err == -ENODATA) {
1248 cancel_delayed_work(&hdev->power_off);
1249 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1255 hci_dev_unlock(hdev);
1259 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1261 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1263 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1264 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1267 int mgmt_new_settings(struct hci_dev *hdev)
1269 return new_settings(hdev, NULL);
/* NOTE(review): partial view of the cmd_lookup struct definition — the
 * surrounding declaration lines are not visible in this chunk.
 */
1274 struct hci_dev *hdev;
1278 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1280 struct cmd_lookup *match = data;
1282 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1284 list_del(&cmd->list);
1286 if (match->sk == NULL) {
1287 match->sk = cmd->sk;
1288 sock_hold(match->sk);
1291 mgmt_pending_free(cmd);
1294 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1298 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1299 mgmt_pending_remove(cmd);
1302 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1304 if (cmd->cmd_complete) {
1307 cmd->cmd_complete(cmd, *status);
1308 mgmt_pending_remove(cmd);
1313 cmd_status_rsp(cmd, data);
1316 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1318 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1319 cmd->param, cmd->param_len);
1322 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1324 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1325 cmd->param, sizeof(struct mgmt_addr_info));
1328 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1330 if (!lmp_bredr_capable(hdev))
1331 return MGMT_STATUS_NOT_SUPPORTED;
1332 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1333 return MGMT_STATUS_REJECTED;
1335 return MGMT_STATUS_SUCCESS;
1338 static u8 mgmt_le_support(struct hci_dev *hdev)
1340 if (!lmp_le_capable(hdev))
1341 return MGMT_STATUS_NOT_SUPPORTED;
1342 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1343 return MGMT_STATUS_REJECTED;
1345 return MGMT_STATUS_SUCCESS;
/* Completion handler for the Set Discoverable request: on failure, send
 * a command status and drop the limited-discoverable flag; on success,
 * (re)arm the discoverable timeout if one is configured and confirm the
 * new settings to the requester and all other sockets.
 */
1348 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1350 struct mgmt_pending_cmd *cmd;
1352 bt_dev_dbg(hdev, "status 0x%02x", status);
1356 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1361 u8 mgmt_err = mgmt_status(status);
1362 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1363 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the discoverable timeout now that the mode change took effect */
1367 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1368 hdev->discov_timeout > 0) {
1369 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1370 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1373 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1374 new_settings(hdev, cmd->sk);
1377 mgmt_pending_remove(cmd);
1380 hci_dev_unlock(hdev);
/* Handle the MGMT Set Discoverable command.  Values: 0x00 off, 0x01
 * general discoverable, 0x02 limited discoverable (requires a timeout).
 * Powered-off devices only get their flags updated; powered devices
 * queue the discoverable_update work and the completion handler arms
 * the timeout.
 */
1383 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1386 struct mgmt_cp_set_discoverable *cp = data;
1387 struct mgmt_pending_cmd *cmd;
1391 bt_dev_dbg(hdev, "sock %p", sk);
/* At least one of BR/EDR or LE must be enabled */
1393 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1394 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1395 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1396 MGMT_STATUS_REJECTED);
1398 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1400 MGMT_STATUS_INVALID_PARAMS);
1402 timeout = __le16_to_cpu(cp->timeout);
1404 /* Disabling discoverable requires that no timeout is set,
1405 * and enabling limited discoverable requires a timeout.
1407 if ((cp->val == 0x00 && timeout > 0) ||
1408 (cp->val == 0x02 && timeout == 0))
1409 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1410 MGMT_STATUS_INVALID_PARAMS);
/* A timeout can only be armed while the controller is powered */
1414 if (!hdev_is_powered(hdev) && timeout > 0) {
1415 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1416 MGMT_STATUS_NOT_POWERED);
/* Reject while a conflicting mode change is still in flight */
1420 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1421 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1422 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled first */
1427 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1428 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1429 MGMT_STATUS_REJECTED);
/* Advertising is temporarily paused; mode changes must wait */
1433 if (hdev->advertising_paused) {
1434 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Powered off: just toggle the flag and report settings */
1439 if (!hdev_is_powered(hdev)) {
1440 bool changed = false;
1442 /* Setting limited discoverable when powered off is
1443 * not a valid operation since it requires a timeout
1444 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1446 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1447 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1451 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1456 err = new_settings(hdev, sk);
1461 /* If the current mode is the same, then just update the timeout
1462 * value with the new value. And if only the timeout gets updated,
1463 * then no need for any HCI transactions.
1465 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1466 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1467 HCI_LIMITED_DISCOVERABLE)) {
1468 cancel_delayed_work(&hdev->discov_off);
1469 hdev->discov_timeout = timeout;
1471 if (cp->val && hdev->discov_timeout > 0) {
1472 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1473 queue_delayed_work(hdev->req_workqueue,
1474 &hdev->discov_off, to);
1477 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1481 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1487 /* Cancel any potential discoverable timeout that might be
1488 * still active and store new timeout value. The arming of
1489 * the timeout happens in the complete handler.
1491 cancel_delayed_work(&hdev->discov_off);
1492 hdev->discov_timeout = timeout;
1495 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1497 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1499 /* Limited discoverable mode */
1500 if (cp->val == 0x02)
1501 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1503 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Defer the actual HCI work to the request workqueue */
1505 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1509 hci_dev_unlock(hdev);
/* Completion handler for the Set Connectable request: report failure
 * via command status, or confirm the new settings to the requester and
 * broadcast them to the remaining sockets.
 */
1513 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1515 struct mgmt_pending_cmd *cmd;
1517 bt_dev_dbg(hdev, "status 0x%02x", status);
1521 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1526 u8 mgmt_err = mgmt_status(status);
1527 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1531 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1532 new_settings(hdev, cmd->sk);
1535 mgmt_pending_remove(cmd);
1538 hci_dev_unlock(hdev);
/* Flag-only path of Set Connectable (no HCI traffic): toggle the
 * CONNECTABLE flag (clearing DISCOVERABLE when turning connectable
 * off), answer the requester and refresh scan state.
 */
1541 static int set_connectable_update_settings(struct hci_dev *hdev,
1542 struct sock *sk, u8 val)
1544 bool changed = false;
1547 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1551 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off also implies non-discoverable */
1553 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1554 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1557 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1562 hci_req_update_scan(hdev);
1563 hci_update_background_scan(hdev);
1564 return new_settings(hdev, sk);
/* Handle the MGMT Set Connectable command.  When powered off this is a
 * pure flag update; when powered it records the new flags and queues
 * the connectable_update work to issue the HCI commands.
 */
1570 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1573 struct mgmt_mode *cp = data;
1574 struct mgmt_pending_cmd *cmd;
1577 bt_dev_dbg(hdev, "sock %p", sk);
/* At least one of BR/EDR or LE must be enabled */
1579 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1580 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1581 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1582 MGMT_STATUS_REJECTED);
1584 if (cp->val != 0x00 && cp->val != 0x01)
1585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1586 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: update flags only */
1590 if (!hdev_is_powered(hdev)) {
1591 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Reject while a conflicting mode change is still in flight */
1595 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1596 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1597 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1602 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1609 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off cancels any running discoverable timeout
 * and clears both discoverable flags as well.
 */
1611 if (hdev->discov_timeout > 0)
1612 cancel_delayed_work(&hdev->discov_off);
1614 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1615 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1616 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1619 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1623 hci_dev_unlock(hdev);
/* Handle the MGMT Set Bondable command: toggle the BONDABLE flag and,
 * when the change may affect the advertised address in limited privacy
 * mode, queue a discoverable update to refresh it.
 */
1627 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1630 struct mgmt_mode *cp = data;
1634 bt_dev_dbg(hdev, "sock %p", sk);
1636 if (cp->val != 0x00 && cp->val != 0x01)
1637 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1638 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear report whether the flag actually changed */
1643 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1645 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1647 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1652 /* In limited privacy mode the change of bondable mode
1653 * may affect the local advertising address.
1655 if (hdev_is_powered(hdev) &&
1656 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1657 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1658 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1659 queue_work(hdev->req_workqueue,
1660 &hdev->discoverable_update);
1662 err = new_settings(hdev, sk);
1666 hci_dev_unlock(hdev);
/* Handle the MGMT Set Link Security command (BR/EDR authentication).
 * Powered off: flag-only update.  Powered on: send the HCI Write Auth
 * Enable command unless the controller already matches the request.
 */
1670 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1673 struct mgmt_mode *cp = data;
1674 struct mgmt_pending_cmd *cmd;
1678 bt_dev_dbg(hdev, "sock %p", sk);
1680 status = mgmt_bredr_support(hdev);
1682 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1685 if (cp->val != 0x00 && cp->val != 0x01)
1686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1687 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: only the LINK_SECURITY flag is updated */
1691 if (!hdev_is_powered(hdev)) {
1692 bool changed = false;
1694 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1695 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1699 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1704 err = new_settings(hdev, sk);
1709 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1710 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller state already matches the requested value */
1717 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1718 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1722 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1728 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1730 mgmt_pending_remove(cmd);
1735 hci_dev_unlock(hdev);
/* Handle the MGMT Set Secure Simple Pairing command.  Powered off:
 * flag-only update (disabling SSP also drops HS).  Powered on: send
 * HCI Write SSP Mode, first leaving SSP debug mode if it was active.
 */
1739 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1741 struct mgmt_mode *cp = data;
1742 struct mgmt_pending_cmd *cmd;
1746 bt_dev_dbg(hdev, "sock %p", sk);
1748 status = mgmt_bredr_support(hdev);
1750 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1752 if (!lmp_ssp_capable(hdev))
1753 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1754 MGMT_STATUS_NOT_SUPPORTED);
1756 if (cp->val != 0x00 && cp->val != 0x01)
1757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1758 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag updates only */
1762 if (!hdev_is_powered(hdev)) {
1766 changed = !hci_dev_test_and_set_flag(hdev,
1769 changed = hci_dev_test_and_clear_flag(hdev,
1772 changed = hci_dev_test_and_clear_flag(hdev,
/* Disabling SSP also disables High Speed support */
1775 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1778 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1783 err = new_settings(hdev, sk);
1788 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1789 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1794 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1795 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1799 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Leave SSP debug mode before disabling SSP itself */
1805 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1806 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1807 sizeof(cp->val), &cp->val);
1809 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1811 mgmt_pending_remove(cmd);
1816 hci_dev_unlock(hdev);
/* Handle the MGMT Set High Speed command: a pure host-side flag toggle
 * (no HCI traffic), valid only when CONFIG_BT_HS is built in, SSP is
 * enabled, and no Set SSP command is pending.
 */
1820 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1822 struct mgmt_mode *cp = data;
1827 bt_dev_dbg(hdev, "sock %p", sk);
1829 if (!IS_ENABLED(CONFIG_BT_HS))
1830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1831 MGMT_STATUS_NOT_SUPPORTED);
1833 status = mgmt_bredr_support(hdev);
1835 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1837 if (!lmp_ssp_capable(hdev))
1838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1839 MGMT_STATUS_NOT_SUPPORTED);
/* HS requires SSP to be enabled first */
1841 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1843 MGMT_STATUS_REJECTED);
1845 if (cp->val != 0x00 && cp->val != 0x01)
1846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1847 MGMT_STATUS_INVALID_PARAMS);
1851 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1852 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1858 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS while powered is rejected */
1860 if (hdev_is_powered(hdev)) {
1861 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1862 MGMT_STATUS_REJECTED);
1866 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1869 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1874 err = new_settings(hdev, sk);
1877 hci_dev_unlock(hdev);
/* HCI completion handler for the Set LE request: fail or answer all
 * pending Set LE commands, broadcast new settings, and — when LE was
 * just enabled — refresh the default advertising/scan response data
 * and background scan.
 */
1881 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1883 struct cmd_lookup match = { NULL, hdev };
/* On failure every pending Set LE command gets a status response */
1888 u8 mgmt_err = mgmt_status(status);
1890 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1895 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1897 new_settings(hdev, match.sk);
1902 /* Make sure the controller has a good default for
1903 * advertising data. Restrict the update to when LE
1904 * has actually been enabled. During power on, the
1905 * update in powered_update_hci will take care of it.
1907 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1908 struct hci_request req;
1909 hci_req_init(&req, hdev);
1910 if (ext_adv_capable(hdev)) {
1913 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1915 __hci_req_update_scan_rsp_data(&req, 0x00);
1917 __hci_req_update_adv_data(&req, 0x00);
1918 __hci_req_update_scan_rsp_data(&req, 0x00);
1920 hci_req_run(&req, NULL);
1921 hci_update_background_scan(hdev);
1925 hci_dev_unlock(hdev);
/* Handle the MGMT Set Low Energy command: toggle host LE support via
 * HCI Write LE Host Supported.  LE-only controllers cannot have LE
 * switched off; powered-off or no-op requests are flag-only.
 */
1928 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1930 struct mgmt_mode *cp = data;
1931 struct hci_cp_write_le_host_supported hci_cp;
1932 struct mgmt_pending_cmd *cmd;
1933 struct hci_request req;
1937 bt_dev_dbg(hdev, "sock %p", sk);
1939 if (!lmp_le_capable(hdev))
1940 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1941 MGMT_STATUS_NOT_SUPPORTED);
1943 if (cp->val != 0x00 && cp->val != 0x01)
1944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1945 MGMT_STATUS_INVALID_PARAMS);
1947 /* Bluetooth single mode LE only controllers or dual-mode
1948 * controllers configured as LE only devices, do not allow
1949 * switching LE off. These have either LE enabled explicitly
1950 * or BR/EDR has been previously switched off.
1952 * When trying to enable an already enabled LE, then gracefully
1953 * send a positive response. Trying to disable it however will
1954 * result into rejection.
1956 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1957 if (cp->val == 0x01)
1958 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1960 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1961 MGMT_STATUS_REJECTED);
1967 enabled = lmp_host_le_capable(hdev);
/* Disabling LE drops all advertising instances */
1970 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
/* Powered off, or controller already in the requested state:
 * update flags only, no HCI traffic.
 */
1972 if (!hdev_is_powered(hdev) || val == enabled) {
1973 bool changed = false;
1975 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1976 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1980 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1981 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1985 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1990 err = new_settings(hdev, sk);
1995 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1996 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1997 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2002 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2008 hci_req_init(&req, hdev);
2010 memset(&hci_cp, 0, sizeof(hci_cp));
2014 hci_cp.simul = 0x00;
/* Stop advertising before turning LE off */
2016 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2017 __hci_req_disable_advertising(&req);
2019 if (ext_adv_capable(hdev))
2020 __hci_req_clear_ext_adv_sets(&req);
2023 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2026 err = hci_req_run(&req, le_enable_complete);
2028 mgmt_pending_remove(cmd);
2031 hci_dev_unlock(hdev);
2035 /* This is a helper function to test for pending mgmt commands that can
2036 * cause CoD or EIR HCI commands. We can only allow one such pending
2037 * mgmt command at a time since otherwise we cannot easily track what
2038 * the current values are, will be, and based on that calculate if a new
2039 * HCI command needs to be sent and if yes with what value.
/* Return true when a pending mgmt command that may touch CoD or EIR
 * (Add/Remove UUID, Set Device Class, Set Powered) is already queued.
 */
2041 static bool pending_eir_or_class(struct hci_dev *hdev)
2043 struct mgmt_pending_cmd *cmd;
2045 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2046 switch (cmd->opcode) {
2047 case MGMT_OP_ADD_UUID:
2048 case MGMT_OP_REMOVE_UUID:
2049 case MGMT_OP_SET_DEV_CLASS:
2050 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID in little-endian byte order; get_uuid_size()
 * compares the first 12 bytes against this to distinguish shortened
 * (16/32-bit) UUIDs from full 128-bit ones.
 */
2058 static const u8 bluetooth_base_uuid[] = {
2059 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2060 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID by size: UUIDs not derived from the
 * Bluetooth Base UUID are full 128-bit; otherwise the value encoded at
 * bytes 12..15 decides between the shortened forms.  (The return
 * statements fall outside this view.)
 */
2063 static u8 get_uuid_size(const u8 *uuid)
2067 if (memcmp(uuid, bluetooth_base_uuid, 12))
2070 val = get_unaligned_le32(&uuid[12]);
/* Finish a pending class-related mgmt command (@mgmt_op) by answering
 * it with the translated status and the current 3-byte device class.
 */
2077 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2079 struct mgmt_pending_cmd *cmd;
2083 cmd = pending_find(mgmt_op, hdev);
2087 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2088 mgmt_status(status), hdev->dev_class, 3);
2090 mgmt_pending_remove(cmd);
2093 hci_dev_unlock(hdev);
2096 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2098 bt_dev_dbg(hdev, "status 0x%02x", status);
2100 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handle the MGMT Add UUID command: append the UUID to hdev->uuids and
 * queue HCI updates for the device class and EIR.  When no HCI command
 * is needed (-ENODATA) the command completes immediately.
 */
2103 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2105 struct mgmt_cp_add_uuid *cp = data;
2106 struct mgmt_pending_cmd *cmd;
2107 struct hci_request req;
2108 struct bt_uuid *uuid;
2111 bt_dev_dbg(hdev, "sock %p", sk);
/* Serialize against other CoD/EIR-touching commands */
2115 if (pending_eir_or_class(hdev)) {
2116 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2121 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2127 memcpy(uuid->uuid, cp->uuid, 16);
2128 uuid->svc_hint = cp->svc_hint;
2129 uuid->size = get_uuid_size(cp->uuid);
2131 list_add_tail(&uuid->list, &hdev->uuids);
2133 hci_req_init(&req, hdev);
2135 __hci_req_update_class(&req);
2136 __hci_req_update_eir(&req);
2138 err = hci_req_run(&req, add_uuid_complete);
2140 if (err != -ENODATA)
/* No HCI work queued: reply right away with the device class */
2143 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2144 hdev->dev_class, 3);
2148 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2157 hci_dev_unlock(hdev);
/* Arm the service-cache delayed work on a powered device; the
 * SERVICE_CACHE flag prevents re-arming while already active.
 */
2161 static bool enable_service_cache(struct hci_dev *hdev)
2163 if (!hdev_is_powered(hdev))
2166 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2167 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2175 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2177 bt_dev_dbg(hdev, "status 0x%02x", status);
2179 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handle the MGMT Remove UUID command: an all-zero UUID clears the
 * whole list (optionally deferring via the service cache), otherwise
 * every matching entry is removed; then device class and EIR updates
 * are queued.
 */
2182 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2185 struct mgmt_cp_remove_uuid *cp = data;
2186 struct mgmt_pending_cmd *cmd;
2187 struct bt_uuid *match, *tmp;
2188 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2189 struct hci_request req;
2192 bt_dev_dbg(hdev, "sock %p", sk);
/* Serialize against other CoD/EIR-touching commands */
2196 if (pending_eir_or_class(hdev)) {
2197 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID means "remove everything" */
2202 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2203 hci_uuids_clear(hdev);
2205 if (enable_service_cache(hdev)) {
2206 err = mgmt_cmd_complete(sk, hdev->id,
2207 MGMT_OP_REMOVE_UUID,
2208 0, hdev->dev_class, 3);
/* Otherwise remove every list entry matching the given UUID */
2217 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2218 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2221 list_del(&match->list);
/* UUID was not found in the list */
2227 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2228 MGMT_STATUS_INVALID_PARAMS);
2233 hci_req_init(&req, hdev);
2235 __hci_req_update_class(&req);
2236 __hci_req_update_eir(&req);
2238 err = hci_req_run(&req, remove_uuid_complete);
2240 if (err != -ENODATA)
/* No HCI work queued: reply right away with the device class */
2243 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2244 hdev->dev_class, 3);
2248 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2257 hci_dev_unlock(hdev);
2261 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2263 bt_dev_dbg(hdev, "status 0x%02x", status);
2265 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handle the MGMT Set Device Class command: validate the major/minor
 * bits, store them, and when powered queue the HCI class (and possibly
 * EIR) update.  Flushes a pending service cache first.
 */
2268 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2271 struct mgmt_cp_set_dev_class *cp = data;
2272 struct mgmt_pending_cmd *cmd;
2273 struct hci_request req;
2276 bt_dev_dbg(hdev, "sock %p", sk);
2278 if (!lmp_bredr_capable(hdev))
2279 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2280 MGMT_STATUS_NOT_SUPPORTED);
/* Serialize against other CoD/EIR-touching commands */
2284 if (pending_eir_or_class(hdev)) {
2285 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved bits: minor low 2 bits and major high 3 bits must be 0 */
2290 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2291 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2292 MGMT_STATUS_INVALID_PARAMS);
2296 hdev->major_class = cp->major;
2297 hdev->minor_class = cp->minor;
/* Powered off: the stored class takes effect on next power on */
2299 if (!hdev_is_powered(hdev)) {
2300 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2301 hdev->dev_class, 3);
2305 hci_req_init(&req, hdev);
/* Flush the service cache synchronously before updating EIR */
2307 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2308 hci_dev_unlock(hdev);
2309 cancel_delayed_work_sync(&hdev->service_cache);
2311 __hci_req_update_eir(&req);
2314 __hci_req_update_class(&req);
2316 err = hci_req_run(&req, set_class_complete);
2318 if (err != -ENODATA)
/* No HCI work queued: reply right away with the device class */
2321 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2322 hdev->dev_class, 3);
2326 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2335 hci_dev_unlock(hdev);
/* Handle the MGMT Load Link Keys command: validate the key list
 * length/contents, replace the stored BR/EDR link keys, and update the
 * KEEP_DEBUG_KEYS flag.  Blocked and debug keys are skipped.
 */
2339 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2342 struct mgmt_cp_load_link_keys *cp = data;
/* Largest key count that still fits a u16-sized mgmt packet */
2343 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2344 sizeof(struct mgmt_link_key_info));
2345 u16 key_count, expected_len;
2349 bt_dev_dbg(hdev, "sock %p", sk);
2351 if (!lmp_bredr_capable(hdev))
2352 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2353 MGMT_STATUS_NOT_SUPPORTED);
2355 key_count = __le16_to_cpu(cp->key_count);
2356 if (key_count > max_key_count) {
2357 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2359 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2360 MGMT_STATUS_INVALID_PARAMS);
/* Packet length must exactly match the declared key count */
2363 expected_len = struct_size(cp, keys, key_count);
2364 if (expected_len != len) {
2365 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2367 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2368 MGMT_STATUS_INVALID_PARAMS);
2371 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2372 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2373 MGMT_STATUS_INVALID_PARAMS);
2375 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* Validate every entry before touching the stored keys */
2378 for (i = 0; i < key_count; i++) {
2379 struct mgmt_link_key_info *key = &cp->keys[i];
2381 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2382 return mgmt_cmd_status(sk, hdev->id,
2383 MGMT_OP_LOAD_LINK_KEYS,
2384 MGMT_STATUS_INVALID_PARAMS);
2389 hci_link_keys_clear(hdev);
2392 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2394 changed = hci_dev_test_and_clear_flag(hdev,
2395 HCI_KEEP_DEBUG_KEYS);
2398 new_settings(hdev, NULL);
2400 for (i = 0; i < key_count; i++) {
2401 struct mgmt_link_key_info *key = &cp->keys[i];
/* Skip keys the administrator has explicitly blocked */
2403 if (hci_is_blocked_key(hdev,
2404 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2406 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2411 /* Always ignore debug keys and require a new pairing if
2412 * the user wants to use them.
2414 if (key->type == HCI_LK_DEBUG_COMBINATION)
2417 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2418 key->type, key->pin_len, NULL);
2421 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2423 hci_dev_unlock(hdev);
2428 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2429 u8 addr_type, struct sock *skip_sk)
2431 struct mgmt_ev_device_unpaired ev;
2433 bacpy(&ev.addr.bdaddr, bdaddr);
2434 ev.addr.type = addr_type;
2436 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handle the MGMT Unpair Device command: remove stored keys (link key
 * for BR/EDR; SMP LTK/IRK for LE), disable auto-connection parameters,
 * and — when requested and connected — terminate the link before
 * completing the command.
 */
2440 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2443 struct mgmt_cp_unpair_device *cp = data;
2444 struct mgmt_rp_unpair_device rp;
2445 struct hci_conn_params *params;
2446 struct mgmt_pending_cmd *cmd;
2447 struct hci_conn *conn;
2451 memset(&rp, 0, sizeof(rp));
2452 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2453 rp.addr.type = cp->addr.type;
2455 if (!bdaddr_type_is_valid(cp->addr.type))
2456 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2457 MGMT_STATUS_INVALID_PARAMS,
2460 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2461 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2462 MGMT_STATUS_INVALID_PARAMS,
2467 if (!hdev_is_powered(hdev)) {
2468 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2469 MGMT_STATUS_NOT_POWERED, &rp,
/* BR/EDR branch: remove the stored link key */
2474 if (cp->addr.type == BDADDR_BREDR) {
2475 /* If disconnection is requested, then look up the
2476 * connection. If the remote device is connected, it
2477 * will be later used to terminate the link.
2479 * Setting it to NULL explicitly will cause no
2480 * termination of the link.
2483 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2488 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2490 err = mgmt_cmd_complete(sk, hdev->id,
2491 MGMT_OP_UNPAIR_DEVICE,
2492 MGMT_STATUS_NOT_PAIRED, &rp,
2500 /* LE address type */
2501 addr_type = le_addr_type(cp->addr.type);
2503 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2504 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2506 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2507 MGMT_STATUS_NOT_PAIRED, &rp,
2512 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2514 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2519 /* Defer clearing up the connection parameters until closing to
2520 * give a chance of keeping them if a repairing happens.
2522 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2524 /* Disable auto-connection parameters if present */
2525 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2527 if (params->explicit_connect)
2528 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2530 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2533 /* If disconnection is not requested, then clear the connection
2534 * variable so that the link is not terminated.
2536 if (!cp->disconnect)
2540 /* If the connection variable is set, then termination of the
2541 * link is requested.
2544 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2546 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Connected and disconnect requested: abort the link; the pending
 * command completes once the disconnection finishes.
 */
2550 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2557 cmd->cmd_complete = addr_cmd_complete;
2559 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2561 mgmt_pending_remove(cmd);
2564 hci_dev_unlock(hdev);
/* Handle the MGMT Disconnect command: locate the BR/EDR or LE
 * connection for the given address and request its termination; the
 * pending command completes when the disconnection event arrives.
 */
2568 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2571 struct mgmt_cp_disconnect *cp = data;
2572 struct mgmt_rp_disconnect rp;
2573 struct mgmt_pending_cmd *cmd;
2574 struct hci_conn *conn;
2577 bt_dev_dbg(hdev, "sock %p", sk);
2579 memset(&rp, 0, sizeof(rp));
2580 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2581 rp.addr.type = cp->addr.type;
2583 if (!bdaddr_type_is_valid(cp->addr.type))
2584 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2585 MGMT_STATUS_INVALID_PARAMS,
2590 if (!test_bit(HCI_UP, &hdev->flags)) {
2591 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2592 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one Disconnect command may be pending at a time */
2597 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2598 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2599 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Look up the connection by transport-specific address type */
2603 if (cp->addr.type == BDADDR_BREDR)
2604 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2607 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2608 le_addr_type(cp->addr.type));
2610 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2611 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2612 MGMT_STATUS_NOT_CONNECTED, &rp,
2617 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2623 cmd->cmd_complete = generic_cmd_complete;
2625 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2627 mgmt_pending_remove(cmd);
2630 hci_dev_unlock(hdev);
/* Translate an HCI link type + address type pair into the mgmt
 * BDADDR_* address type used on the management interface.  (Some case
 * labels fall outside this view.)
 */
2634 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2636 switch (link_type) {
2638 switch (addr_type) {
2639 case ADDR_LE_DEV_PUBLIC:
2640 return BDADDR_LE_PUBLIC;
2643 /* Fallback to LE Random address type */
2644 return BDADDR_LE_RANDOM;
2648 /* Fallback to BR/EDR type */
2649 return BDADDR_BREDR;
/* Handle the MGMT Get Connections command: report the address of every
 * mgmt-visible connection, filtering out SCO/eSCO links, and size the
 * response to the entries actually emitted.
 */
2653 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2656 struct mgmt_rp_get_connections *rp;
2661 bt_dev_dbg(hdev, "sock %p", sk);
2665 if (!hdev_is_powered(hdev)) {
2666 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2667 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the response buffer */
2672 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2673 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2677 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links */
2684 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2685 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2687 bacpy(&rp->addr[i].bdaddr, &c->dst);
2688 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2689 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2694 rp->conn_count = cpu_to_le16(i);
2696 /* Recalculate length in case of filtered SCO connections, etc */
2697 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2698 struct_size(rp, addr, i));
2703 hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply HCI command for the given address,
 * tracking it as a pending mgmt command so the HCI completion can
 * answer the requester.
 */
2707 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2708 struct mgmt_cp_pin_code_neg_reply *cp)
2710 struct mgmt_pending_cmd *cmd;
2713 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2718 cmd->cmd_complete = addr_cmd_complete;
2720 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2721 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2723 mgmt_pending_remove(cmd);
/* Handle the MGMT PIN Code Reply command: validate the connection and
 * PIN length (a high-security connection demands a 16-byte PIN and is
 * otherwise answered with a negative reply), then forward the PIN to
 * the controller.
 */
2728 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2731 struct hci_conn *conn;
2732 struct mgmt_cp_pin_code_reply *cp = data;
2733 struct hci_cp_pin_code_reply reply;
2734 struct mgmt_pending_cmd *cmd;
2737 bt_dev_dbg(hdev, "sock %p", sk);
2741 if (!hdev_is_powered(hdev)) {
2742 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2743 MGMT_STATUS_NOT_POWERED);
2747 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2749 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2750 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN */
2754 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2755 struct mgmt_cp_pin_code_neg_reply ncp;
2757 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2759 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2761 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2763 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2764 MGMT_STATUS_INVALID_PARAMS);
2769 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2775 cmd->cmd_complete = addr_cmd_complete;
2777 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2778 reply.pin_len = cp->pin_len;
2779 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2781 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2783 mgmt_pending_remove(cmd);
2786 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the adapter's IO capability
 * used during pairing. Values above SMP_IO_KEYBOARD_DISPLAY are
 * rejected as invalid; the update itself happens under the dev lock.
 */
2790 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2793 struct mgmt_cp_set_io_capability *cp = data;
2795 bt_dev_dbg(hdev, "sock %p", sk);
2797 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2798 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2799 MGMT_STATUS_INVALID_PARAMS);
2803 hdev->io_capability = cp->io_capability;
2805 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2807 hci_dev_unlock(hdev);
2809 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at the given connection, or NULL if there is none (return statements
 * are in the elided portion of this excerpt).
 */
2813 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2815 struct hci_dev *hdev = conn->hdev;
2816 struct mgmt_pending_cmd *cmd;
2818 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2819 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2822 if (cmd->user_data != conn)
/* Complete a Pair Device request: send the mgmt response with the peer
 * address and the final status, detach the pairing callbacks from the
 * connection so no further notifications arrive, and drop the
 * reference taken when pairing started.
 */
2831 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2833 struct mgmt_rp_pair_device rp;
2834 struct hci_conn *conn = cmd->user_data;
2837 bacpy(&rp.addr.bdaddr, &conn->dst);
2838 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2840 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2841 status, &rp, sizeof(rp));
2843 /* So we don't get further callbacks for this connection */
2844 conn->connect_cfm_cb = NULL;
2845 conn->security_cfm_cb = NULL;
2846 conn->disconn_cfm_cb = NULL;
2848 hci_conn_drop(conn);
2850 /* The device is paired so there is no need to remove
2851 * its connection parameters anymore.
2853 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over LE finishes. Maps the
 * boolean outcome onto a mgmt status and completes/removes any pending
 * Pair Device command tracked for this connection.
 */
2860 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2862 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2863 struct mgmt_pending_cmd *cmd;
2865 cmd = find_pairing(conn);
2867 cmd->cmd_complete(cmd, status);
2868 mgmt_pending_remove(cmd);
/* BR/EDR connection callback used while a Pair Device command is in
 * flight: translate the HCI status and finish the pending command.
 */
2872 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2874 struct mgmt_pending_cmd *cmd;
2876 BT_DBG("status %u", status);
2878 cmd = find_pairing(conn);
2880 BT_DBG("Unable to find a pending command");
2884 cmd->cmd_complete(cmd, mgmt_status(status));
2885 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb(). An elided early check (not
 * visible in this excerpt) sits between the debug print and the
 * find_pairing() lookup.
 */
2888 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2890 struct mgmt_pending_cmd *cmd;
2892 BT_DBG("status %u", status);
2897 cmd = find_pairing(conn);
2899 BT_DBG("Unable to find a pending command");
2903 cmd->cmd_complete(cmd, mgmt_status(status));
2904 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing (dedicated bonding)
 * with a remote device over BR/EDR or LE. Validates the address type
 * and IO capability, refuses when unpowered or already paired, creates
 * the transport-appropriate connection, registers a pending command
 * and wires the pairing completion callbacks onto the connection.
 * NOTE(review): excerpt is elided — goto labels, lock calls and some
 * branches are not visible between the lines shown.
 */
2907 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2910 struct mgmt_cp_pair_device *cp = data;
2911 struct mgmt_rp_pair_device rp;
2912 struct mgmt_pending_cmd *cmd;
2913 u8 sec_level, auth_type;
2914 struct hci_conn *conn;
2917 bt_dev_dbg(hdev, "sock %p", sk);
/* Response always echoes the requested address back to the caller */
2919 memset(&rp, 0, sizeof(rp));
2920 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2921 rp.addr.type = cp->addr.type;
2923 if (!bdaddr_type_is_valid(cp->addr.type))
2924 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2925 MGMT_STATUS_INVALID_PARAMS,
2928 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2929 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2930 MGMT_STATUS_INVALID_PARAMS,
2935 if (!hdev_is_powered(hdev)) {
2936 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2937 MGMT_STATUS_NOT_POWERED, &rp,
2942 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2943 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2944 MGMT_STATUS_ALREADY_PAIRED, &rp,
/* Pairing always means dedicated bonding at medium security */
2949 sec_level = BT_SECURITY_MEDIUM;
2950 auth_type = HCI_AT_DEDICATED_BONDING;
2952 if (cp->addr.type == BDADDR_BREDR) {
2953 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2954 auth_type, CONN_REASON_PAIR_DEVICE);
2956 u8 addr_type = le_addr_type(cp->addr.type);
2957 struct hci_conn_params *p;
2959 /* When pairing a new device, it is expected to remember
2960 * this device for future connections. Adding the connection
2961 * parameter information ahead of time allows tracking
2962 * of the peripheral preferred values and will speed up any
2963 * further connection establishment.
2965 * If connection parameters already exist, then they
2966 * will be kept and this function does nothing.
2968 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2970 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2971 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2973 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2974 sec_level, HCI_LE_CONN_TIMEOUT,
2975 CONN_REASON_PAIR_DEVICE);
/* Map connect errors onto the closest mgmt status codes */
2981 if (PTR_ERR(conn) == -EBUSY)
2982 status = MGMT_STATUS_BUSY;
2983 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2984 status = MGMT_STATUS_NOT_SUPPORTED;
2985 else if (PTR_ERR(conn) == -ECONNREFUSED)
2986 status = MGMT_STATUS_REJECTED;
2988 status = MGMT_STATUS_CONNECT_FAILED;
2990 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2991 status, &rp, sizeof(rp));
/* A connection that already has callbacks attached is being paired
 * by someone else — report busy rather than hijacking it.
 */
2995 if (conn->connect_cfm_cb) {
2996 hci_conn_drop(conn);
2997 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2998 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3002 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3005 hci_conn_drop(conn);
3009 cmd->cmd_complete = pairing_complete;
3011 /* For LE, just connecting isn't a proof that the pairing finished */
3012 if (cp->addr.type == BDADDR_BREDR) {
3013 conn->connect_cfm_cb = pairing_complete_cb;
3014 conn->security_cfm_cb = pairing_complete_cb;
3015 conn->disconn_cfm_cb = pairing_complete_cb;
3017 conn->connect_cfm_cb = le_pairing_complete_cb;
3018 conn->security_cfm_cb = le_pairing_complete_cb;
3019 conn->disconn_cfm_cb = le_pairing_complete_cb;
3022 conn->io_capability = cp->io_cap;
/* Keep a reference for the duration of pairing; dropped on complete */
3023 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure enough, finish immediately */
3025 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3026 hci_conn_security(conn, sec_level, auth_type, true)) {
3027 cmd->cmd_complete(cmd, 0);
3028 mgmt_pending_remove(cmd);
3034 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-flight Pair Device
 * request for the given address. Completes the pending command with
 * CANCELLED, removes any stored link key / SMP pairing state, and
 * tears down the link if it was only created for pairing.
 */
3038 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3041 struct mgmt_addr_info *addr = data;
3042 struct mgmt_pending_cmd *cmd;
3043 struct hci_conn *conn;
3046 bt_dev_dbg(hdev, "sock %p", sk);
3050 if (!hdev_is_powered(hdev)) {
3051 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3052 MGMT_STATUS_NOT_POWERED);
/* There must be a pairing in progress to cancel */
3056 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3058 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3059 MGMT_STATUS_INVALID_PARAMS);
3063 conn = cmd->user_data;
/* The cancel must target the same device the pairing was started for */
3065 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3066 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3067 MGMT_STATUS_INVALID_PARAMS);
3071 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3072 mgmt_pending_remove(cmd);
3074 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3075 addr, sizeof(*addr));
3077 /* Since user doesn't want to proceed with the connection, abort any
3078 * ongoing pairing and then terminate the link if it was created
3079 * because of the pair device action.
3081 if (addr->type == BDADDR_BREDR)
3082 hci_remove_link_key(hdev, &addr->bdaddr);
3084 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3085 le_addr_type(addr->type));
3087 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3088 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3091 hci_dev_unlock(hdev);
/* Shared backend for the user-interaction pairing replies (PIN neg,
 * confirm, passkey and their negatives). For LE addresses the reply is
 * routed to SMP; for BR/EDR a pending command is queued and the given
 * HCI opcode is sent — with a passkey payload when hci_op is
 * HCI_OP_USER_PASSKEY_REPLY, otherwise just the bdaddr.
 */
3095 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3096 struct mgmt_addr_info *addr, u16 mgmt_op,
3097 u16 hci_op, __le32 passkey)
3099 struct mgmt_pending_cmd *cmd;
3100 struct hci_conn *conn;
3105 if (!hdev_is_powered(hdev)) {
3106 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3107 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the connection on the transport implied by the address type */
3112 if (addr->type == BDADDR_BREDR)
3113 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3115 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3116 le_addr_type(addr->type));
3119 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3120 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE user confirmation is handled entirely inside the SMP layer */
3125 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3126 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3128 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3129 MGMT_STATUS_SUCCESS, addr,
3132 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3133 MGMT_STATUS_FAILED, addr,
3139 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3145 cmd->cmd_complete = addr_cmd_complete;
3147 /* Continue with pairing via HCI */
3148 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3149 struct hci_cp_user_passkey_reply cp;
3151 bacpy(&cp.bdaddr, &addr->bdaddr);
3152 cp.passkey = passkey;
3153 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3155 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3159 mgmt_pending_remove(cmd);
3162 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the PIN-code-negative HCI opcode.
 */
3166 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3167 void *data, u16 len)
3169 struct mgmt_cp_pin_code_neg_reply *cp = data;
3171 bt_dev_dbg(hdev, "sock %p", sk);
3173 return user_pairing_resp(sk, hdev, &cp->addr,
3174 MGMT_OP_PIN_CODE_NEG_REPLY,
3175 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the exact parameter
 * size, then delegates to user_pairing_resp(). Unlike its siblings it
 * length-checks explicitly because the command carries no extra data.
 */
3178 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3181 struct mgmt_cp_user_confirm_reply *cp = data;
3183 bt_dev_dbg(hdev, "sock %p", sk);
3185 if (len != sizeof(*cp))
3186 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3187 MGMT_STATUS_INVALID_PARAMS);
3189 return user_pairing_resp(sk, hdev, &cp->addr,
3190 MGMT_OP_USER_CONFIRM_REPLY,
3191 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * request; delegates to user_pairing_resp().
 */
3194 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3195 void *data, u16 len)
3197 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3199 bt_dev_dbg(hdev, "sock %p", sk);
3201 return user_pairing_resp(sk, hdev, &cp->addr,
3202 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3203 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forward the user-entered passkey
 * via user_pairing_resp() (the only wrapper passing a non-zero passkey).
 */
3206 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3209 struct mgmt_cp_user_passkey_reply *cp = data;
3211 bt_dev_dbg(hdev, "sock %p", sk);
3213 return user_pairing_resp(sk, hdev, &cp->addr,
3214 MGMT_OP_USER_PASSKEY_REPLY,
3215 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey request;
 * delegates to user_pairing_resp().
 */
3218 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3219 void *data, u16 len)
3221 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3223 bt_dev_dbg(hdev, "sock %p", sk);
3225 return user_pairing_resp(sk, hdev, &cp->addr,
3226 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3227 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Re-schedule advertising after data that an instance depends on has
 * changed (e.g. local name or appearance). If the current advertising
 * instance uses any of the given flags, the advertising timeout is
 * cancelled and the next instance is (re)scheduled via an HCI request.
 */
3230 static void adv_expire(struct hci_dev *hdev, u32 flags)
3232 struct adv_info *adv_instance;
3233 struct hci_request req;
3236 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3240 /* stop if current instance doesn't need to be changed */
3241 if (!(adv_instance->flags & flags))
3244 cancel_adv_timeout(hdev);
3246 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3250 hci_req_init(&req, hdev);
3251 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
/* Fire-and-forget: no completion callback is needed here */
3256 hci_req_run(&req, NULL);
/* HCI request completion for Set Local Name: answer the pending mgmt
 * command with the translated status and, on success while LE
 * advertising is active, expire instances that embed the local name.
 */
3259 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3261 struct mgmt_cp_set_local_name *cp;
3262 struct mgmt_pending_cmd *cmd;
3264 bt_dev_dbg(hdev, "status 0x%02x", status);
3268 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3275 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3276 mgmt_status(status));
3278 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Advertising data that carries the name is now stale */
3281 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3282 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3285 mgmt_pending_remove(cmd);
3288 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the adapter's complete and
 * short names. Unchanged names short-circuit with success; on an
 * unpowered adapter only the cached values are updated and listeners
 * are notified. Otherwise an HCI request updates name/EIR (BR/EDR) and
 * scan response data (LE advertising), completing asynchronously via
 * set_name_complete().
 */
3291 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3294 struct mgmt_cp_set_local_name *cp = data;
3295 struct mgmt_pending_cmd *cmd;
3296 struct hci_request req;
3299 bt_dev_dbg(hdev, "sock %p", sk);
3303 /* If the old values are the same as the new ones just return a
3304 * direct command complete event.
3306 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3307 !memcmp(hdev->short_name, cp->short_name,
3308 sizeof(hdev->short_name))) {
3309 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is never sent to the controller, store it right away */
3314 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3316 if (!hdev_is_powered(hdev)) {
3317 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3319 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3324 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3325 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3326 ext_info_changed(hdev, sk);
3331 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3337 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3339 hci_req_init(&req, hdev);
3341 if (lmp_bredr_capable(hdev)) {
3342 __hci_req_update_name(&req);
3343 __hci_req_update_eir(&req);
3346 /* The name is stored in the scan response data and so
3347 * no need to update the advertising data here.
3349 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3350 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3352 err = hci_req_run(&req, set_name_complete);
3354 mgmt_pending_remove(cmd);
3357 hci_dev_unlock(hdev);
/* MGMT_OP_SET_APPEARANCE handler (LE-only): store the GAP appearance
 * value; when it changes while LE advertising is active, expire
 * advertising instances that include the appearance, and notify
 * extended-info listeners.
 */
3361 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3364 struct mgmt_cp_set_appearance *cp = data;
3368 bt_dev_dbg(hdev, "sock %p", sk);
3370 if (!lmp_le_capable(hdev))
3371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3372 MGMT_STATUS_NOT_SUPPORTED);
3374 appearance = le16_to_cpu(cp->appearance);
3378 if (hdev->appearance != appearance) {
3379 hdev->appearance = appearance;
3381 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3382 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3384 ext_info_changed(hdev, sk);
3387 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3390 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported,
 * currently selected, and configurable PHY bitmasks, read under the
 * device lock.
 */
3395 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3396 void *data, u16 len)
3398 struct mgmt_rp_get_phy_configuration rp;
3400 bt_dev_dbg(hdev, "sock %p", sk);
3404 memset(&rp, 0, sizeof(rp));
3406 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3407 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3408 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3410 hci_dev_unlock(hdev);
3412 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event with the currently
 * selected PHYs to all mgmt sockets except @skip (the originator).
 */
3416 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3418 struct mgmt_ev_phy_configuration_changed ev;
3420 memset(&ev, 0, sizeof(ev));
3422 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3424 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI completion for LE Set Default PHY issued from
 * set_phy_configuration(): answer the pending mgmt command and, on
 * success, broadcast the changed configuration to other sockets.
 */
3428 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3429 u16 opcode, struct sk_buff *skb)
3431 struct mgmt_pending_cmd *cmd;
3433 bt_dev_dbg(hdev, "status 0x%02x", status);
3437 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3442 mgmt_cmd_status(cmd->sk, hdev->id,
3443 MGMT_OP_SET_PHY_CONFIGURATION,
3444 mgmt_status(status));
3446 mgmt_cmd_complete(cmd->sk, hdev->id,
3447 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3450 mgmt_phy_configuration_changed(hdev, cmd->sk);
3453 mgmt_pending_remove(cmd);
3456 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PHY_CONFIGURATION handler. Validates the requested PHY
 * bitmask against supported/configurable PHYs, translates the BR/EDR
 * bits into the ACL packet-type mask (note: the EDR bits are inverted
 * — a set MGMT bit clears the corresponding "2-DHx/3-DHx not allowed"
 * HCI bit), and, when the LE portion changed, issues an HCI LE Set
 * Default PHY request that completes via set_default_phy_complete().
 */
3459 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3460 void *data, u16 len)
3462 struct mgmt_cp_set_phy_configuration *cp = data;
3463 struct hci_cp_le_set_default_phy cp_phy;
3464 struct mgmt_pending_cmd *cmd;
3465 struct hci_request req;
3466 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3467 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3468 bool changed = false;
3471 bt_dev_dbg(hdev, "sock %p", sk);
3473 configurable_phys = get_configurable_phys(hdev);
3474 supported_phys = get_supported_phys(hdev);
3475 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject any PHY bit the controller does not support at all */
3477 if (selected_phys & ~supported_phys)
3478 return mgmt_cmd_status(sk, hdev->id,
3479 MGMT_OP_SET_PHY_CONFIGURATION,
3480 MGMT_STATUS_INVALID_PARAMS);
3482 unconfigure_phys = supported_phys & ~configurable_phys;
/* Mandatory (non-configurable) PHYs must always stay selected */
3484 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3485 return mgmt_cmd_status(sk, hdev->id,
3486 MGMT_OP_SET_PHY_CONFIGURATION,
3487 MGMT_STATUS_INVALID_PARAMS);
3489 if (selected_phys == get_selected_phys(hdev))
3490 return mgmt_cmd_complete(sk, hdev->id,
3491 MGMT_OP_SET_PHY_CONFIGURATION,
3496 if (!hdev_is_powered(hdev)) {
3497 err = mgmt_cmd_status(sk, hdev->id,
3498 MGMT_OP_SET_PHY_CONFIGURATION,
3499 MGMT_STATUS_REJECTED);
3503 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3504 err = mgmt_cmd_status(sk, hdev->id,
3505 MGMT_OP_SET_PHY_CONFIGURATION,
/* Basic-rate multi-slot bits map directly onto DHx/DMx packet types */
3510 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3511 pkt_type |= (HCI_DH3 | HCI_DM3);
3513 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3515 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3516 pkt_type |= (HCI_DH5 | HCI_DM5);
3518 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* EDR bits are "shall not be used" flags in HCI, hence the inversion */
3520 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3521 pkt_type &= ~HCI_2DH1;
3523 pkt_type |= HCI_2DH1;
3525 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3526 pkt_type &= ~HCI_2DH3;
3528 pkt_type |= HCI_2DH3;
3530 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3531 pkt_type &= ~HCI_2DH5;
3533 pkt_type |= HCI_2DH5;
3535 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3536 pkt_type &= ~HCI_3DH1;
3538 pkt_type |= HCI_3DH1;
3540 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3541 pkt_type &= ~HCI_3DH3;
3543 pkt_type |= HCI_3DH3;
3545 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3546 pkt_type &= ~HCI_3DH5;
3548 pkt_type |= HCI_3DH5;
3550 if (pkt_type != hdev->pkt_type) {
3551 hdev->pkt_type = pkt_type;
/* If only BR/EDR bits changed there is no HCI command to send */
3555 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3556 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3558 mgmt_phy_configuration_changed(hdev, sk);
3560 err = mgmt_cmd_complete(sk, hdev->id,
3561 MGMT_OP_SET_PHY_CONFIGURATION,
3567 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3574 hci_req_init(&req, hdev);
3576 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller we have no TX/RX preference */
3578 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3579 cp_phy.all_phys |= 0x01;
3581 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3582 cp_phy.all_phys |= 0x02;
3584 if (selected_phys & MGMT_PHY_LE_1M_TX)
3585 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3587 if (selected_phys & MGMT_PHY_LE_2M_TX)
3588 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3590 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3591 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3593 if (selected_phys & MGMT_PHY_LE_1M_RX)
3594 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3596 if (selected_phys & MGMT_PHY_LE_2M_RX)
3597 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3599 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3600 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3602 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3604 err = hci_req_run_skb(&req, set_default_phy_complete);
3606 mgmt_pending_remove(cmd);
3609 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the adapter's blocked-key
 * list with the one supplied by userspace. The key count is bounded so
 * struct_size() cannot overflow u16, and the payload length must match
 * exactly. The list swap happens under the device lock; entries are
 * published with list_add_rcu() for lock-free readers.
 */
3614 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3617 int err = MGMT_STATUS_SUCCESS;
3618 struct mgmt_cp_set_blocked_keys *keys = data;
/* Upper bound keeps expected_len below U16_MAX */
3619 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3620 sizeof(struct mgmt_blocked_key_info));
3621 u16 key_count, expected_len;
3624 bt_dev_dbg(hdev, "sock %p", sk);
3626 key_count = __le16_to_cpu(keys->key_count);
3627 if (key_count > max_key_count) {
3628 bt_dev_err(hdev, "too big key_count value %u", key_count);
3629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3630 MGMT_STATUS_INVALID_PARAMS);
3633 expected_len = struct_size(keys, keys, key_count);
3634 if (expected_len != len) {
3635 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3637 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3638 MGMT_STATUS_INVALID_PARAMS);
/* Old entries are always dropped, even if the new list is empty */
3643 hci_blocked_keys_clear(hdev);
3645 for (i = 0; i < keys->key_count; ++i) {
3646 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3649 err = MGMT_STATUS_NO_RESOURCES;
3653 b->type = keys->keys[i].type;
3654 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3655 list_add_rcu(&b->list, &hdev->blocked_keys);
3657 hci_dev_unlock(hdev);
3659 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband-speech
 * (mSBC over SCO) setting. Only available when the controller declares
 * the quirk; the flag cannot be flipped while the adapter is powered,
 * and New Settings is emitted only when the value actually changed.
 */
3663 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3664 void *data, u16 len)
3666 struct mgmt_mode *cp = data;
3668 bool changed = false;
3670 bt_dev_dbg(hdev, "sock %p", sk);
3672 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3673 return mgmt_cmd_status(sk, hdev->id,
3674 MGMT_OP_SET_WIDEBAND_SPEECH,
3675 MGMT_STATUS_NOT_SUPPORTED);
3677 if (cp->val != 0x00 && cp->val != 0x01)
3678 return mgmt_cmd_status(sk, hdev->id,
3679 MGMT_OP_SET_WIDEBAND_SPEECH,
3680 MGMT_STATUS_INVALID_PARAMS);
3684 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3685 err = mgmt_cmd_status(sk, hdev->id,
3686 MGMT_OP_SET_WIDEBAND_SPEECH,
/* Changing the value on a powered adapter is not supported */
3691 if (hdev_is_powered(hdev) &&
3692 !!cp->val != hci_dev_test_flag(hdev,
3693 HCI_WIDEBAND_SPEECH_ENABLED)) {
3694 err = mgmt_cmd_status(sk, hdev->id,
3695 MGMT_OP_SET_WIDEBAND_SPEECH,
3696 MGMT_STATUS_REJECTED);
3701 changed = !hci_dev_test_and_set_flag(hdev,
3702 HCI_WIDEBAND_SPEECH_ENABLED);
3704 changed = hci_dev_test_and_clear_flag(hdev,
3705 HCI_WIDEBAND_SPEECH_ENABLED);
3707 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3712 err = new_settings(hdev, sk);
3715 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: build a TLV list of controller
 * capabilities — security flags (public key validation, encryption key
 * size enforcement), max encryption key sizes, and the LE TX power
 * range when the controller reported one.
 */
3719 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3720 void *data, u16 data_len)
3723 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3726 u8 tx_power_range[2];
3728 bt_dev_dbg(hdev, "sock %p", sk);
3730 memset(&buf, 0, sizeof(buf));
3734 /* When the Read Simple Pairing Options command is supported, then
3735 * the remote public key validation is supported.
3737 * Alternatively, when Microsoft extensions are available, they can
3738 * indicate support for public key validation as well.
/* commands[41] bit 3 = Read Local Simple Pairing Options supported */
3740 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3741 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3743 flags |= 0x02; /* Remote public key validation (LE) */
3745 /* When the Read Encryption Key Size command is supported, then the
3746 * encryption key size is enforced.
3748 if (hdev->commands[20] & 0x10)
3749 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3751 flags |= 0x08; /* Encryption key size enforcement (LE) */
3753 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3756 /* When the Read Simple Pairing Options command is supported, then
3757 * also max encryption key size information is provided.
3759 if (hdev->commands[41] & 0x08)
3760 cap_len = eir_append_le16(rp->cap, cap_len,
3761 MGMT_CAP_MAX_ENC_KEY_SIZE,
3762 hdev->max_enc_key_size);
3764 cap_len = eir_append_le16(rp->cap, cap_len,
3765 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3766 SMP_MAX_ENC_KEY_SIZE);
3768 /* Append the min/max LE tx power parameters if we were able to fetch
3769 * it from the controller
/* commands[38] bit 7 = LE Read Transmit Power supported */
3771 if (hdev->commands[38] & 0x80) {
3772 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3773 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3774 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3778 rp->cap_len = cpu_to_le16(cap_len);
3780 hci_dev_unlock(hdev);
3782 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3783 rp, sizeof(*rp) + cap_len);
/* UUIDs identifying experimental features. The bytes are stored in
 * reverse (little-endian) order relative to the canonical string form
 * quoted above each array.
 */
3786 #ifdef CONFIG_BT_FEATURE_DEBUG
3787 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3788 static const u8 debug_uuid[16] = {
3789 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3790 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3794 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3795 static const u8 simult_central_periph_uuid[16] = {
3796 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3797 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3800 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3801 static const u8 rpa_resolution_uuid[16] = {
3802 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3803 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list the experimental
 * features available on this build/controller (debug logging,
 * simultaneous central+peripheral, LL privacy/RPA resolution) together
 * with their enabled/changeable flag bits. Also subscribes the socket
 * to future experimental-feature-changed events.
 */
3806 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3807 void *data, u16 data_len)
3809 char buf[62]; /* Enough space for 3 features */
3810 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3814 bt_dev_dbg(hdev, "sock %p", sk);
3816 memset(&buf, 0, sizeof(buf));
3818 #ifdef CONFIG_BT_FEATURE_DEBUG
/* BIT(0) = feature currently enabled */
3820 flags = bt_dbg_get() ? BIT(0) : 0;
3822 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3823 rp->features[idx].flags = cpu_to_le32(flags);
/* Advertise combined central/peripheral only when the controller's
 * LE state map says all three roles can run simultaneously.
 */
3829 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3830 (hdev->le_states[4] & 0x08) && /* Central */
3831 (hdev->le_states[4] & 0x40) && /* Peripheral */
3832 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3837 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3838 rp->features[idx].flags = cpu_to_le32(flags);
3842 if (hdev && use_ll_privacy(hdev)) {
3843 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3844 flags = BIT(0) | BIT(1);
3848 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3849 rp->features[idx].flags = cpu_to_le32(flags);
3853 rp->feature_count = cpu_to_le16(idx);
3855 /* After reading the experimental features information, enable
3856 * the events to update client on any future change.
3858 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3860 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3861 MGMT_OP_READ_EXP_FEATURES_INFO,
3862 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the RPA-resolution
 * (LL privacy) feature. BIT(1) is always set, flagging that toggling
 * it also changes the supported settings.
 */
3865 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3868 struct mgmt_ev_exp_feature_changed ev;
3870 memset(&ev, 0, sizeof(ev));
3871 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3872 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3874 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3876 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3880 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit an Experimental Feature Changed event for the debug feature.
 * Sent with a NULL hdev since debug logging is not tied to an index.
 */
3881 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3883 struct mgmt_ev_exp_feature_changed ev;
3885 memset(&ev, 0, sizeof(ev));
3886 memcpy(ev.uuid, debug_uuid, 16);
3887 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3889 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3891 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Convenience initializer for entries of the exp_features[] table */
3895 #define EXP_FEAT(_uuid, _set_func) \
3898 .set_func = _set_func, \
3901 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID disables every experimental feature that
 * is currently on (debug logging when built in; LL privacy when the
 * adapter supports it and is powered off), emitting change events for
 * each feature that actually flipped.
 */
3902 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
3903 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3905 struct mgmt_rp_set_exp_feature rp;
3907 memset(rp.uuid, 0, 16);
3908 rp.flags = cpu_to_le32(0);
3910 #ifdef CONFIG_BT_FEATURE_DEBUG
3912 bool changed = bt_dbg_get();
3917 exp_debug_feature_changed(false, sk);
/* LL privacy can only be toggled while the controller is down */
3921 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3924 changed = hci_dev_test_and_clear_flag(hdev,
3925 HCI_ENABLE_LL_PRIVACY);
3927 exp_ll_privacy_feature_changed(false, hdev, sk);
3930 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3932 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3933 MGMT_OP_SET_EXP_FEATURE, 0,
3937 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Enable/disable kernel Bluetooth debug logging via the debug
 * experimental-feature UUID. Must be sent on the non-controller
 * (MGMT_INDEX_NONE) index with a single boolean parameter octet; a
 * change event is broadcast when the value actually changed.
 */
3938 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
3939 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3941 struct mgmt_rp_set_exp_feature rp;
3946 /* Command requires to use the non-controller index */
3948 return mgmt_cmd_status(sk, hdev->id,
3949 MGMT_OP_SET_EXP_FEATURE,
3950 MGMT_STATUS_INVALID_INDEX);
3952 /* Parameters are limited to a single octet */
3953 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3954 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3955 MGMT_OP_SET_EXP_FEATURE,
3956 MGMT_STATUS_INVALID_PARAMS);
3958 /* Only boolean on/off is supported */
3959 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3960 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3961 MGMT_OP_SET_EXP_FEATURE,
3962 MGMT_STATUS_INVALID_PARAMS);
3964 val = !!cp->param[0];
3965 changed = val ? !bt_dbg_get() : bt_dbg_get();
3968 memcpy(rp.uuid, debug_uuid, 16);
3969 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3971 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3973 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3974 MGMT_OP_SET_EXP_FEATURE, 0,
/* Notify other subscribers only on an actual state change */
3978 exp_debug_feature_changed(val, sk);
/* Enable/disable controller-based RPA resolution (LL privacy) via the
 * rpa_resolution experimental UUID. Requires a controller index, a
 * powered-down adapter, and a single boolean parameter octet. Enabling
 * also clears HCI_ADVERTISING since software-based advertising cannot
 * coexist with LL privacy.
 */
3984 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
3985 struct mgmt_cp_set_exp_feature *cp,
3988 struct mgmt_rp_set_exp_feature rp;
3993 /* Command requires to use the controller index */
3995 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3996 MGMT_OP_SET_EXP_FEATURE,
3997 MGMT_STATUS_INVALID_INDEX);
3999 /* Changes can only be made when controller is powered down */
4000 if (hdev_is_powered(hdev))
4001 return mgmt_cmd_status(sk, hdev->id,
4002 MGMT_OP_SET_EXP_FEATURE,
4003 MGMT_STATUS_REJECTED);
4005 /* Parameters are limited to a single octet */
4006 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4007 return mgmt_cmd_status(sk, hdev->id,
4008 MGMT_OP_SET_EXP_FEATURE,
4009 MGMT_STATUS_INVALID_PARAMS);
4011 /* Only boolean on/off is supported */
4012 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4013 return mgmt_cmd_status(sk, hdev->id,
4014 MGMT_OP_SET_EXP_FEATURE,
4015 MGMT_STATUS_INVALID_PARAMS);
4017 val = !!cp->param[0];
4020 changed = !hci_dev_test_and_set_flag(hdev,
4021 HCI_ENABLE_LL_PRIVACY);
4022 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4024 /* Enable LL privacy + supported settings changed */
4025 flags = BIT(0) | BIT(1);
4027 changed = hci_dev_test_and_clear_flag(hdev,
4028 HCI_ENABLE_LL_PRIVACY);
4030 /* Disable LL privacy + supported settings changed */
4034 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4035 rp.flags = cpu_to_le32(flags);
4037 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4039 err = mgmt_cmd_complete(sk, hdev->id,
4040 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast the change to all other subscribed sockets */
4044 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Dispatch table mapping experimental-feature UUIDs to their setter
 * functions; terminated by a NULL entry for the lookup loop in
 * set_exp_feature().
 */
4049 static const struct mgmt_exp_feature {
4051 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4052 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4053 } exp_features[] = {
4054 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4055 #ifdef CONFIG_BT_FEATURE_DEBUG
4056 EXP_FEAT(debug_uuid, set_debug_func),
4058 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4060 /* end with a null feature */
4061 EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE handler: look the requested UUID up in the
 * exp_features[] table and dispatch to its setter, or report
 * NOT_SUPPORTED for an unknown UUID.
 */
4064 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4065 void *data, u16 data_len)
4067 struct mgmt_cp_set_exp_feature *cp = data;
4070 bt_dev_dbg(hdev, "sock %p", sk);
4072 for (i = 0; exp_features[i].uuid; i++) {
4073 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4074 return exp_features[i].set_func(sk, hdev, cp, data_len);
/* hdev may be NULL for index-less commands */
4077 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4078 MGMT_OP_SET_EXP_FEATURE,
4079 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask with every per-device connection flag below HCI_CONN_FLAG_MAX set */
4082 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* MGMT_OP_GET_DEVICE_FLAGS handler: report supported and current
 * per-device flags for an entry in either the BR/EDR accept list or
 * the LE connection-parameter list. Status stays INVALID_PARAMS if the
 * device is not found on the matching list.
 */
4084 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4087 struct mgmt_cp_get_device_flags *cp = data;
4088 struct mgmt_rp_get_device_flags rp;
4089 struct bdaddr_list_with_flags *br_params;
4090 struct hci_conn_params *params;
4091 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4092 u32 current_flags = 0;
4093 u8 status = MGMT_STATUS_INVALID_PARAMS;
4095 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4096 &cp->addr.bdaddr, cp->addr.type);
4100 memset(&rp, 0, sizeof(rp));
4102 if (cp->addr.type == BDADDR_BREDR) {
4103 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4109 current_flags = br_params->current_flags;
4111 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4112 le_addr_type(cp->addr.type));
4117 current_flags = params->current_flags;
4120 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4121 rp.addr.type = cp->addr.type;
4122 rp.supported_flags = cpu_to_le32(supported_flags);
4123 rp.current_flags = cpu_to_le32(current_flags);
4125 status = MGMT_STATUS_SUCCESS;
4128 hci_dev_unlock(hdev);
4130 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Emit MGMT_EV_DEVICE_FLAGS_CHANGED to all mgmt sockets except @sk (the
 * socket that issued the change, which gets a command response instead).
 */
4134 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4135 bdaddr_t *bdaddr, u8 bdaddr_type,
4136 u32 supported_flags, u32 current_flags)
4138 struct mgmt_ev_device_flags_changed ev;
4140 bacpy(&ev.addr.bdaddr, bdaddr);
4141 ev.addr.type = bdaddr_type;
4142 ev.supported_flags = cpu_to_le32(supported_flags);
4143 ev.current_flags = cpu_to_le32(current_flags);
4145 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: update the per-device flag bits for
 * one remote address, after verifying that only supported bits are set.
 * On success, broadcast DEVICE_FLAGS_CHANGED to other mgmt sockets.
 * NOTE(review): excerpt omits some original lines (lock call, null checks,
 * else branches, closing braces) relative to the original numbering.
 */
4148 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4151 struct mgmt_cp_set_device_flags *cp = data;
4152 struct bdaddr_list_with_flags *br_params;
4153 struct hci_conn_params *params;
4154 u8 status = MGMT_STATUS_INVALID_PARAMS;
4155 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4156 u32 current_flags = __le32_to_cpu(cp->current_flags);
4158 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4159 &cp->addr.bdaddr, cp->addr.type,
4160 __le32_to_cpu(current_flags));
/* Reject any flag bit outside the supported mask. */
4162 if ((supported_flags | current_flags) != supported_flags) {
4163 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4164 current_flags, supported_flags);
4170 if (cp->addr.type == BDADDR_BREDR) {
4171 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4176 br_params->current_flags = current_flags;
4177 status = MGMT_STATUS_SUCCESS;
4179 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4180 &cp->addr.bdaddr, cp->addr.type);
/* LE path: update hci_conn_params for the resolved LE address type. */
4183 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4184 le_addr_type(cp->addr.type));
4186 params->current_flags = current_flags;
4187 status = MGMT_STATUS_SUCCESS;
4189 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4191 le_addr_type(cp->addr.type));
4196 hci_dev_unlock(hdev);
/* Only notify other sockets after the flags actually changed. */
4198 if (status == MGMT_STATUS_SUCCESS)
4199 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4200 supported_flags, current_flags);
4202 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4203 &cp->addr, sizeof(cp->addr));
/* Emit MGMT_EV_ADV_MONITOR_ADDED for @handle to all mgmt sockets except
 * @sk (the originator receives a command response instead).
 */
4206 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4209 struct mgmt_ev_adv_monitor_added ev;
4211 ev.monitor_handle = cpu_to_le16(handle);
4213 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADV_MONITOR_REMOVED for @handle.  If a Remove Adv Monitor
 * command is pending for this handle, its socket is skipped (sk_skip) so
 * the issuer does not receive both the event and the command response.
 * NOTE(review): excerpt omits the lines that assign cp/sk_skip from the
 * pending command — intent inferred from the visible sk_skip usage.
 */
4216 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4218 struct mgmt_ev_adv_monitor_removed ev;
4219 struct mgmt_pending_cmd *cmd;
4220 struct sock *sk_skip = NULL;
4221 struct mgmt_cp_remove_adv_monitor *cp;
4223 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4227 if (cp->monitor_handle)
4231 ev.monitor_handle = cpu_to_le16(handle);
4233 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features, the handle/pattern limits, and the list of currently
 * registered monitor handles collected from hdev->adv_monitors_idr.
 * The reply is variable-size: header plus num_handles u16 entries.
 * NOTE(review): excerpt omits the lock call, the kmalloc NULL check and
 * the kfree/return tail relative to the original numbering.
 */
4236 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4237 void *data, u16 len)
4239 struct adv_monitor *monitor = NULL;
4240 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4243 __u32 supported = 0;
4245 __u16 num_handles = 0;
4246 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4248 BT_DBG("request for %s", hdev->name);
/* Only the MSFT OR-patterns feature is advertised, when available. */
4252 if (msft_monitor_supported(hdev))
4253 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Snapshot all registered monitor handles under the dev lock. */
4255 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4256 handles[num_handles++] = monitor->handle;
4258 hci_dev_unlock(hdev);
4260 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4261 rp = kmalloc(rp_size, GFP_KERNEL);
4265 /* All supported features are currently enabled */
4266 enabled = supported;
4268 rp->supported_features = cpu_to_le32(supported);
4269 rp->enabled_features = cpu_to_le32(enabled);
4270 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4271 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4272 rp->num_handles = cpu_to_le16(num_handles);
4274 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4276 err = mgmt_cmd_complete(sk, hdev->id,
4277 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4278 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for Add Adv Patterns Monitor (with or without RSSI):
 * resolve the pending command, reply to its socket with the new monitor
 * handle and the mapped status, and on success mark the monitor
 * REGISTERED, bump the monitor count and refresh background scanning.
 * NOTE(review): excerpt omits the lock call, the !cmd bail-out and the
 * success-only guard lines relative to the original numbering.
 */
4286 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4287 struct mgmt_rp_add_adv_patterns_monitor rp;
4288 struct mgmt_pending_cmd *cmd;
4289 struct adv_monitor *monitor;
/* RSSI variant takes priority; fall back to the plain opcode. */
4294 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4296 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4301 monitor = cmd->user_data;
4302 rp.monitor_handle = cpu_to_le16(monitor->handle);
4305 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4306 hdev->adv_monitors_cnt++;
4307 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4308 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4309 hci_update_background_scan(hdev);
4312 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4313 mgmt_status(status), &rp, sizeof(rp));
4314 mgmt_pending_remove(cmd);
4317 hci_dev_unlock(hdev);
4318 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4319 rp.monitor_handle, status);
/* Common worker for Add Adv Patterns Monitor / ..._RSSI.  Takes ownership
 * of @m: rejects the request if a conflicting command is pending, queues
 * a pending command, and hands the monitor to hci_add_adv_monitor().  If
 * the add does not need controller interaction (pending == false on the
 * success path), complete immediately and free the pending command; on
 * any failure free the monitor and return a status-only response.
 * NOTE(review): excerpt omits the early status!=SUCCESS check, lock call,
 * goto labels and some error-path lines relative to original numbering.
 */
4324 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4325 struct adv_monitor *m, u8 status,
4326 void *data, u16 len, u16 op)
4328 struct mgmt_rp_add_adv_patterns_monitor rp;
4329 struct mgmt_pending_cmd *cmd;
/* Serialize against LE toggling and other monitor add/remove commands. */
4338 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4339 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4340 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4341 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4342 status = MGMT_STATUS_BUSY;
4346 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4348 status = MGMT_STATUS_NO_RESOURCES;
4353 pending = hci_add_adv_monitor(hdev, m, &err);
/* Map hci_add_adv_monitor() errno values to mgmt status codes. */
4355 if (err == -ENOSPC || err == -ENOMEM)
4356 status = MGMT_STATUS_NO_RESOURCES;
4357 else if (err == -EINVAL)
4358 status = MGMT_STATUS_INVALID_PARAMS;
4360 status = MGMT_STATUS_FAILED;
4362 mgmt_pending_remove(cmd);
/* Monitor registered without controller round-trip: finish here. */
4367 mgmt_pending_remove(cmd);
4368 rp.monitor_handle = cpu_to_le16(m->handle);
4369 mgmt_adv_monitor_added(sk, hdev, m->handle);
4370 m->state = ADV_MONITOR_STATE_REGISTERED;
4371 hdev->adv_monitors_cnt++;
4373 hci_dev_unlock(hdev);
4374 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4378 hci_dev_unlock(hdev);
/* Error path: the monitor is always freed before replying. */
4383 hci_free_adv_monitor(hdev, m);
4384 hci_dev_unlock(hdev);
4385 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy RSSI threshold parameters from the mgmt command into the monitor.
 * When @rssi is NULL (plain Add Adv Patterns Monitor), fall back to the
 * least-constraining defaults so the MSFT backend behaves as if no RSSI
 * filtering was requested.
 * NOTE(review): excerpt omits the if/else braces around the two branches.
 */
4388 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4389 struct mgmt_adv_rssi_thresholds *rssi)
4392 m->rssi.low_threshold = rssi->low_threshold;
4393 m->rssi.low_threshold_timeout =
4394 __le16_to_cpu(rssi->low_threshold_timeout);
4395 m->rssi.high_threshold = rssi->high_threshold;
4396 m->rssi.high_threshold_timeout =
4397 __le16_to_cpu(rssi->high_threshold_timeout);
4398 m->rssi.sampling_period = rssi->sampling_period;
4400 /* Default values. These numbers are the least constricting
4401 * parameters for MSFT API to work, so it behaves as if there
4402 * are no rssi parameter to consider. May need to be changed
4403 * if other API are to be supported.
4405 m->rssi.low_threshold = -127;
4406 m->rssi.low_threshold_timeout = 60;
4407 m->rssi.high_threshold = -127;
4408 m->rssi.high_threshold_timeout = 0;
4409 m->rssi.sampling_period = 0;
/* Validate and copy @pattern_count advertising patterns into @m->patterns.
 * Each pattern's offset/length must fit within HCI_MAX_AD_LENGTH bytes of
 * AD data.  Returns a mgmt status code: INVALID_PARAMS on a bad pattern,
 * NO_RESOURCES on allocation failure, SUCCESS otherwise.  Patterns already
 * added stay on the list on failure — the caller frees the monitor.
 */
4413 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4414 struct mgmt_adv_pattern *patterns)
4416 u8 offset = 0, length = 0;
4417 struct adv_pattern *p = NULL;
4420 for (i = 0; i < pattern_count; i++) {
4421 offset = patterns[i].offset;
4422 length = patterns[i].length;
/* Pattern bytes must lie entirely inside the AD payload. */
4423 if (offset >= HCI_MAX_AD_LENGTH ||
4424 length > HCI_MAX_AD_LENGTH ||
4425 (offset + length) > HCI_MAX_AD_LENGTH)
4426 return MGMT_STATUS_INVALID_PARAMS;
4428 p = kmalloc(sizeof(*p), GFP_KERNEL);
4430 return MGMT_STATUS_NO_RESOURCES;
4432 p->ad_type = patterns[i].ad_type;
4433 p->offset = patterns[i].offset;
4434 p->length = patterns[i].length;
4435 memcpy(p->value, patterns[i].value, p->length);
4437 INIT_LIST_HEAD(&p->list);
4438 list_add(&p->list, &m->patterns);
4441 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds).
 * Validates the variable-length command (header + pattern_count entries),
 * allocates the monitor, fills default RSSI values and parses the
 * patterns, then delegates to __add_adv_patterns_monitor() which takes
 * ownership of @m and sends the response.
 * NOTE(review): excerpt omits goto labels / `done:` style tail lines.
 */
4444 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4445 void *data, u16 len)
4447 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4448 struct adv_monitor *m = NULL;
4449 u8 status = MGMT_STATUS_SUCCESS;
4450 size_t expected_size = sizeof(*cp);
4452 BT_DBG("request for %s", hdev->name);
/* Must carry at least one pattern beyond the fixed header. */
4454 if (len <= sizeof(*cp)) {
4455 status = MGMT_STATUS_INVALID_PARAMS;
4459 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4460 if (len != expected_size) {
4461 status = MGMT_STATUS_INVALID_PARAMS;
4465 m = kzalloc(sizeof(*m), GFP_KERNEL);
4467 status = MGMT_STATUS_NO_RESOURCES;
4471 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi -> parse_adv_monitor_rssi() installs its defaults. */
4473 parse_adv_monitor_rssi(m, NULL);
4474 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4477 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4478 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same flow as
 * add_adv_patterns_monitor() but the command additionally carries RSSI
 * thresholds, which are copied into the monitor before the patterns are
 * parsed and ownership passes to __add_adv_patterns_monitor().
 * NOTE(review): excerpt omits goto labels / tail lines.
 */
4481 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4482 void *data, u16 len)
4484 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4485 struct adv_monitor *m = NULL;
4486 u8 status = MGMT_STATUS_SUCCESS;
4487 size_t expected_size = sizeof(*cp);
4489 BT_DBG("request for %s", hdev->name);
4491 if (len <= sizeof(*cp)) {
4492 status = MGMT_STATUS_INVALID_PARAMS;
4496 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4497 if (len != expected_size) {
4498 status = MGMT_STATUS_INVALID_PARAMS;
4502 m = kzalloc(sizeof(*m), GFP_KERNEL);
4504 status = MGMT_STATUS_NO_RESOURCES;
4508 INIT_LIST_HEAD(&m->patterns);
/* Real RSSI thresholds from the command, unlike the non-RSSI variant. */
4510 parse_adv_monitor_rssi(m, &cp->rssi);
4511 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4514 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4515 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for Remove Adv Monitor: reply to the pending
 * command's socket with the handle that was requested and the mapped
 * status, refresh background scanning, and drop the pending command.
 * NOTE(review): excerpt omits the lock call, !cmd bail-out and the line
 * assigning cp from cmd->param relative to the original numbering.
 */
4518 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4520 struct mgmt_rp_remove_adv_monitor rp;
4521 struct mgmt_cp_remove_adv_monitor *cp;
4522 struct mgmt_pending_cmd *cmd;
4527 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* Echo back the (already little-endian) handle from the request. */
4532 rp.monitor_handle = cp->monitor_handle;
4535 hci_update_background_scan(hdev);
4537 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4538 mgmt_status(status), &rp, sizeof(rp));
4539 mgmt_pending_remove(cmd);
4542 hci_dev_unlock(hdev);
4543 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4544 rp.monitor_handle, status);
/* MGMT_OP_REMOVE_ADV_MONITOR handler.  A handle of 0 removes all
 * monitors; a non-zero handle removes just that monitor.  If the removal
 * needs no controller interaction (pending == false), complete the
 * command immediately; otherwise the reply comes later via
 * mgmt_remove_adv_monitor_complete().
 * NOTE(review): excerpt omits lock calls, goto labels and several guard
 * lines (e.g. the !cmd check) relative to the original numbering.
 */
4549 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4550 void *data, u16 len)
4552 struct mgmt_cp_remove_adv_monitor *cp = data;
4553 struct mgmt_rp_remove_adv_monitor rp;
4554 struct mgmt_pending_cmd *cmd;
4555 u16 handle = __le16_to_cpu(cp->monitor_handle);
4559 BT_DBG("request for %s", hdev->name);
4560 rp.monitor_handle = cp->monitor_handle;
/* Serialize against LE toggling and other monitor add/remove commands. */
4564 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4565 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4566 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4567 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4568 status = MGMT_STATUS_BUSY;
4572 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4574 status = MGMT_STATUS_NO_RESOURCES;
/* handle == 0 is the "remove all monitors" wildcard. */
4579 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4581 pending = hci_remove_all_adv_monitor(hdev, &err);
4584 mgmt_pending_remove(cmd);
4587 status = MGMT_STATUS_INVALID_INDEX;
4589 status = MGMT_STATUS_FAILED;
4594 /* monitor can be removed without forwarding request to controller */
4596 mgmt_pending_remove(cmd);
4597 hci_dev_unlock(hdev);
4599 return mgmt_cmd_complete(sk, hdev->id,
4600 MGMT_OP_REMOVE_ADV_MONITOR,
4601 MGMT_STATUS_SUCCESS,
4605 hci_dev_unlock(hdev);
4609 hci_dev_unlock(hdev);
4610 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* HCI completion callback for Read Local OOB Data.  Builds the mgmt reply
 * from either the legacy response (P-192 hash/rand only — the reply is
 * shrunk by the P-256 fields) or the extended response (P-192 + P-256),
 * after bounds-checking skb->len.  Any failure turns into a status-only
 * reply; the pending command is always removed at the end.
 * NOTE(review): excerpt omits the !cmd bail-out and goto labels.
 */
4614 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4615 u16 opcode, struct sk_buff *skb)
4617 struct mgmt_rp_read_local_oob_data mgmt_rp;
4618 size_t rp_size = sizeof(mgmt_rp);
4619 struct mgmt_pending_cmd *cmd;
4621 bt_dev_dbg(hdev, "status %u", status);
4623 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4627 if (status || !skb) {
4628 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4629 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4633 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4635 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4636 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short controller response before reading it. */
4638 if (skb->len < sizeof(*rp)) {
4639 mgmt_cmd_status(cmd->sk, hdev->id,
4640 MGMT_OP_READ_LOCAL_OOB_DATA,
4641 MGMT_STATUS_FAILED);
4645 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4646 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy response has no P-256 data: trim those fields off the reply. */
4648 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4650 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4652 if (skb->len < sizeof(*rp)) {
4653 mgmt_cmd_status(cmd->sk, hdev->id,
4654 MGMT_OP_READ_LOCAL_OOB_DATA,
4655 MGMT_STATUS_FAILED);
4659 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4660 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4662 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4663 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4666 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4667 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4670 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller and no duplicate pending request.  Issues the extended HCI
 * read when BR/EDR Secure Connections is enabled, the legacy read
 * otherwise; the reply is sent from read_local_oob_data_complete().
 * NOTE(review): excerpt omits the lock call, goto labels and the
 * MGMT_STATUS_BUSY argument line relative to the original numbering.
 */
4673 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4674 void *data, u16 data_len)
4676 struct mgmt_pending_cmd *cmd;
4677 struct hci_request req;
4680 bt_dev_dbg(hdev, "sock %p", sk);
4684 if (!hdev_is_powered(hdev)) {
4685 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4686 MGMT_STATUS_NOT_POWERED);
4690 if (!lmp_ssp_capable(hdev)) {
4691 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4692 MGMT_STATUS_NOT_SUPPORTED);
/* Only one Read Local OOB Data request may be in flight. */
4696 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4697 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4702 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4708 hci_req_init(&req, hdev);
4710 if (bredr_sc_enabled(hdev))
4711 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4713 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4715 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4717 mgmt_pending_remove(cmd);
4720 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Two command sizes are accepted:
 * the short form carries only P-192 hash/rand (BR/EDR only), the extended
 * form carries both P-192 and P-256 values.  All-zero hash/rand pairs
 * disable the corresponding OOB data set; for LE addresses the P-192
 * values must be zero since legacy SMP OOB is not implemented.
 * NOTE(review): excerpt omits lock call, goto labels and the NULL
 * assignments to rand192/hash192/rand256/hash256 in the zero-key paths.
 */
4724 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4725 void *data, u16 len)
4727 struct mgmt_addr_info *addr = data;
4730 bt_dev_dbg(hdev, "sock %p", sk);
4732 if (!bdaddr_type_is_valid(addr->type))
4733 return mgmt_cmd_complete(sk, hdev->id,
4734 MGMT_OP_ADD_REMOTE_OOB_DATA,
4735 MGMT_STATUS_INVALID_PARAMS,
4736 addr, sizeof(*addr));
/* Short form: legacy P-192-only OOB data, valid for BR/EDR only. */
4740 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4741 struct mgmt_cp_add_remote_oob_data *cp = data;
4744 if (cp->addr.type != BDADDR_BREDR) {
4745 err = mgmt_cmd_complete(sk, hdev->id,
4746 MGMT_OP_ADD_REMOTE_OOB_DATA,
4747 MGMT_STATUS_INVALID_PARAMS,
4748 &cp->addr, sizeof(cp->addr));
4752 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4753 cp->addr.type, cp->hash,
4754 cp->rand, NULL, NULL);
4756 status = MGMT_STATUS_FAILED;
4758 status = MGMT_STATUS_SUCCESS;
4760 err = mgmt_cmd_complete(sk, hdev->id,
4761 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4762 &cp->addr, sizeof(cp->addr));
4763 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4764 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4765 u8 *rand192, *hash192, *rand256, *hash256;
4768 if (bdaddr_type_is_le(cp->addr.type)) {
4769 /* Enforce zero-valued 192-bit parameters as
4770 * long as legacy SMP OOB isn't implemented.
4772 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4773 memcmp(cp->hash192, ZERO_KEY, 16)) {
4774 err = mgmt_cmd_complete(sk, hdev->id,
4775 MGMT_OP_ADD_REMOTE_OOB_DATA,
4776 MGMT_STATUS_INVALID_PARAMS,
4777 addr, sizeof(*addr));
4784 /* In case one of the P-192 values is set to zero,
4785 * then just disable OOB data for P-192.
4787 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4788 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4792 rand192 = cp->rand192;
4793 hash192 = cp->hash192;
4797 /* In case one of the P-256 values is set to zero, then just
4798 * disable OOB data for P-256.
4800 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4801 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4805 rand256 = cp->rand256;
4806 hash256 = cp->hash256;
4809 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4810 cp->addr.type, hash192, rand192,
4813 status = MGMT_STATUS_FAILED;
4815 status = MGMT_STATUS_SUCCESS;
4817 err = mgmt_cmd_complete(sk, hdev->id,
4818 MGMT_OP_ADD_REMOTE_OOB_DATA,
4819 status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed command. */
4821 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4823 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4824 MGMT_STATUS_INVALID_PARAMS);
4828 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler (BR/EDR only).  BDADDR_ANY
 * clears all stored remote OOB data; otherwise remove the entry for the
 * given address, mapping a lookup failure to INVALID_PARAMS.
 * NOTE(review): excerpt omits lock call, goto labels and closing braces.
 */
4832 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4833 void *data, u16 len)
4835 struct mgmt_cp_remove_remote_oob_data *cp = data;
4839 bt_dev_dbg(hdev, "sock %p", sk);
4841 if (cp->addr.type != BDADDR_BREDR)
4842 return mgmt_cmd_complete(sk, hdev->id,
4843 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4844 MGMT_STATUS_INVALID_PARAMS,
4845 &cp->addr, sizeof(cp->addr));
/* Wildcard address clears the whole remote OOB store. */
4849 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4850 hci_remote_oob_data_clear(hdev);
4851 status = MGMT_STATUS_SUCCESS;
4855 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4857 status = MGMT_STATUS_INVALID_PARAMS;
4859 status = MGMT_STATUS_SUCCESS;
4862 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4863 status, &cp->addr, sizeof(cp->addr));
4865 hci_dev_unlock(hdev);
/* Completion hook for the three Start Discovery command flavors: find
 * whichever variant is pending, run its cmd_complete with the mapped
 * status, then wake the suspend machinery if discovery was being
 * unpaused as part of resume handling.
 * NOTE(review): excerpt omits the lock call and fall-through !cmd checks
 * between the three pending_find() calls.
 */
4869 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4871 struct mgmt_pending_cmd *cmd;
4873 bt_dev_dbg(hdev, "status %u", status);
4877 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4879 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4882 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4885 cmd->cmd_complete(cmd, mgmt_status(status));
4886 mgmt_pending_remove(cmd);
4889 hci_dev_unlock(hdev);
4891 /* Handle suspend notifier */
4892 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4893 hdev->suspend_tasks)) {
4894 bt_dev_dbg(hdev, "Unpaused discovery");
4895 wake_up(&hdev->suspend_wait_q);
/* Check whether discovery @type is usable on this controller.  Writes the
 * mgmt status explaining why not into *mgmt_status (LE support is needed
 * for LE and interleaved, BR/EDR support for BR/EDR; anything else is
 * INVALID_PARAMS).
 * NOTE(review): excerpt omits the switch statement line, per-case
 * return/break lines and the closing return.
 */
4899 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4900 uint8_t *mgmt_status)
4903 case DISCOV_TYPE_LE:
4904 *mgmt_status = mgmt_le_support(hdev);
4908 case DISCOV_TYPE_INTERLEAVED:
4909 *mgmt_status = mgmt_le_support(hdev);
4913 case DISCOV_TYPE_BREDR:
4914 *mgmt_status = mgmt_bredr_support(hdev);
4919 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Common implementation behind Start Discovery and Start Limited
 * Discovery (@op selects which).  Rejects when unpowered, when discovery
 * is already running / periodic inquiry is active, when the type isn't
 * supported, or while discovery is paused for suspend.  Otherwise resets
 * the filter, records the requested type, queues a pending command and
 * kicks the discov_update work; completion replies asynchronously.
 * NOTE(review): excerpt omits lock calls, goto labels and the !cmd check.
 */
4926 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4927 u16 op, void *data, u16 len)
4929 struct mgmt_cp_start_discovery *cp = data;
4930 struct mgmt_pending_cmd *cmd;
4934 bt_dev_dbg(hdev, "sock %p", sk);
4938 if (!hdev_is_powered(hdev)) {
4939 err = mgmt_cmd_complete(sk, hdev->id, op,
4940 MGMT_STATUS_NOT_POWERED,
4941 &cp->type, sizeof(cp->type));
4945 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4946 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4947 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4948 &cp->type, sizeof(cp->type));
4952 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4953 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4954 &cp->type, sizeof(cp->type));
4958 /* Can't start discovery when it is paused */
4959 if (hdev->discovery_paused) {
4960 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4961 &cp->type, sizeof(cp->type));
4965 /* Clear the discovery filter first to free any previously
4966 * allocated memory for the UUID list.
4968 hci_discovery_filter_clear(hdev);
4970 hdev->discovery.type = cp->type;
4971 hdev->discovery.report_invalid_rssi = false;
/* Limited discovery restricts results to limited-discoverable devices. */
4972 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4973 hdev->discovery.limited = true;
4975 hdev->discovery.limited = false;
4977 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4983 cmd->cmd_complete = generic_cmd_complete;
4985 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4986 queue_work(hdev->req_workqueue, &hdev->discov_update);
4990 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: thin wrapper around the shared worker. */
4994 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4995 void *data, u16 len)
4997 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: thin wrapper around the shared worker. */
5001 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5002 void *data, u16 len)
5004 return start_discovery_internal(sk, hdev,
5005 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for Start Service Discovery: reply on the pending
 * command's socket with the given status.
 * NOTE(review): excerpt omits the final argument lines of the call.
 */
5009 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
5012 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like Start Discovery but with
 * result filtering by RSSI and an optional list of 128-bit service UUIDs.
 * Validates power state, busy/paused discovery, the variable-length UUID
 * payload (uuid_count bounded so the total length fits in u16) and the
 * discovery type, then installs the filter, duplicates the UUID list and
 * queues the discov_update work.  Completion replies asynchronously via
 * service_discovery_cmd_complete().
 * NOTE(review): excerpt omits lock calls, goto labels, the !cmd check and
 * some argument-continuation lines relative to the original numbering.
 */
5016 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5017 void *data, u16 len)
5019 struct mgmt_cp_start_service_discovery *cp = data;
5020 struct mgmt_pending_cmd *cmd;
/* Upper bound keeps sizeof(*cp) + uuid_count*16 within a u16 length. */
5021 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5022 u16 uuid_count, expected_len;
5026 bt_dev_dbg(hdev, "sock %p", sk);
5030 if (!hdev_is_powered(hdev)) {
5031 err = mgmt_cmd_complete(sk, hdev->id,
5032 MGMT_OP_START_SERVICE_DISCOVERY,
5033 MGMT_STATUS_NOT_POWERED,
5034 &cp->type, sizeof(cp->type));
5038 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5039 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5040 err = mgmt_cmd_complete(sk, hdev->id,
5041 MGMT_OP_START_SERVICE_DISCOVERY,
5042 MGMT_STATUS_BUSY, &cp->type,
5047 if (hdev->discovery_paused) {
5048 err = mgmt_cmd_complete(sk, hdev->id,
5049 MGMT_OP_START_SERVICE_DISCOVERY,
5050 MGMT_STATUS_BUSY, &cp->type,
5055 uuid_count = __le16_to_cpu(cp->uuid_count);
5056 if (uuid_count > max_uuid_count) {
5057 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5059 err = mgmt_cmd_complete(sk, hdev->id,
5060 MGMT_OP_START_SERVICE_DISCOVERY,
5061 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Command length must exactly match header + uuid_count UUIDs. */
5066 expected_len = sizeof(*cp) + uuid_count * 16;
5067 if (expected_len != len) {
5068 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5070 err = mgmt_cmd_complete(sk, hdev->id,
5071 MGMT_OP_START_SERVICE_DISCOVERY,
5072 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5077 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5078 err = mgmt_cmd_complete(sk, hdev->id,
5079 MGMT_OP_START_SERVICE_DISCOVERY,
5080 status, &cp->type, sizeof(cp->type));
5084 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5091 cmd->cmd_complete = service_discovery_cmd_complete;
5093 /* Clear the discovery filter first to free any previously
5094 * allocated memory for the UUID list.
5096 hci_discovery_filter_clear(hdev);
5098 hdev->discovery.result_filtering = true;
5099 hdev->discovery.type = cp->type;
5100 hdev->discovery.rssi = cp->rssi;
5101 hdev->discovery.uuid_count = uuid_count;
5103 if (uuid_count > 0) {
5104 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5106 if (!hdev->discovery.uuids) {
5107 err = mgmt_cmd_complete(sk, hdev->id,
5108 MGMT_OP_START_SERVICE_DISCOVERY,
5110 &cp->type, sizeof(cp->type));
5111 mgmt_pending_remove(cmd);
5116 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5117 queue_work(hdev->req_workqueue, &hdev->discov_update);
5121 hci_dev_unlock(hdev);
/* Completion hook for Stop Discovery: finish the pending command (if
 * any) with the mapped status, then wake the suspend machinery if this
 * stop was part of pausing discovery for suspend.
 * NOTE(review): excerpt omits the lock call and the if (cmd) guard.
 */
5125 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5127 struct mgmt_pending_cmd *cmd;
5129 bt_dev_dbg(hdev, "status %u", status);
5133 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5135 cmd->cmd_complete(cmd, mgmt_status(status));
5136 mgmt_pending_remove(cmd);
5139 hci_dev_unlock(hdev);
5141 /* Handle suspend notifier */
5142 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5143 bt_dev_dbg(hdev, "Paused discovery");
5144 wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY handler: reject if no discovery is active or if
 * the requested type does not match the running one; otherwise queue a
 * pending command, flip state to STOPPING and let the discov_update work
 * perform the actual stop.  Reply comes via mgmt_stop_discovery_complete().
 * NOTE(review): excerpt omits lock calls, goto labels and the !cmd check.
 */
5148 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5151 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5152 struct mgmt_pending_cmd *cmd;
5155 bt_dev_dbg(hdev, "sock %p", sk);
5159 if (!hci_discovery_active(hdev)) {
5160 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5161 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5162 sizeof(mgmt_cp->type));
/* The type to stop must match the type that was started. */
5166 if (hdev->discovery.type != mgmt_cp->type) {
5167 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5168 MGMT_STATUS_INVALID_PARAMS,
5169 &mgmt_cp->type, sizeof(mgmt_cp->type));
5173 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5179 cmd->cmd_complete = generic_cmd_complete;
5181 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5182 queue_work(hdev->req_workqueue, &hdev->discov_update);
5186 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, user space confirms
 * whether a device's name is already known.  Known -> NAME_KNOWN (no
 * remote name request needed); unknown -> NAME_NEEDED and the inquiry
 * cache resolve list is updated.
 * NOTE(review): excerpt omits lock calls, goto labels and the !e check
 * body relative to the original numbering.
 */
5190 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5193 struct mgmt_cp_confirm_name *cp = data;
5194 struct inquiry_entry *e;
5197 bt_dev_dbg(hdev, "sock %p", sk);
5201 if (!hci_discovery_active(hdev)) {
5202 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5203 MGMT_STATUS_FAILED, &cp->addr,
/* Only entries still in the "name unknown" cache can be confirmed. */
5208 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5210 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5211 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5216 if (cp->name_known) {
5217 e->name_state = NAME_KNOWN;
5220 e->name_state = NAME_NEEDED;
5221 hci_inquiry_cache_update_resolve(hdev, e);
5224 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5225 &cp->addr, sizeof(cp->addr));
5228 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to hdev->reject_list and
 * broadcast MGMT_EV_DEVICE_BLOCKED to other sockets on success.
 * NOTE(review): excerpt omits lock calls, goto labels and the err check
 * between the list add and the event.
 */
5235 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5239 bt_dev_dbg(hdev, "sock %p", sk);
5241 if (!bdaddr_type_is_valid(cp->addr.type))
5242 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5243 MGMT_STATUS_INVALID_PARAMS,
5244 &cp->addr, sizeof(cp->addr));
5248 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5251 status = MGMT_STATUS_FAILED;
/* sk is excluded from the broadcast; it gets the command response. */
5255 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5257 status = MGMT_STATUS_SUCCESS;
5260 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5261 &cp->addr, sizeof(cp->addr));
5263 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from
 * hdev->reject_list (absence maps to INVALID_PARAMS) and broadcast
 * MGMT_EV_DEVICE_UNBLOCKED to other sockets on success.
 * NOTE(review): excerpt omits lock calls, goto labels and the err check.
 */
5268 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5271 struct mgmt_cp_unblock_device *cp = data;
5275 bt_dev_dbg(hdev, "sock %p", sk);
5277 if (!bdaddr_type_is_valid(cp->addr.type))
5278 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5279 MGMT_STATUS_INVALID_PARAMS,
5280 &cp->addr, sizeof(cp->addr));
5284 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5287 status = MGMT_STATUS_INVALID_PARAMS;
5291 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5293 status = MGMT_STATUS_SUCCESS;
5296 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5297 &cp->addr, sizeof(cp->addr));
5299 hci_dev_unlock(hdev);
5304 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5307 struct mgmt_cp_set_device_id *cp = data;
5308 struct hci_request req;
5312 bt_dev_dbg(hdev, "sock %p", sk);
5314 source = __le16_to_cpu(cp->source);
5316 if (source > 0x0002)
5317 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5318 MGMT_STATUS_INVALID_PARAMS);
5322 hdev->devid_source = source;
5323 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5324 hdev->devid_product = __le16_to_cpu(cp->product);
5325 hdev->devid_version = __le16_to_cpu(cp->version);
5327 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5330 hci_req_init(&req, hdev);
5331 __hci_req_update_eir(&req);
5332 hci_req_run(&req, NULL);
5334 hci_dev_unlock(hdev);
/* Request callback for re-enabling instance advertising: debug-log only. */
5339 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5342 bt_dev_dbg(hdev, "status %u", status);
/* HCI completion for Set Advertising.  On error, fail all pending
 * SET_ADVERTISING commands with the mapped status.  On success, sync the
 * HCI_ADVERTISING flag with the controller's HCI_LE_ADV state, answer the
 * pending commands, emit New Settings, service suspend pause/unpause
 * waiters, and — if Set Advertising was just turned off while instance
 * advertising exists — reschedule multi-instance advertising.
 * NOTE(review): excerpt omits lock calls, goto labels and several
 * branch/guard lines relative to the original numbering.
 */
5345 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5348 struct cmd_lookup match = { NULL, hdev };
5349 struct hci_request req;
5351 struct adv_info *adv_instance;
5357 u8 mgmt_err = mgmt_status(status);
5359 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5360 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's LE advertising state into the mgmt flag. */
5364 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5365 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5367 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5369 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5372 new_settings(hdev, match.sk);
5377 /* Handle suspend notifier */
5378 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5379 hdev->suspend_tasks)) {
5380 bt_dev_dbg(hdev, "Paused advertising");
5381 wake_up(&hdev->suspend_wait_q);
5382 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5383 hdev->suspend_tasks)) {
5384 bt_dev_dbg(hdev, "Unpaused advertising");
5385 wake_up(&hdev->suspend_wait_q);
5388 /* If "Set Advertising" was just disabled and instance advertising was
5389 * set up earlier, then re-enable multi-instance advertising.
5391 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5392 list_empty(&hdev->adv_instances))
5395 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one. */
5397 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5398 struct adv_info, list);
5402 instance = adv_instance->instance;
5405 hci_req_init(&req, hdev);
5407 err = __hci_req_schedule_adv_instance(&req, instance, true);
5410 err = hci_req_run(&req, enable_advertising_instance);
5413 bt_dev_err(hdev, "failed to re-configure advertising");
5416 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler.  val: 0x00 = off, 0x01 = on,
 * 0x02 = on + connectable.  Rejected without LE support, with the
 * experimental LL-privacy feature enabled, or while advertising is
 * paused for suspend.  When no HCI traffic is needed (powered off,
 * no effective change, LE connections or active LE scan in progress)
 * the flags are toggled directly and the response is sent inline;
 * otherwise an HCI request is built and completion is handled in
 * set_advertising_complete().
 * NOTE(review): excerpt omits lock calls, goto labels, the `val`
 * assignment and several guard/brace lines per the original numbering.
 */
5419 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5422 struct mgmt_mode *cp = data;
5423 struct mgmt_pending_cmd *cmd;
5424 struct hci_request req;
5428 bt_dev_dbg(hdev, "sock %p", sk);
5430 status = mgmt_le_support(hdev);
5432 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5435 /* Enabling the experimental LL Privay support disables support for
5438 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5439 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5440 MGMT_STATUS_NOT_SUPPORTED);
5442 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5444 MGMT_STATUS_INVALID_PARAMS)
5446 if (hdev->advertising_paused)
5447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5454 /* The following conditions are ones which mean that we should
5455 * not do any HCI communication but directly send a mgmt
5456 * response to user space (after toggling the flag if
5459 if (!hdev_is_powered(hdev) ||
5460 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5461 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5462 hci_conn_num(hdev, LE_LINK) > 0 ||
5463 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5464 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5468 hdev->cur_adv_instance = 0x00;
5469 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5470 if (cp->val == 0x02)
5471 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5473 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5475 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5476 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5479 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Broadcast New Settings only when a flag actually changed. */
5484 err = new_settings(hdev, sk);
5489 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5490 pending_find(MGMT_OP_SET_LE, hdev)) {
5491 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5496 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5502 hci_req_init(&req, hdev);
5504 if (cp->val == 0x02)
5505 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5507 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5509 cancel_adv_timeout(hdev);
5512 /* Switch to instance "0" for the Set Advertising setting.
5513 * We cannot use update_[adv|scan_rsp]_data() here as the
5514 * HCI_ADVERTISING flag is not yet set.
5516 hdev->cur_adv_instance = 0x00;
5518 if (ext_adv_capable(hdev)) {
5519 __hci_req_start_ext_adv(&req, 0x00);
5521 __hci_req_update_adv_data(&req, 0x00);
5522 __hci_req_update_scan_rsp_data(&req, 0x00);
5523 __hci_req_enable_advertising(&req);
5526 __hci_req_disable_advertising(&req);
5529 err = hci_req_run(&req, set_advertising_complete);
5531 mgmt_pending_remove(cmd);
5534 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed on LE-capable,
 * powered-off controllers.  A non-ANY address must not be BDADDR_NONE and
 * must have the two most significant bits set, as required for a static
 * random address; BDADDR_ANY clears the static address.
 * NOTE(review): excerpt omits the lock call and an err/changed guard.
 */
5538 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5539 void *data, u16 len)
5541 struct mgmt_cp_set_static_address *cp = data;
5544 bt_dev_dbg(hdev, "sock %p", sk);
5546 if (!lmp_le_capable(hdev))
5547 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5548 MGMT_STATUS_NOT_SUPPORTED);
/* Address may only change while the controller is powered off. */
5550 if (hdev_is_powered(hdev))
5551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5552 MGMT_STATUS_REJECTED);
5554 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5555 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5556 return mgmt_cmd_status(sk, hdev->id,
5557 MGMT_OP_SET_STATIC_ADDRESS,
5558 MGMT_STATUS_INVALID_PARAMS);
5560 /* Two most significant bits shall be set */
5561 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5562 return mgmt_cmd_status(sk, hdev->id,
5563 MGMT_OP_SET_STATIC_ADDRESS,
5564 MGMT_STATUS_INVALID_PARAMS);
5569 bacpy(&hdev->static_addr, &cp->bdaddr);
5571 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5575 err = new_settings(hdev, sk);
5578 hci_dev_unlock(hdev);
/* MGMT Set Scan Parameters command handler.
 *
 * Validates and stores the LE scan interval and window. Both values must
 * lie within the HCI-defined range 0x0004-0x4000, and the window must not
 * exceed the interval. If a passive background scan is currently running
 * (HCI_LE_SCAN set while discovery is stopped), the scan is restarted so
 * the new parameters take effect immediately.
 */
5582 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5583 void *data, u16 len)
5585 struct mgmt_cp_set_scan_params *cp = data;
5586 __u16 interval, window;
5589 bt_dev_dbg(hdev, "sock %p", sk);
5591 if (!lmp_le_capable(hdev))
5592 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5593 MGMT_STATUS_NOT_SUPPORTED);
5595 interval = __le16_to_cpu(cp->interval);
/* Range limits come from the HCI LE Set Scan Parameters command. */
5597 if (interval < 0x0004 || interval > 0x4000)
5598 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5599 MGMT_STATUS_INVALID_PARAMS);
5601 window = __le16_to_cpu(cp->window);
5603 if (window < 0x0004 || window > 0x4000)
5604 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5605 MGMT_STATUS_INVALID_PARAMS);
5607 if (window > interval)
5608 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5609 MGMT_STATUS_INVALID_PARAMS);
5613 hdev->le_scan_interval = interval;
5614 hdev->le_scan_window = window;
5616 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5619 /* If background scan is running, restart it so new parameters are
5622 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5623 hdev->discovery.state == DISCOVERY_STOPPED) {
5624 struct hci_request req;
5626 hci_req_init(&req, hdev);
/* Disable and re-enable passive scanning with the new values. */
5628 hci_req_add_le_scan_disable(&req, false);
5629 hci_req_add_le_passive_scan(&req);
5631 hci_req_run(&req, NULL);
5634 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable.
 *
 * Looks up the pending mgmt command; on HCI error it reports the mapped
 * mgmt status to the requester, otherwise it updates HCI_FAST_CONNECTABLE
 * according to the requested mode, sends the settings response and emits
 * a New Settings event.
 */
5639 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5642 struct mgmt_pending_cmd *cmd;
5644 bt_dev_dbg(hdev, "status 0x%02x", status);
5648 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5653 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5654 mgmt_status(status));
5656 struct mgmt_mode *cp = cmd->param;
/* Mirror the requested mode into the device flag. */
5659 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5661 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5663 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5664 new_settings(hdev, cmd->sk);
5667 mgmt_pending_remove(cmd);
5670 hci_dev_unlock(hdev);
/* MGMT Set Fast Connectable command handler.
 *
 * Requires BR/EDR to be enabled and a controller of at least Bluetooth 1.2
 * (fast connectable relies on page-scan parameter tuning). The value must
 * be 0x00 or 0x01. When powered off only the flag is toggled; when powered
 * on an HCI request is issued and completion is handled asynchronously in
 * fast_connectable_complete().
 */
5673 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5674 void *data, u16 len)
5676 struct mgmt_mode *cp = data;
5677 struct mgmt_pending_cmd *cmd;
5678 struct hci_request req;
5681 bt_dev_dbg(hdev, "sock %p", sk);
5683 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5684 hdev->hci_ver < BLUETOOTH_VER_1_2)
5685 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5686 MGMT_STATUS_NOT_SUPPORTED);
5688 if (cp->val != 0x00 && cp->val != 0x01)
5689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5690 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Fast Connectable may be in flight at a time. */
5694 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5695 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested state already matches the flag. */
5700 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5701 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* Powered off: just toggle the flag, no HCI traffic needed. */
5706 if (!hdev_is_powered(hdev)) {
5707 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5708 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5710 new_settings(hdev, sk);
5714 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5721 hci_req_init(&req, hdev);
5723 __hci_req_write_fast_connectable(&req, cp->val);
5725 err = hci_req_run(&req, fast_connectable_complete);
5727 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5728 MGMT_STATUS_FAILED);
5729 mgmt_pending_remove(cmd);
5733 hci_dev_unlock(hdev);
/* HCI request completion callback for Set BR/EDR.
 *
 * On failure, HCI_BREDR_ENABLED is cleared again (the handler had set it
 * optimistically before issuing the request) and an error status is sent.
 * On success the settings response and New Settings event are emitted.
 */
5738 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5740 struct mgmt_pending_cmd *cmd;
5742 bt_dev_dbg(hdev, "status 0x%02x", status);
5746 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5751 u8 mgmt_err = mgmt_status(status);
5753 /* We need to restore the flag if related HCI commands
5756 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5758 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5760 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5761 new_settings(hdev, cmd->sk);
5764 mgmt_pending_remove(cmd);
5767 hci_dev_unlock(hdev);
/* MGMT Set BR/EDR command handler.
 *
 * Enables or disables BR/EDR on a dual-mode controller. LE must remain
 * enabled (LE-only operation is configured by disabling BR/EDR, never the
 * other way round). Disabling while powered on is rejected, as is
 * re-enabling when the device operates with a static address or has
 * secure connections enabled (see the detailed comment below). When a
 * state change is needed on a powered device, an HCI request updates fast
 * connectable, scan mode and advertising data, completing in
 * set_bredr_complete().
 */
5770 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5772 struct mgmt_mode *cp = data;
5773 struct mgmt_pending_cmd *cmd;
5774 struct hci_request req;
5777 bt_dev_dbg(hdev, "sock %p", sk);
5779 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5780 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5781 MGMT_STATUS_NOT_SUPPORTED);
5783 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5785 MGMT_STATUS_REJECTED);
5787 if (cp->val != 0x00 && cp->val != 0x01)
5788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5789 MGMT_STATUS_INVALID_PARAMS);
/* Requested state already in effect: just echo the settings back. */
5793 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5794 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5798 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR while off also clears all BR/EDR-only
 * settings that would be meaningless in LE-only mode.
 */
5800 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5801 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5802 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5803 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5804 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5807 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5809 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5813 err = new_settings(hdev, sk);
5817 /* Reject disabling when powered on */
5819 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5820 MGMT_STATUS_REJECTED);
5823 /* When configuring a dual-mode controller to operate
5824 * with LE only and using a static address, then switching
5825 * BR/EDR back on is not allowed.
5827 * Dual-mode controllers shall operate with the public
5828 * address as its identity address for BR/EDR and LE. So
5829 * reject the attempt to create an invalid configuration.
5831 * The same restrictions applies when secure connections
5832 * has been enabled. For BR/EDR this is a controller feature
5833 * while for LE it is a host stack feature. This means that
5834 * switching BR/EDR back on when secure connections has been
5835 * enabled is not a supported transaction.
5837 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5838 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5839 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5840 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5841 MGMT_STATUS_REJECTED);
5846 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5847 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5852 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5858 /* We need to flip the bit already here so that
5859 * hci_req_update_adv_data generates the correct flags.
5861 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5863 hci_req_init(&req, hdev);
5865 __hci_req_write_fast_connectable(&req, false);
5866 __hci_req_update_scan(&req);
5868 /* Since only the advertising data flags will change, there
5869 * is no need to update the scan response data.
5871 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5873 err = hci_req_run(&req, set_bredr_complete);
5875 mgmt_pending_remove(cmd);
5878 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Secure Connections.
 *
 * Maps the requested mode onto the two flags: 0x00 clears SC_ENABLED and
 * SC_ONLY, 0x01 sets SC_ENABLED and clears SC_ONLY, 0x02 sets both
 * (SC-only mode). On HCI failure only an error status is reported.
 */
5882 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5884 struct mgmt_pending_cmd *cmd;
5885 struct mgmt_mode *cp;
5887 bt_dev_dbg(hdev, "status %u", status);
5891 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5896 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5897 mgmt_status(status));
/* cp->val == 0x00: secure connections fully disabled */
5905 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5906 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* cp->val == 0x01: SC enabled, but legacy pairing still allowed */
5909 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5910 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* cp->val == 0x02: SC-only mode */
5913 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5914 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5918 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5919 new_settings(hdev, cmd->sk);
5922 mgmt_pending_remove(cmd);
5924 hci_dev_unlock(hdev);
/* MGMT Set Secure Connections command handler.
 *
 * Accepts 0x00 (off), 0x01 (on) or 0x02 (SC-only). Requires either
 * controller SC support or LE to be enabled; with BR/EDR active on an
 * SC-capable controller SSP must be enabled first. If the controller is
 * powered off, not SC capable, or BR/EDR is disabled, only the host-side
 * flags are updated; otherwise HCI Write Secure Connections Host Support
 * is issued and sc_enable_complete() finishes the job.
 */
5927 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5928 void *data, u16 len)
5930 struct mgmt_mode *cp = data;
5931 struct mgmt_pending_cmd *cmd;
5932 struct hci_request req;
5936 bt_dev_dbg(hdev, "sock %p", sk);
5938 if (!lmp_sc_capable(hdev) &&
5939 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5940 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5941 MGMT_STATUS_NOT_SUPPORTED);
5943 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5944 lmp_sc_capable(hdev) &&
5945 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5946 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5947 MGMT_STATUS_REJECTED);
5949 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5951 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI command can or needs to be sent. */
5955 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5956 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5960 changed = !hci_dev_test_and_set_flag(hdev,
5962 if (cp->val == 0x02)
5963 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5965 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5967 changed = hci_dev_test_and_clear_flag(hdev,
5969 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5972 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5977 err = new_settings(hdev, sk);
5982 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5983 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state (including SC-only sub-state). */
5990 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5991 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5992 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5996 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6002 hci_req_init(&req, hdev);
6003 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
6004 err = hci_req_run(&req, sc_enable_complete);
6006 mgmt_pending_remove(cmd);
6011 hci_dev_unlock(hdev);
/* MGMT Set Debug Keys command handler.
 *
 * Value 0x00 disables, 0x01 keeps debug keys (HCI_KEEP_DEBUG_KEYS), and
 * 0x02 additionally makes the controller use SSP debug mode
 * (HCI_USE_DEBUG_KEYS). If the use-flag changed while powered with SSP
 * enabled, HCI Write SSP Debug Mode is sent to the controller.
 */
6015 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6016 void *data, u16 len)
6018 struct mgmt_mode *cp = data;
6019 bool changed, use_changed;
6022 bt_dev_dbg(hdev, "sock %p", sk);
6024 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6026 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the keep-debug-keys setting actually changed. */
6031 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6033 changed = hci_dev_test_and_clear_flag(hdev,
6034 HCI_KEEP_DEBUG_KEYS);
6036 if (cp->val == 0x02)
6037 use_changed = !hci_dev_test_and_set_flag(hdev,
6038 HCI_USE_DEBUG_KEYS);
6040 use_changed = hci_dev_test_and_clear_flag(hdev,
6041 HCI_USE_DEBUG_KEYS);
/* Only reach the controller when powered, the use-flag flipped
 * and SSP is enabled (debug mode is an SSP feature).
 */
6043 if (hdev_is_powered(hdev) && use_changed &&
6044 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6045 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6046 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6047 sizeof(mode), &mode);
6050 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6055 err = new_settings(hdev, sk);
6058 hci_dev_unlock(hdev);
/* MGMT Set Privacy command handler.
 *
 * Accepts 0x00 (off), 0x01 (privacy on) or 0x02 (limited privacy) plus an
 * IRK, and is only allowed while the controller is powered off. Enabling
 * privacy stores the IRK, marks the RPA expired so a fresh one gets
 * generated, and propagates the expiry to advertising instances;
 * disabling reverses all of that and wipes the IRK.
 */
6062 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6065 struct mgmt_cp_set_privacy *cp = cp_data;
6069 bt_dev_dbg(hdev, "sock %p", sk);
6071 if (!lmp_le_capable(hdev))
6072 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6073 MGMT_STATUS_NOT_SUPPORTED);
6075 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6076 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6077 MGMT_STATUS_INVALID_PARAMS);
/* Privacy may only be reconfigured while powered off. */
6079 if (hdev_is_powered(hdev))
6080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6081 MGMT_STATUS_REJECTED);
6085 /* If user space supports this command it is also expected to
6086 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6088 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6091 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6092 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA before next use. */
6093 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6094 hci_adv_instances_set_rpa_expired(hdev, true);
6095 if (cp->privacy == 0x02)
6096 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6098 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6100 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6101 memset(hdev->irk, 0, sizeof(hdev->irk));
6102 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6103 hci_adv_instances_set_rpa_expired(hdev, false);
6104 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6107 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6112 err = new_settings(hdev, sk);
6115 hci_dev_unlock(hdev);
/* Validate the address of an IRK entry from Load IRKs: public LE
 * addresses are always acceptable; random LE addresses must be static
 * (two most significant bits set).
 */
6119 static bool irk_is_valid(struct mgmt_irk_info *irk)
6121 switch (irk->addr.type) {
6122 case BDADDR_LE_PUBLIC:
6125 case BDADDR_LE_RANDOM:
6126 /* Two most significant bits shall be set */
6127 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load IRKs command handler.
 *
 * Validates the count against what fits in a u16-sized payload and checks
 * the exact expected length before touching the list. All entries are
 * validated up front, then the existing IRK store is cleared and replaced;
 * entries matching a blocked key are skipped with a warning. Finally the
 * HCI_RPA_RESOLVING flag is set since userspace evidently handles IRKs.
 */
6135 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6138 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound on entries that can fit in a maximum-size command. */
6139 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6140 sizeof(struct mgmt_irk_info));
6141 u16 irk_count, expected_len;
6144 bt_dev_dbg(hdev, "sock %p", sk);
6146 if (!lmp_le_capable(hdev))
6147 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6148 MGMT_STATUS_NOT_SUPPORTED);
6150 irk_count = __le16_to_cpu(cp->irk_count);
6151 if (irk_count > max_irk_count) {
6152 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6154 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6155 MGMT_STATUS_INVALID_PARAMS);
/* Payload length must match the declared entry count exactly. */
6158 expected_len = struct_size(cp, irks, irk_count);
6159 if (expected_len != len) {
6160 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6162 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6163 MGMT_STATUS_INVALID_PARAMS);
6166 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate everything before clearing the existing store. */
6168 for (i = 0; i < irk_count; i++) {
6169 struct mgmt_irk_info *key = &cp->irks[i];
6171 if (!irk_is_valid(key))
6172 return mgmt_cmd_status(sk, hdev->id,
6174 MGMT_STATUS_INVALID_PARAMS);
6179 hci_smp_irks_clear(hdev);
6181 for (i = 0; i < irk_count; i++) {
6182 struct mgmt_irk_info *irk = &cp->irks[i];
6184 if (hci_is_blocked_key(hdev,
6185 HCI_BLOCKED_KEY_TYPE_IRK,
6187 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6192 hci_add_irk(hdev, &irk->addr.bdaddr,
6193 le_addr_type(irk->addr.type), irk->val,
6197 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6199 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6201 hci_dev_unlock(hdev);
/* Validate one LTK entry from Load Long Term Keys: the initiator field
 * must be a boolean, public LE addresses are acceptable, and random LE
 * addresses must be static (two most significant bits set).
 */
6206 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6208 if (key->initiator != 0x00 && key->initiator != 0x01)
6211 switch (key->addr.type) {
6212 case BDADDR_LE_PUBLIC:
6215 case BDADDR_LE_RANDOM:
6216 /* Two most significant bits shall be set */
6217 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load Long Term Keys command handler.
 *
 * Same validation pattern as load_irks(): bound the count, check the
 * exact payload length, validate every entry, then clear and repopulate
 * the LTK store, skipping blocked keys. The mgmt key type is translated
 * into the SMP key type/authentication pair before storing.
 */
6225 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6226 void *cp_data, u16 len)
6228 struct mgmt_cp_load_long_term_keys *cp = cp_data;
6229 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6230 sizeof(struct mgmt_ltk_info));
6231 u16 key_count, expected_len;
6234 bt_dev_dbg(hdev, "sock %p", sk);
6236 if (!lmp_le_capable(hdev))
6237 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6238 MGMT_STATUS_NOT_SUPPORTED);
6240 key_count = __le16_to_cpu(cp->key_count);
6241 if (key_count > max_key_count) {
6242 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6244 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6245 MGMT_STATUS_INVALID_PARAMS);
6248 expected_len = struct_size(cp, keys, key_count);
6249 if (expected_len != len) {
6250 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6252 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6253 MGMT_STATUS_INVALID_PARAMS);
6256 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate all entries before clearing the existing keys. */
6258 for (i = 0; i < key_count; i++) {
6259 struct mgmt_ltk_info *key = &cp->keys[i];
6261 if (!ltk_is_valid(key))
6262 return mgmt_cmd_status(sk, hdev->id,
6263 MGMT_OP_LOAD_LONG_TERM_KEYS,
6264 MGMT_STATUS_INVALID_PARAMS);
6269 hci_smp_ltks_clear(hdev);
6271 for (i = 0; i < key_count; i++) {
6272 struct mgmt_ltk_info *key = &cp->keys[i];
6273 u8 type, authenticated;
6275 if (hci_is_blocked_key(hdev,
6276 HCI_BLOCKED_KEY_TYPE_LTK,
6278 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map the mgmt key type onto SMP key type + auth level. */
6283 switch (key->type) {
6284 case MGMT_LTK_UNAUTHENTICATED:
6285 authenticated = 0x00;
6286 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6288 case MGMT_LTK_AUTHENTICATED:
6289 authenticated = 0x01;
6290 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6292 case MGMT_LTK_P256_UNAUTH:
6293 authenticated = 0x00;
6294 type = SMP_LTK_P256;
6296 case MGMT_LTK_P256_AUTH:
6297 authenticated = 0x01;
6298 type = SMP_LTK_P256;
6300 case MGMT_LTK_P256_DEBUG:
6301 authenticated = 0x00;
6302 type = SMP_LTK_P256_DEBUG;
6308 hci_add_ltk(hdev, &key->addr.bdaddr,
6309 le_addr_type(key->addr.type), type, authenticated,
6310 key->val, key->enc_size, key->ediv, key->rand);
6313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6316 hci_dev_unlock(hdev);
/* Build and send the Get Connection Info reply for a pending command.
 *
 * On success the cached RSSI/TX-power values from the hci_conn are used;
 * on failure the invalid sentinel values are reported instead. Drops the
 * connection reference taken when the command was queued.
 */
6321 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6323 struct hci_conn *conn = cmd->user_data;
6324 struct mgmt_rp_get_conn_info rp;
/* cmd->param starts with the mgmt_addr_info the client sent. */
6327 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6329 if (status == MGMT_STATUS_SUCCESS) {
6330 rp.rssi = conn->rssi;
6331 rp.tx_power = conn->tx_power;
6332 rp.max_tx_power = conn->max_tx_power;
6334 rp.rssi = HCI_RSSI_INVALID;
6335 rp.tx_power = HCI_TX_POWER_INVALID;
6336 rp.max_tx_power = HCI_TX_POWER_INVALID;
6339 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6340 status, &rp, sizeof(rp));
6342 hci_conn_drop(conn);
/* HCI request completion callback for the Get Connection Info refresh.
 *
 * Recovers the connection handle from whichever command was sent last
 * (Read RSSI or Read TX Power — both start with the handle field), looks
 * up the connection and pending command, and finishes the mgmt reply.
 */
6348 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6351 struct hci_cp_read_rssi *cp;
6352 struct mgmt_pending_cmd *cmd;
6353 struct hci_conn *conn;
6357 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
6361 /* Commands sent in request are either Read RSSI or Read Transmit Power
6362 * Level so we check which one was last sent to retrieve connection
6363 * handle. Both commands have handle as first parameter so it's safe to
6364 * cast data on the same command struct.
6366 * First command sent is always Read RSSI and we fail only if it fails.
6367 * In other case we simply override error to indicate success as we
6368 * already remembered if TX power value is actually valid.
6370 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
6372 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
6373 status = MGMT_STATUS_SUCCESS;
6375 status = mgmt_status(hci_status);
6379 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
6383 handle = __le16_to_cpu(cp->handle);
6384 conn = hci_conn_hash_lookup_handle(hdev, handle);
6386 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
6391 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
/* cmd_complete was set to conn_info_cmd_complete() at queue time. */
6395 cmd->cmd_complete(cmd, status);
6396 mgmt_pending_remove(cmd);
6399 hci_dev_unlock(hdev);
/* MGMT Get Connection Info command handler.
 *
 * Returns RSSI and TX power for an existing connection. Cached values in
 * the hci_conn are reused when fresh enough; otherwise a Read RSSI (and,
 * where needed, Read TX Power) request is issued and the reply is sent
 * from conn_info_refresh_complete(). The cache validity window is
 * randomized between hdev->conn_info_min_age and conn_info_max_age so
 * clients cannot time their polls against it.
 */
6402 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6405 struct mgmt_cp_get_conn_info *cp = data;
6406 struct mgmt_rp_get_conn_info rp;
6407 struct hci_conn *conn;
6408 unsigned long conn_info_age;
6411 bt_dev_dbg(hdev, "sock %p", sk);
6413 memset(&rp, 0, sizeof(rp));
6414 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6415 rp.addr.type = cp->addr.type;
6417 if (!bdaddr_type_is_valid(cp->addr.type))
6418 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6419 MGMT_STATUS_INVALID_PARAMS,
6424 if (!hdev_is_powered(hdev)) {
6425 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6426 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection on the appropriate transport. */
6431 if (cp->addr.type == BDADDR_BREDR)
6432 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6435 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6437 if (!conn || conn->state != BT_CONNECTED) {
6438 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6439 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one Get Connection Info per connection may be pending. */
6444 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
6445 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6446 MGMT_STATUS_BUSY, &rp, sizeof(rp));
6450 /* To avoid client trying to guess when to poll again for information we
6451 * calculate conn info age as random value between min/max set in hdev.
6453 conn_info_age = hdev->conn_info_min_age +
6454 prandom_u32_max(hdev->conn_info_max_age -
6455 hdev->conn_info_min_age);
6457 /* Query controller to refresh cached values if they are too old or were
6460 if (time_after(jiffies, conn->conn_info_timestamp +
6461 msecs_to_jiffies(conn_info_age)) ||
6462 !conn->conn_info_timestamp) {
6463 struct hci_request req;
6464 struct hci_cp_read_tx_power req_txp_cp;
6465 struct hci_cp_read_rssi req_rssi_cp;
6466 struct mgmt_pending_cmd *cmd;
6468 hci_req_init(&req, hdev);
6469 req_rssi_cp.handle = cpu_to_le16(conn->handle);
6470 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
6473 /* For LE links TX power does not change thus we don't need to
6474 * query for it once value is known.
6476 if (!bdaddr_type_is_le(cp->addr.type) ||
6477 conn->tx_power == HCI_TX_POWER_INVALID) {
6478 req_txp_cp.handle = cpu_to_le16(conn->handle);
6479 req_txp_cp.type = 0x00;
6480 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6481 sizeof(req_txp_cp), &req_txp_cp);
6484 /* Max TX power needs to be read only once per connection */
6485 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
6486 req_txp_cp.handle = cpu_to_le16(conn->handle);
6487 req_txp_cp.type = 0x01;
6488 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6489 sizeof(req_txp_cp), &req_txp_cp);
6492 err = hci_req_run(&req, conn_info_refresh_complete);
6496 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until the reply is sent; the reference
 * is dropped in conn_info_cmd_complete().
 */
6503 hci_conn_hold(conn);
6504 cmd->user_data = hci_conn_get(conn);
6505 cmd->cmd_complete = conn_info_cmd_complete;
6507 conn->conn_info_timestamp = jiffies;
6509 /* Cache is valid, just reply with values cached in hci_conn */
6510 rp.rssi = conn->rssi;
6511 rp.tx_power = conn->tx_power;
6512 rp.max_tx_power = conn->max_tx_power;
6514 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6515 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6519 hci_dev_unlock(hdev);
/* Build and send the Get Clock Info reply for a pending command.
 *
 * Fills in the local clock from hdev and, when a piconet connection was
 * supplied via cmd->user_data, its piconet clock and accuracy. Drops the
 * connection reference taken when the command was queued.
 */
6523 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6525 struct hci_conn *conn = cmd->user_data;
6526 struct mgmt_rp_get_clock_info rp;
6527 struct hci_dev *hdev;
6530 memset(&rp, 0, sizeof(rp));
/* cmd->param starts with the mgmt_addr_info the client sent. */
6531 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6536 hdev = hci_dev_get(cmd->index);
6538 rp.local_clock = cpu_to_le32(hdev->clock);
6543 rp.piconet_clock = cpu_to_le32(conn->clock);
6544 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6548 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6552 hci_conn_drop(conn);
/* HCI request completion callback for Get Clock Info.
 *
 * Recovers the sent Read Clock parameters to tell whether a piconet
 * clock (which == nonzero) was requested, resolves the connection from
 * the handle in that case, and completes the pending mgmt command.
 */
6559 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6561 struct hci_cp_read_clock *hci_cp;
6562 struct mgmt_pending_cmd *cmd;
6563 struct hci_conn *conn;
6565 bt_dev_dbg(hdev, "status %u", status);
6569 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means a piconet clock was read for a connection. */
6573 if (hci_cp->which) {
6574 u16 handle = __le16_to_cpu(hci_cp->handle);
6575 conn = hci_conn_hash_lookup_handle(hdev, handle);
6580 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
/* cmd_complete was set to clock_info_cmd_complete() at queue time. */
6584 cmd->cmd_complete(cmd, mgmt_status(status));
6585 mgmt_pending_remove(cmd);
6588 hci_dev_unlock(hdev);
/* MGMT Get Clock Info command handler.
 *
 * BR/EDR only. Always queues a Read Clock for the local clock; when a
 * non-ANY address is given the connection must exist and be connected,
 * and a second Read Clock for its piconet clock is queued as well. The
 * reply is assembled asynchronously in get_clock_info_complete().
 */
6591 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6594 struct mgmt_cp_get_clock_info *cp = data;
6595 struct mgmt_rp_get_clock_info rp;
6596 struct hci_cp_read_clock hci_cp;
6597 struct mgmt_pending_cmd *cmd;
6598 struct hci_request req;
6599 struct hci_conn *conn;
6602 bt_dev_dbg(hdev, "sock %p", sk);
6604 memset(&rp, 0, sizeof(rp));
6605 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6606 rp.addr.type = cp->addr.type;
6608 if (cp->addr.type != BDADDR_BREDR)
6609 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6610 MGMT_STATUS_INVALID_PARAMS,
6615 if (!hdev_is_powered(hdev)) {
6616 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6617 MGMT_STATUS_NOT_POWERED, &rp,
/* A specific peer address requires an active ACL connection. */
6622 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6623 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6625 if (!conn || conn->state != BT_CONNECTED) {
6626 err = mgmt_cmd_complete(sk, hdev->id,
6627 MGMT_OP_GET_CLOCK_INFO,
6628 MGMT_STATUS_NOT_CONNECTED,
6636 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6642 cmd->cmd_complete = clock_info_cmd_complete;
6644 hci_req_init(&req, hdev);
/* First Read Clock: local clock (which == 0 from the memset). */
6646 memset(&hci_cp, 0, sizeof(hci_cp));
6647 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until the reply is sent; the reference
 * is dropped in clock_info_cmd_complete().
 */
6650 hci_conn_hold(conn);
6651 cmd->user_data = hci_conn_get(conn);
6653 hci_cp.handle = cpu_to_le16(conn->handle);
6654 hci_cp.which = 0x01; /* Piconet clock */
6655 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6658 err = hci_req_run(&req, get_clock_info_complete);
6660 mgmt_pending_remove(cmd);
6663 hci_dev_unlock(hdev);
/* Check whether an LE connection to the given address/type exists and is
 * in the BT_CONNECTED state.
 */
6667 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6669 struct hci_conn *conn;
6671 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6675 if (conn->dst_type != type)
6678 if (conn->state != BT_CONNECTED)
6684 /* This function requires the caller holds hdev->lock */
/* Create (or reuse) connection parameters for the address and set its
 * auto-connect policy, moving the entry onto the matching action list:
 * pend_le_conns for direct/always connections (and for disabled entries
 * with an explicit connect still outstanding), pend_le_reports for
 * report mode. Returns an error when the params cannot be allocated;
 * otherwise updates params->auto_connect.
 */
6685 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6686 u8 addr_type, u8 auto_connect)
6688 struct hci_conn_params *params;
6690 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged. */
6694 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on before
 * re-adding it according to the new policy.
 */
6697 list_del_init(&params->action);
6699 switch (auto_connect) {
6700 case HCI_AUTO_CONN_DISABLED:
6701 case HCI_AUTO_CONN_LINK_LOSS:
6702 /* If auto connect is being disabled when we're trying to
6703 * connect to device, keep connecting.
6705 if (params->explicit_connect)
6706 list_add(&params->action, &hdev->pend_le_conns);
6708 case HCI_AUTO_CONN_REPORT:
6709 if (params->explicit_connect)
6710 list_add(&params->action, &hdev->pend_le_conns);
6712 list_add(&params->action, &hdev->pend_le_reports);
6714 case HCI_AUTO_CONN_DIRECT:
6715 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if none exists yet. */
6716 if (!is_connected(hdev, addr, addr_type))
6717 list_add(&params->action, &hdev->pend_le_conns);
6721 params->auto_connect = auto_connect;
6723 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
6724 addr, addr_type, auto_connect);
/* Emit the Device Added mgmt event to all sockets except the originator. */
6729 static void device_added(struct sock *sk, struct hci_dev *hdev,
6730 bdaddr_t *bdaddr, u8 type, u8 action)
6732 struct mgmt_ev_device_added ev;
6734 bacpy(&ev.addr.bdaddr, bdaddr);
6735 ev.addr.type = type;
6738 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT Add Device command handler.
 *
 * Adds a device to the accept list (BR/EDR, incoming-connection action
 * only) or configures LE connection parameters with the requested
 * auto-connect policy (0x00 report, 0x01 direct, 0x02 always). LE
 * addresses must be identity addresses since conn_params are keyed on
 * them. Emits Device Added and a device-flags-changed notification on
 * success.
 */
6741 static int add_device(struct sock *sk, struct hci_dev *hdev,
6742 void *data, u16 len)
6744 struct mgmt_cp_add_device *cp = data;
6745 u8 auto_conn, addr_type;
6746 struct hci_conn_params *params;
6748 u32 current_flags = 0;
6750 bt_dev_dbg(hdev, "sock %p", sk);
6752 if (!bdaddr_type_is_valid(cp->addr.type) ||
6753 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6754 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6755 MGMT_STATUS_INVALID_PARAMS,
6756 &cp->addr, sizeof(cp->addr));
6758 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6759 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6760 MGMT_STATUS_INVALID_PARAMS,
6761 &cp->addr, sizeof(cp->addr));
6765 if (cp->addr.type == BDADDR_BREDR) {
6766 /* Only incoming connections action is supported for now */
6767 if (cp->action != 0x01) {
6768 err = mgmt_cmd_complete(sk, hdev->id,
6770 MGMT_STATUS_INVALID_PARAMS,
6771 &cp->addr, sizeof(cp->addr));
6775 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
/* Page scan may need enabling now that the list is non-empty. */
6781 hci_req_update_scan(hdev);
6786 addr_type = le_addr_type(cp->addr.type);
/* Map the mgmt action onto the kernel auto-connect policy. */
6788 if (cp->action == 0x02)
6789 auto_conn = HCI_AUTO_CONN_ALWAYS;
6790 else if (cp->action == 0x01)
6791 auto_conn = HCI_AUTO_CONN_DIRECT;
6793 auto_conn = HCI_AUTO_CONN_REPORT;
6795 /* Kernel internally uses conn_params with resolvable private
6796 * address, but Add Device allows only identity addresses.
6797 * Make sure it is enforced before calling
6798 * hci_conn_params_lookup.
6800 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6801 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6802 MGMT_STATUS_INVALID_PARAMS,
6803 &cp->addr, sizeof(cp->addr));
6807 /* If the connection parameters don't exist for this device,
6808 * they will be created and configured with defaults.
6810 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
6812 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6813 MGMT_STATUS_FAILED, &cp->addr,
6817 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6820 current_flags = params->current_flags;
6823 hci_update_background_scan(hdev);
6826 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6827 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
6828 SUPPORTED_DEVICE_FLAGS(), current_flags);
6830 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6831 MGMT_STATUS_SUCCESS, &cp->addr,
6835 hci_dev_unlock(hdev);
/* Emit the Device Removed mgmt event to all sockets except the originator. */
6839 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6840 bdaddr_t *bdaddr, u8 type)
6842 struct mgmt_ev_device_removed ev;
6844 bacpy(&ev.addr.bdaddr, bdaddr);
6845 ev.addr.type = type;
6847 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT Remove Device command handler.
 *
 * With a specific address, removes the device from the BR/EDR accept
 * list or deletes its LE connection parameters; entries that were never
 * added by Add Device (auto_connect DISABLED/EXPLICIT) are rejected.
 * With BDADDR_ANY (type must be 0), clears the whole accept list and all
 * LE conn_params except disabled ones, preserving entries with an
 * explicit connect in progress. Emits Device Removed for each removal.
 */
6850 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6851 void *data, u16 len)
6853 struct mgmt_cp_remove_device *cp = data;
6856 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-ANY address: remove one specific device. */
6860 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6861 struct hci_conn_params *params;
6864 if (!bdaddr_type_is_valid(cp->addr.type)) {
6865 err = mgmt_cmd_complete(sk, hdev->id,
6866 MGMT_OP_REMOVE_DEVICE,
6867 MGMT_STATUS_INVALID_PARAMS,
6868 &cp->addr, sizeof(cp->addr));
6872 if (cp->addr.type == BDADDR_BREDR) {
6873 err = hci_bdaddr_list_del(&hdev->accept_list,
6877 err = mgmt_cmd_complete(sk, hdev->id,
6878 MGMT_OP_REMOVE_DEVICE,
6879 MGMT_STATUS_INVALID_PARAMS,
6885 hci_req_update_scan(hdev);
6887 device_removed(sk, hdev, &cp->addr.bdaddr,
6892 addr_type = le_addr_type(cp->addr.type);
6894 /* Kernel internally uses conn_params with resolvable private
6895 * address, but Remove Device allows only identity addresses.
6896 * Make sure it is enforced before calling
6897 * hci_conn_params_lookup.
6899 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6900 err = mgmt_cmd_complete(sk, hdev->id,
6901 MGMT_OP_REMOVE_DEVICE,
6902 MGMT_STATUS_INVALID_PARAMS,
6903 &cp->addr, sizeof(cp->addr));
6907 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6910 err = mgmt_cmd_complete(sk, hdev->id,
6911 MGMT_OP_REMOVE_DEVICE,
6912 MGMT_STATUS_INVALID_PARAMS,
6913 &cp->addr, sizeof(cp->addr));
/* Entries not created via Add Device cannot be removed here. */
6917 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
6918 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
6919 err = mgmt_cmd_complete(sk, hdev->id,
6920 MGMT_OP_REMOVE_DEVICE,
6921 MGMT_STATUS_INVALID_PARAMS,
6922 &cp->addr, sizeof(cp->addr));
6926 list_del(&params->action);
6927 list_del(&params->list);
6929 hci_update_background_scan(hdev);
6931 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe everything (address type must be 0). */
6933 struct hci_conn_params *p, *tmp;
6934 struct bdaddr_list *b, *btmp;
6936 if (cp->addr.type) {
6937 err = mgmt_cmd_complete(sk, hdev->id,
6938 MGMT_OP_REMOVE_DEVICE,
6939 MGMT_STATUS_INVALID_PARAMS,
6940 &cp->addr, sizeof(cp->addr));
6944 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
6945 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6950 hci_req_update_scan(hdev);
6952 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6953 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6955 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep in-progress explicit connects alive, just demote them. */
6956 if (p->explicit_connect) {
6957 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
6960 list_del(&p->action);
6965 bt_dev_dbg(hdev, "All LE connection parameters were removed");
6967 hci_update_background_scan(hdev);
6971 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
6972 MGMT_STATUS_SUCCESS, &cp->addr,
6975 hci_dev_unlock(hdev);
6979 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6982 struct mgmt_cp_load_conn_param *cp = data;
6983 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6984 sizeof(struct mgmt_conn_param));
6985 u16 param_count, expected_len;
6988 if (!lmp_le_capable(hdev))
6989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6990 MGMT_STATUS_NOT_SUPPORTED);
6992 param_count = __le16_to_cpu(cp->param_count);
6993 if (param_count > max_param_count) {
6994 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
6996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6997 MGMT_STATUS_INVALID_PARAMS);
7000 expected_len = struct_size(cp, params, param_count);
7001 if (expected_len != len) {
7002 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7004 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7005 MGMT_STATUS_INVALID_PARAMS);
7008 bt_dev_dbg(hdev, "param_count %u", param_count);
7012 hci_conn_params_clear_disabled(hdev);
7014 for (i = 0; i < param_count; i++) {
7015 struct mgmt_conn_param *param = &cp->params[i];
7016 struct hci_conn_params *hci_param;
7017 u16 min, max, latency, timeout;
7020 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7023 if (param->addr.type == BDADDR_LE_PUBLIC) {
7024 addr_type = ADDR_LE_DEV_PUBLIC;
7025 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7026 addr_type = ADDR_LE_DEV_RANDOM;
7028 bt_dev_err(hdev, "ignoring invalid connection parameters");
7032 min = le16_to_cpu(param->min_interval);
7033 max = le16_to_cpu(param->max_interval);
7034 latency = le16_to_cpu(param->latency);
7035 timeout = le16_to_cpu(param->timeout);
7037 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7038 min, max, latency, timeout);
7040 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7041 bt_dev_err(hdev, "ignoring invalid connection parameters");
7045 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
7048 bt_dev_err(hdev, "failed to add connection parameters");
7052 hci_param->conn_min_interval = min;
7053 hci_param->conn_max_interval = max;
7054 hci_param->conn_latency = latency;
7055 hci_param->supervision_timeout = timeout;
7058 hci_dev_unlock(hdev);
7060 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Set External Configuration command handler (MGMT_OP_SET_EXTERNAL_CONFIG).
 *
 * Toggles whether the controller's configuration is provided externally.
 * Only valid while powered off and only for controllers that advertise
 * the HCI_QUIRK_EXTERNAL_CONFIG quirk.
 *
 * NOTE(review): this excerpt is missing interior lines (braces, locals,
 * the cp->config branch and unlock-label plumbing); comments below hedge
 * where the visible code does not show the full control flow.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
	struct mgmt_cp_set_external_config *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reconfiguration is only allowed while the controller is down. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* config is a boolean on the wire; anything else is invalid. */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

		/* Presumably the cp->config value selects which branch runs;
		 * "changed" records whether the flag actually flipped.
		 */
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);

		/* Notify other mgmt sockets about the changed options. */
		err = new_options(hdev, sk);

		/* If the unconfigured state no longer matches reality,
		 * re-register the index under its new identity.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
			mgmt_index_removed(hdev);

			if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
				hci_dev_set_flag(hdev, HCI_CONFIG);
				hci_dev_set_flag(hdev, HCI_AUTO_OFF);

				queue_work(hdev->req_workqueue, &hdev->power_on);
				/* Device becomes unconfigured: raw mode only. */
				set_bit(HCI_RAW, &hdev->flags);
				mgmt_index_added(hdev);

	hci_dev_unlock(hdev);
/* Set Public Address command handler (MGMT_OP_SET_PUBLIC_ADDRESS).
 *
 * Stores a public BD_ADDR to program into the controller on power-up via
 * the driver's set_bdaddr hook.  Rejected while powered, for BDADDR_ANY,
 * and for drivers without a set_bdaddr callback.
 *
 * NOTE(review): excerpt is missing interior lines (locals, locking,
 * unlock label); comments hedge accordingly.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
	struct mgmt_cp_set_public_address *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The all-zero address is not a valid public address. */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver hook there is no way to program the address. */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the address was the last missing piece of configuration,
	 * re-register the index as a configured controller and power it
	 * up for the auto-off initialization cycle.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);

	hci_dev_unlock(hdev);
/* Completion callback for the HCI request issued by
 * read_local_ssp_oob_req(): parses the controller's local OOB data
 * (legacy P-192 or extended P-192+P-256 form, depending on opcode),
 * packs it into EIR-formatted mgmt response data, completes the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and broadcasts an
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED event to other interested sockets.
 *
 * NOTE(review): excerpt is missing interior lines (braces, goto targets,
 * h192/r192/h256/r256 assignments); comments hedge where control flow is
 * not visible.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);

	mgmt_cp = cmd->param;

		/* Map the HCI error onto the mgmt status space. */
		status = mgmt_status(status);
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;

		status = MGMT_STATUS_SUCCESS;
		rp = (void *)skb->data;

		/* 5 bytes class-of-device element + two 18-byte elements
		 * (16-byte value + 2-byte EIR header) for C192 and R192.
		 */
		eir_len = 5 + 18 + 18;

		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;

		status = MGMT_STATUS_SUCCESS;
		rp = (void *)skb->data;

		/* SC-only mode exposes only the P-256 pair. */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			eir_len = 5 + 18 + 18;

		/* Otherwise both P-192 and P-256 pairs are reported. */
		eir_len = 5 + 18 + 18 + 18 + 18;

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);

		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);

	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)

	/* Requester implicitly subscribes to future OOB data updates. */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);

	mgmt_pending_remove(cmd);
/* Issue the HCI request that fetches local SSP OOB data for
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA.  Uses the extended (P-192 + P-256)
 * read when BR/EDR Secure Connections is enabled, the legacy read
 * otherwise.  Completion is handled in read_local_oob_ext_data_complete().
 *
 * NOTE(review): excerpt is missing the error/return plumbing around
 * mgmt_pending_add and hci_req_run_skb.
 */
static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);

		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);

		/* Request never started: drop the pending command again. */
		mgmt_pending_remove(cmd);
/* Read Local OOB Extended Data command handler
 * (MGMT_OP_READ_LOCAL_OOB_EXT_DATA).
 *
 * For BR/EDR the controller is queried asynchronously (SSP enabled);
 * for LE the OOB payload (address, role, SC confirm/random, flags) is
 * assembled synchronously from host state.
 *
 * NOTE(review): excerpt is missing interior lines (switch statement,
 * locals such as err/i, goto labels); comments hedge where the visible
 * code does not show the full flow.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	u8 status, flags, role, addr[7], hash[16], rand[16];

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		/* Presumably a switch on cp->type selects the transport. */
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);

		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);

			/* LE payload: bdaddr(9) + role(3) + hash(18) +
			 * rand(18) + flags(3) EIR elements.
			 */
			eir_len = 9 + 3 + 18 + 18 + 3;

			status = MGMT_STATUS_INVALID_PARAMS;

		status = MGMT_STATUS_NOT_POWERED;

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);

	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* BR/EDR OOB data comes from the controller; the
			 * command completes asynchronously.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);

			status = MGMT_STATUS_FAILED;

		eir_len = eir_append_data(rp->eir, eir_len,
					  hdev->dev_class, 3);

	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;

		/* Pick static vs public address the same way the LE
		 * advertising code does.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);

			memcpy(addr, &hdev->bdaddr, 6);

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  rand, sizeof(rand));

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));

	hci_dev_unlock(hdev);

	/* Requester implicitly subscribes to future OOB data updates. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);
7467 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7471 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7472 flags |= MGMT_ADV_FLAG_DISCOV;
7473 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7474 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7475 flags |= MGMT_ADV_FLAG_APPEARANCE;
7476 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7477 flags |= MGMT_ADV_PARAM_DURATION;
7478 flags |= MGMT_ADV_PARAM_TIMEOUT;
7479 flags |= MGMT_ADV_PARAM_INTERVALS;
7480 flags |= MGMT_ADV_PARAM_TX_POWER;
7481 flags |= MGMT_ADV_PARAM_SCAN_RSP;
7483 /* In extended adv TX_POWER returned from Set Adv Param
7484 * will be always valid.
7486 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7487 ext_adv_capable(hdev))
7488 flags |= MGMT_ADV_FLAG_TX_POWER;
7490 if (ext_adv_capable(hdev)) {
7491 flags |= MGMT_ADV_FLAG_SEC_1M;
7492 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7493 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7495 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7496 flags |= MGMT_ADV_FLAG_SEC_2M;
7498 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7499 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Read Advertising Features command handler (MGMT_OP_READ_ADV_FEATURES).
 *
 * Reports supported flags, data-size limits, the instance limit and the
 * list of currently registered advertising instances.
 *
 * NOTE(review): excerpt is missing interior lines (locals, braces, the
 * instance-array fill increment, return).
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
	struct mgmt_rp_read_adv_features *rp;
	struct adv_info *adv_instance;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* One trailing byte per registered instance. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
		hci_dev_unlock(hdev);

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill the flexible array with the registered instance numbers. */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);
7561 static u8 calculate_name_len(struct hci_dev *hdev)
7563 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7565 return append_local_name(hdev, buf, 0);
/* Maximum TLV payload userspace may supply for advertising data
 * (is_adv_data == true) or scan-response data (false), after reserving
 * room for any elements the kernel appends itself based on adv_flags.
 *
 * NOTE(review): excerpt is missing the is_adv_data branch structure and
 * the exact byte counts subtracted for flags/TX-power/appearance.
 */
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
	u8 max_len = HCI_MAX_AD_LENGTH;

	/* Kernel-managed Flags element consumes space. */
	if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
			 MGMT_ADV_FLAG_LIMITED_DISCOV |
			 MGMT_ADV_FLAG_MANAGED_FLAGS))

	if (adv_flags & MGMT_ADV_FLAG_TX_POWER)

	/* Kernel-appended local name takes its rendered size. */
	if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		max_len -= calculate_name_len(hdev);

	if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7592 static bool flags_managed(u32 adv_flags)
7594 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7595 MGMT_ADV_FLAG_LIMITED_DISCOV |
7596 MGMT_ADV_FLAG_MANAGED_FLAGS);
7599 static bool tx_power_managed(u32 adv_flags)
7601 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7604 static bool name_managed(u32 adv_flags)
7606 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7609 static bool appearance_managed(u32 adv_flags)
7611 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate userspace-supplied TLV advertising/scan-response data:
 * enforce the size budget from tlv_data_max_len(), reject elements the
 * kernel manages itself (Flags, TX power, name, appearance, depending
 * on adv_flags), and reject malformed length fields.
 *
 * NOTE(review): excerpt is missing the cur_len assignment, the length
 * pre-check against max_len, and the return statements.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {

		/* data[i] is the element length, data[i + 1] the type.
		 * Elements the kernel manages may not be supplied here.
		 */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
7659 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7661 u32 supported_flags, phy_flags;
7663 /* The current implementation only supports a subset of the specified
7664 * flags. Also need to check mutual exclusiveness of sec flags.
7666 supported_flags = get_supported_adv_flags(hdev);
7667 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7668 if (adv_flags & ~supported_flags ||
7669 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7675 static bool adv_busy(struct hci_dev *hdev)
7677 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7678 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7679 pending_find(MGMT_OP_SET_LE, hdev) ||
7680 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7681 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
/* Completion callback shared by Add Advertising and Add Extended
 * Advertising Data requests.  On failure it tears down any instance that
 * was still pending, then completes (or fails) the mgmt command.
 *
 * NOTE(review): excerpt is missing interior lines (locking, status
 * branches, the continue/err paths inside the loop).
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;

	bt_dev_dbg(hdev, "status %u", status);

	/* The same completion serves both legacy and extended-data adds. */
	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
		cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)

		/* Request succeeded: the instance is now established. */
		adv_instance->pending = false;

		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		/* Failure path: remove the half-registered instance. */
		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);

	rp.instance = cp->instance;

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));

		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Add Advertising command handler (MGMT_OP_ADD_ADVERTISING).
 *
 * Validates flags and TLV data, registers (or replaces) the advertising
 * instance, and schedules it for transmission if the device state allows
 * immediate HCI activity.  Completion continues in
 * add_advertising_complete().
 *
 * NOTE(review): excerpt is missing interior lines (locking, err/status
 * locals, goto labels, some argument lines); comments hedge accordingly.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Instance numbers are 1-based and limited by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Total length must equal header + adv data + scan-rsp data. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* A timeout needs a running clock, i.e. a powered device. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->data + cp->adv_data_len,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

		err = hci_req_run(&req, add_advertising_complete);

		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Completion callback for the extended Add Advertising Parameters
 * request.  On success it reports the instance's negotiated TX power and
 * remaining data-space budgets; on failure it removes the instance.
 *
 * NOTE(review): excerpt is missing interior lines (locking, status
 * branches, unlock label).  Also uses BT_DBG while the rest of this file
 * uses bt_dev_dbg — consider converting for consistency.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_ext_adv_params *cp;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv_instance;

	BT_DBG("%s", hdev->name);

	cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	rp.instance = cp->instance;
	rp.tx_power = adv_instance->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	/* If this advertisement was previously advertising and we
	 * failed to update it, we signal that it has been removed and
	 * delete its structure
	 */
	if (!adv_instance->pending)
		mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

	hci_remove_adv_instance(hdev, cp->instance);

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			mgmt_status(status));

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Add Extended Advertising Parameters command handler
 * (MGMT_OP_ADD_EXT_ADV_PARAMS).
 *
 * First half of the two-step extended advertising registration: creates
 * an instance with the requested parameters (no data yet).  The data is
 * supplied later via MGMT_OP_ADD_EXT_ADV_DATA.
 *
 * NOTE(review): excerpt is missing interior lines (locals such as
 * tx_power/err, locking, goto labels); comments hedge accordingly.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv_instance;
	struct hci_request req;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   0, NULL, 0, NULL, timeout, duration,
				   tx_power, min_interval, max_interval);

		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		hci_req_init(&req, hdev);
		adv_instance = hci_find_adv_instance(hdev, cp->instance);

		/* Updating parameters of an active instance will return a
		 * Command Disallowed error, so we must first disable the
		 * instance if it is active.
		 */
		if (!adv_instance->pending)
			__hci_req_disable_ext_adv_instance(&req, cp->instance);

		__hci_req_setup_ext_adv_instance(&req, cp->instance);

		err = hci_req_run(&req, add_ext_adv_params_complete);

			cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
					       hdev, data, data_len);
			/* Presumably the failure path: undo the instance. */
			hci_remove_adv_instance(hdev, cp->instance);

		/* Legacy advertising path: answer immediately with defaults. */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	hci_dev_unlock(hdev);
/* Add Extended Advertising Data command handler
 * (MGMT_OP_ADD_EXT_ADV_DATA).
 *
 * Second half of the two-step registration started by
 * MGMT_OP_ADD_EXT_ADV_PARAMS: validates and installs the advertising and
 * scan-response payloads, then programs/schedules the instance.  A new
 * (still-pending) instance is torn down on any failure.
 *
 * NOTE(review): excerpt is missing interior lines (locking, err local,
 * unlock/clear_new_instance labels); comments hedge accordingly.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
		goto clear_new_instance;

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	if (ext_adv_capable(hdev)) {
		__hci_req_update_adv_data(&req, cp->instance);
		__hci_req_update_scan_rsp_data(&req, cp->instance);
		__hci_req_enable_ext_advertising(&req, cp->instance);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev,
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp,

	err = __hci_req_schedule_adv_instance(&req, schedule_instance,

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
		goto clear_new_instance;

	err = hci_req_run(&req, add_advertising_complete);

		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto clear_new_instance;

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	/* clear_new_instance error path: drop the half-built instance. */
	hci_remove_adv_instance(hdev, cp->instance);

	hci_dev_unlock(hdev);
/* Completion callback for the Remove Advertising request.
 *
 * NOTE(review): excerpt is missing interior lines (locking, unlock
 * label, the cp assignment from cmd->param).
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "status %u", status);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);

	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
8246 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8247 void *data, u16 data_len)
8249 struct mgmt_cp_remove_advertising *cp = data;
8250 struct mgmt_rp_remove_advertising rp;
8251 struct mgmt_pending_cmd *cmd;
8252 struct hci_request req;
8255 bt_dev_dbg(hdev, "sock %p", sk);
8257 /* Enabling the experimental LL Privay support disables support for
8260 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8261 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
8262 MGMT_STATUS_NOT_SUPPORTED);
8266 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8267 err = mgmt_cmd_status(sk, hdev->id,
8268 MGMT_OP_REMOVE_ADVERTISING,
8269 MGMT_STATUS_INVALID_PARAMS);
8273 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8274 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8275 pending_find(MGMT_OP_SET_LE, hdev)) {
8276 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8281 if (list_empty(&hdev->adv_instances)) {
8282 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8283 MGMT_STATUS_INVALID_PARAMS);
8287 hci_req_init(&req, hdev);
8289 /* If we use extended advertising, instance is disabled and removed */
8290 if (ext_adv_capable(hdev)) {
8291 __hci_req_disable_ext_adv_instance(&req, cp->instance);
8292 __hci_req_remove_ext_adv_instance(&req, cp->instance);
8295 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8297 if (list_empty(&hdev->adv_instances))
8298 __hci_req_disable_advertising(&req);
8300 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8301 * flag is set or the device isn't powered then we have no HCI
8302 * communication to make. Simply return.
8304 if (skb_queue_empty(&req.cmd_q) ||
8305 !hdev_is_powered(hdev) ||
8306 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8307 hci_req_purge(&req);
8308 rp.instance = cp->instance;
8309 err = mgmt_cmd_complete(sk, hdev->id,
8310 MGMT_OP_REMOVE_ADVERTISING,
8311 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8315 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8322 err = hci_req_run(&req, remove_advertising_complete);
8324 mgmt_pending_remove(cmd);
8327 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: report the maximum advertising
 * data / scan response lengths available for a given instance and flag set.
 * NOTE(review): this extract appears to have lines elided (e.g. the `int
 * err;` declaration and closing braces) — verify against the full source.
 */
8332 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8333 void *data, u16 data_len)
8335 struct mgmt_cp_get_adv_size_info *cp = data;
8336 struct mgmt_rp_get_adv_size_info rp;
8337 u32 flags, supported_flags;
8340 bt_dev_dbg(hdev, "sock %p", sk);
/* Advertising requires LE support on the controller. */
8342 if (!lmp_le_capable(hdev))
8343 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8344 MGMT_STATUS_REJECTED);
/* Instance numbers are 1-based and bounded by the controller's set count. */
8346 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8347 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8348 MGMT_STATUS_INVALID_PARAMS);
8350 flags = __le32_to_cpu(cp->flags);
8352 /* The current implementation only supports a subset of the specified
8355 supported_flags = get_supported_adv_flags(hdev);
/* Reject any flag bit the controller cannot honour. */
8356 if (flags & ~supported_flags)
8357 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8358 MGMT_STATUS_INVALID_PARAMS);
8360 rp.instance = cp->instance;
8361 rp.flags = cp->flags;
/* true = adv data limit, false = scan response limit. */
8362 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8363 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8365 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8366 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for mgmt commands, indexed by opcode. Each entry pairs a
 * handler with the expected (minimum) parameter size; the flag bits
 * (HCI_MGMT_UNTRUSTED, HCI_MGMT_UNCONFIGURED, HCI_MGMT_HDEV_OPTIONAL, ...)
 * control who may issue the command and in which controller state.
 * NOTE(review): several entries in this extract appear to have their flag
 * lines elided — verify against the full source.
 */
8371 static const struct hci_mgmt_handler mgmt_handlers[] = {
8372 { NULL }, /* 0x0000 (no command) */
8373 { read_version, MGMT_READ_VERSION_SIZE,
8375 HCI_MGMT_UNTRUSTED },
8376 { read_commands, MGMT_READ_COMMANDS_SIZE,
8378 HCI_MGMT_UNTRUSTED },
8379 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8381 HCI_MGMT_UNTRUSTED },
8382 { read_controller_info, MGMT_READ_INFO_SIZE,
8383 HCI_MGMT_UNTRUSTED },
8384 { set_powered, MGMT_SETTING_SIZE },
8385 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8386 { set_connectable, MGMT_SETTING_SIZE },
8387 { set_fast_connectable, MGMT_SETTING_SIZE },
8388 { set_bondable, MGMT_SETTING_SIZE },
8389 { set_link_security, MGMT_SETTING_SIZE },
8390 { set_ssp, MGMT_SETTING_SIZE },
8391 { set_hs, MGMT_SETTING_SIZE },
8392 { set_le, MGMT_SETTING_SIZE },
8393 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8394 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8395 { add_uuid, MGMT_ADD_UUID_SIZE },
8396 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8397 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8399 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8401 { disconnect, MGMT_DISCONNECT_SIZE },
8402 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8403 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8404 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8405 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8406 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8407 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8408 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8409 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8410 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8411 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8412 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8413 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8414 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8416 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8417 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8418 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8419 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8420 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8421 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8422 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8423 { set_advertising, MGMT_SETTING_SIZE },
8424 { set_bredr, MGMT_SETTING_SIZE },
8425 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8426 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8427 { set_secure_conn, MGMT_SETTING_SIZE },
8428 { set_debug_keys, MGMT_SETTING_SIZE },
8429 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8430 { load_irks, MGMT_LOAD_IRKS_SIZE,
8432 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8433 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8434 { add_device, MGMT_ADD_DEVICE_SIZE },
8435 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8436 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8438 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8440 HCI_MGMT_UNTRUSTED },
8441 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8442 HCI_MGMT_UNCONFIGURED |
8443 HCI_MGMT_UNTRUSTED },
8444 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8445 HCI_MGMT_UNCONFIGURED },
8446 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8447 HCI_MGMT_UNCONFIGURED },
8448 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8450 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8451 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8453 HCI_MGMT_UNTRUSTED },
8454 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8455 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8457 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8458 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8459 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8460 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8461 HCI_MGMT_UNTRUSTED },
8462 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8463 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8464 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8465 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8467 { set_wideband_speech, MGMT_SETTING_SIZE },
8468 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8469 HCI_MGMT_UNTRUSTED },
8470 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8471 HCI_MGMT_UNTRUSTED |
8472 HCI_MGMT_HDEV_OPTIONAL },
8473 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8475 HCI_MGMT_HDEV_OPTIONAL },
8476 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8477 HCI_MGMT_UNTRUSTED },
8478 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8480 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8481 HCI_MGMT_UNTRUSTED },
8482 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8484 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8485 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8486 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8487 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8489 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8490 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8492 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8494 { add_adv_patterns_monitor_rssi,
8495 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
/* mgmt_index_added(): announce a new controller index to mgmt listeners.
 * Sends the legacy (UNCONF_)INDEX_ADDED events plus the extended event
 * carrying type/bus information. Raw-quirk devices are never announced.
 * NOTE(review): switch-case labels and struct field assignments appear to
 * be elided in this extract — verify against the full source.
 */
8499 void mgmt_index_added(struct hci_dev *hdev)
8501 struct mgmt_ev_ext_index ev;
/* Devices flagged HCI_QUIRK_RAW_DEVICE are invisible to mgmt. */
8503 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8506 switch (hdev->dev_type) {
/* Unconfigured controllers only emit the unconfigured variant. */
8508 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8509 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8510 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8513 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8514 HCI_MGMT_INDEX_EVENTS);
/* Extended event is always sent in addition to the legacy one. */
8527 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8528 HCI_MGMT_EXT_INDEX_EVENTS);
/* mgmt_index_removed(): mirror of mgmt_index_added() for controller
 * removal; also fails all pending commands with INVALID_INDEX first.
 */
8531 void mgmt_index_removed(struct hci_dev *hdev)
8533 struct mgmt_ev_ext_index ev;
8534 u8 status = MGMT_STATUS_INVALID_INDEX;
8536 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8539 switch (hdev->dev_type) {
/* Complete every pending command (opcode 0 = all) with INVALID_INDEX. */
8541 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8543 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8544 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8545 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8548 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8549 HCI_MGMT_INDEX_EVENTS);
8562 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8563 HCI_MGMT_EXT_INDEX_EVENTS);
8566 /* This function requires the caller holds hdev->lock */
/* Re-queue every LE connection parameter entry onto the pending list that
 * matches its auto_connect policy (direct/always -> pend_le_conns,
 * report -> pend_le_reports). Used on power-on to restore auto-connect
 * behaviour. NOTE(review): break statements and the default case appear
 * elided in this extract.
 */
8567 static void restart_le_actions(struct hci_dev *hdev)
8569 struct hci_conn_params *p;
8571 list_for_each_entry(p, &hdev->le_conn_params, list) {
8572 /* Needed for AUTO_OFF case where might not "really"
8573 * have been powered off.
/* Detach from whichever action list the entry is currently on. */
8575 list_del_init(&p->action);
8577 switch (p->auto_connect) {
8578 case HCI_AUTO_CONN_DIRECT:
8579 case HCI_AUTO_CONN_ALWAYS:
8580 list_add(&p->action, &hdev->pend_le_conns);
8582 case HCI_AUTO_CONN_REPORT:
8583 list_add(&p->action, &hdev->pend_le_reports);
/* mgmt_power_on(): finish a power-on request — restore LE auto-connect
 * actions, kick background scanning, answer pending SET_POWERED commands
 * and broadcast the new settings.
 */
8591 void mgmt_power_on(struct hci_dev *hdev, int err)
8593 struct cmd_lookup match = { NULL, hdev };
8595 bt_dev_dbg(hdev, "err %d", err);
8600 restart_le_actions(hdev);
8601 hci_update_background_scan(hdev);
/* Reply to every pending SET_POWERED command; collects the socket in
 * `match` so the originator is skipped in the broadcast below. */
8604 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8606 new_settings(hdev, match.sk);
8611 hci_dev_unlock(hdev);
/* __mgmt_power_off(): counterpart run when the controller goes down.
 * Fails all pending commands, clears the class of device and broadcasts
 * updated settings.
 */
8614 void __mgmt_power_off(struct hci_dev *hdev)
8616 struct cmd_lookup match = { NULL, hdev };
8617 u8 status, zero_cod[] = { 0, 0, 0 };
8619 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8621 /* If the power off is because of hdev unregistration let
8622 * use the appropriate INVALID_INDEX status. Otherwise use
8623 * NOT_POWERED. We cover both scenarios here since later in
8624 * mgmt_index_removed() any hci_conn callbacks will have already
8625 * been triggered, potentially causing misleading DISCONNECTED
8628 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8629 status = MGMT_STATUS_INVALID_INDEX;
8631 status = MGMT_STATUS_NOT_POWERED;
8633 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* If a class of device was set, tell listeners it is now cleared. */
8635 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8636 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8637 zero_cod, sizeof(zero_cod),
8638 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8639 ext_info_changed(hdev, NULL);
8642 new_settings(hdev, match.sk);
/* mgmt_set_powered_failed(): fail a pending SET_POWERED command; rfkill
 * blocks get their own dedicated status code.
 */
8648 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8650 struct mgmt_pending_cmd *cmd;
8653 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8657 if (err == -ERFKILL)
8658 status = MGMT_STATUS_RFKILLED;
8660 status = MGMT_STATUS_FAILED;
8662 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8664 mgmt_pending_remove(cmd);
/* mgmt_new_link_key(): emit MGMT_EV_NEW_LINK_KEY for a freshly created
 * BR/EDR link key so userspace can persist it (store_hint = persistent).
 */
8667 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8670 struct mgmt_ev_new_link_key ev;
8672 memset(&ev, 0, sizeof(ev));
8674 ev.store_hint = persistent;
8675 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
/* Link keys are BR/EDR only, so the address type is fixed. */
8676 ev.key.addr.type = BDADDR_BREDR;
8677 ev.key.type = key->type;
8678 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8679 ev.key.pin_len = key->pin_len;
8681 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* mgmt_ltk_type(): map an SMP LTK's kind + authentication level to the
 * corresponding mgmt LTK type constant. NOTE(review): some case labels
 * appear elided in this extract.
 */
8684 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8686 switch (ltk->type) {
8688 case SMP_LTK_RESPONDER:
8689 if (ltk->authenticated)
8690 return MGMT_LTK_AUTHENTICATED;
8691 return MGMT_LTK_UNAUTHENTICATED;
8693 if (ltk->authenticated)
8694 return MGMT_LTK_P256_AUTH;
8695 return MGMT_LTK_P256_UNAUTH;
8696 case SMP_LTK_P256_DEBUG:
8697 return MGMT_LTK_P256_DEBUG;
/* Fallback for unknown types. */
8700 return MGMT_LTK_UNAUTHENTICATED;
/* mgmt_new_ltk(): emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long term
 * key. Keys for non-identity random addresses get store_hint = 0 since
 * the address will change and the key is useless to persist.
 */
8703 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8705 struct mgmt_ev_new_long_term_key ev;
8707 memset(&ev, 0, sizeof(ev));
8709 /* Devices using resolvable or non-resolvable random addresses
8710 * without providing an identity resolving key don't require
8711 * to store long term keys. Their addresses will change the
8714 * Only when a remote device provides an identity address
8715 * make sure the long term key is stored. If the remote
8716 * identity is known, the long term keys are internally
8717 * mapped to the identity address. So allow static random
8718 * and public addresses here.
/* Top two bits 11 = static random; anything else random is not an
 * identity address. */
8720 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8721 (key->bdaddr.b[5] & 0xc0) != 0xc0)
8722 ev.store_hint = 0x00;
8724 ev.store_hint = persistent;
8726 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8727 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8728 ev.key.type = mgmt_ltk_type(key);
8729 ev.key.enc_size = key->enc_size;
8730 ev.key.ediv = key->ediv;
8731 ev.key.rand = key->rand;
/* SMP_LTK marks the key generated while acting as initiator. */
8733 if (key->type == SMP_LTK)
8734 ev.key.initiator = 1;
8736 /* Make sure we copy only the significant bytes based on the
8737 * encryption key size, and set the rest of the value to zeroes.
8739 memcpy(ev.key.val, key->val, key->enc_size);
8740 memset(ev.key.val + key->enc_size, 0,
8741 sizeof(ev.key.val) - key->enc_size);
8743 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* mgmt_new_irk(): emit MGMT_EV_NEW_IRK for a new identity resolving key,
 * including the current RPA it resolves.
 */
8746 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8748 struct mgmt_ev_new_irk ev;
8750 memset(&ev, 0, sizeof(ev));
8752 ev.store_hint = persistent;
8754 bacpy(&ev.rpa, &irk->rpa);
8755 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8756 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8757 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8759 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* mgmt_new_csrk(): emit MGMT_EV_NEW_CSRK for a new signature resolving
 * key. Same identity-address rule as LTKs: keys bound to non-identity
 * random addresses are not worth persisting (store_hint = 0).
 */
8762 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8765 struct mgmt_ev_new_csrk ev;
8767 memset(&ev, 0, sizeof(ev));
8769 /* Devices using resolvable or non-resolvable random addresses
8770 * without providing an identity resolving key don't require
8771 * to store signature resolving keys. Their addresses will change
8772 * the next time around.
8774 * Only when a remote device provides an identity address
8775 * make sure the signature resolving key is stored. So allow
8776 * static random and public addresses here.
8778 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8779 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8780 ev.store_hint = 0x00;
8782 ev.store_hint = persistent;
8784 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8785 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8786 ev.key.type = csrk->type;
8787 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8789 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* mgmt_new_conn_param(): emit MGMT_EV_NEW_CONN_PARAM when a peer suggests
 * new LE connection parameters; only identity addresses are reported since
 * parameters for changing RPAs cannot be stored meaningfully.
 */
8792 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8793 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8794 u16 max_interval, u16 latency, u16 timeout)
8796 struct mgmt_ev_new_conn_param ev;
8798 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8801 memset(&ev, 0, sizeof(ev));
8802 bacpy(&ev.addr.bdaddr, bdaddr);
8803 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8804 ev.store_hint = store_hint;
/* All multi-byte fields go out little-endian on the mgmt socket. */
8805 ev.min_interval = cpu_to_le16(min_interval);
8806 ev.max_interval = cpu_to_le16(max_interval);
8807 ev.latency = cpu_to_le16(latency);
8808 ev.timeout = cpu_to_le16(timeout);
8810 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* mgmt_device_connected(): emit MGMT_EV_DEVICE_CONNECTED with EIR data
 * assembled from either the LE advertising data or (for BR/EDR) the
 * remote name and class of device. NOTE(review): the buffer declaration
 * and some flag-setup lines appear elided in this extract.
 */
8813 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
8814 u8 *name, u8 name_len)
8817 struct mgmt_ev_device_connected *ev = (void *) buf;
8821 bacpy(&ev->addr.bdaddr, &conn->dst);
8822 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8825 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
8827 ev->flags = __cpu_to_le32(flags);
8829 /* We must ensure that the EIR Data fields are ordered and
8830 * unique. Keep it simple for now and avoid the problem by not
8831 * adding any BR/EDR data to the LE adv.
/* LE: forward the advertising data verbatim. */
8833 if (conn->le_adv_data_len > 0) {
8834 memcpy(&ev->eir[eir_len],
8835 conn->le_adv_data, conn->le_adv_data_len);
8836 eir_len = conn->le_adv_data_len;
/* BR/EDR: build EIR from the remote name and, if known, the CoD. */
8839 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
8842 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
8843 eir_len = eir_append_data(ev->eir, eir_len,
8845 conn->dev_class, 3);
8848 ev->eir_len = cpu_to_le16(eir_len);
8850 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
8851 sizeof(*ev) + eir_len, NULL);
/* disconnect_rsp(): mgmt_pending_foreach callback — complete a pending
 * DISCONNECT command and hand its socket back via *data so the caller
 * can skip it when broadcasting the disconnect event.
 */
8854 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8856 struct sock **sk = data;
8858 cmd->cmd_complete(cmd, 0);
8863 mgmt_pending_remove(cmd);
/* unpair_device_rsp(): mgmt_pending_foreach callback — send the
 * Device Unpaired event and complete the pending UNPAIR_DEVICE command.
 */
8866 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8868 struct hci_dev *hdev = data;
8869 struct mgmt_cp_unpair_device *cp = cmd->param;
8871 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8873 cmd->cmd_complete(cmd, 0);
8874 mgmt_pending_remove(cmd);
/* mgmt_powering_down(): true if a pending SET_POWERED(off) command means
 * the controller is on its way down. NOTE(review): the body checking
 * cp->val appears elided in this extract.
 */
8877 bool mgmt_powering_down(struct hci_dev *hdev)
8879 struct mgmt_pending_cmd *cmd;
8880 struct mgmt_mode *cp;
8882 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* mgmt_device_disconnected(): emit MGMT_EV_DEVICE_DISCONNECTED and answer
 * any pending DISCONNECT commands. Also expedites power-off when this was
 * the last open connection during a shutdown.
 */
8893 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
8894 u8 link_type, u8 addr_type, u8 reason,
8895 bool mgmt_connected)
8897 struct mgmt_ev_device_disconnected ev;
8898 struct sock *sk = NULL;
8900 /* The connection is still in hci_conn_hash so test for 1
8901 * instead of 0 to know if this is the last one.
8903 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8904 cancel_delayed_work(&hdev->power_off);
8905 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Only report connections that were visible to mgmt in the first place. */
8908 if (!mgmt_connected)
8911 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Collect the originating socket so it is excluded from the broadcast. */
8914 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
8916 bacpy(&ev.addr.bdaddr, bdaddr);
8917 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8920 /* Report disconnects due to suspend */
8921 if (hdev->suspended)
8922 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
8924 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
8929 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* mgmt_disconnect_failed(): fail the pending DISCONNECT command matching
 * this address, and flush pending UNPAIR_DEVICE commands.
 */
8933 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8934 u8 link_type, u8 addr_type, u8 status)
8936 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8937 struct mgmt_cp_disconnect *cp;
8938 struct mgmt_pending_cmd *cmd;
8940 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8943 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only complete the command if it refers to this exact peer. */
8949 if (bacmp(bdaddr, &cp->addr.bdaddr))
8952 if (cp->addr.type != bdaddr_type)
8955 cmd->cmd_complete(cmd, mgmt_status(status));
8956 mgmt_pending_remove(cmd);
/* mgmt_connect_failed(): emit MGMT_EV_CONNECT_FAILED; also expedites
 * power-off on the last connection, mirroring mgmt_device_disconnected().
 */
8959 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8960 u8 addr_type, u8 status)
8962 struct mgmt_ev_connect_failed ev;
8964 /* The connection is still in hci_conn_hash so test for 1
8965 * instead of 0 to know if this is the last one.
8967 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8968 cancel_delayed_work(&hdev->power_off);
8969 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8972 bacpy(&ev.addr.bdaddr, bdaddr);
8973 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8974 ev.status = mgmt_status(status);
8976 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* mgmt_pin_code_request(): forward a BR/EDR PIN code request to userspace
 * as MGMT_EV_PIN_CODE_REQUEST. NOTE(review): the ev.secure assignment
 * appears elided in this extract.
 */
8979 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8981 struct mgmt_ev_pin_code_request ev;
8983 bacpy(&ev.addr.bdaddr, bdaddr);
8984 ev.addr.type = BDADDR_BREDR;
8987 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* mgmt_pin_code_reply_complete(): complete the pending PIN_CODE_REPLY
 * command with the translated HCI status.
 */
8990 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8993 struct mgmt_pending_cmd *cmd;
8995 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8999 cmd->cmd_complete(cmd, mgmt_status(status));
9000 mgmt_pending_remove(cmd);
/* mgmt_pin_code_neg_reply_complete(): same for PIN_CODE_NEG_REPLY. */
9003 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9006 struct mgmt_pending_cmd *cmd;
9008 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9012 cmd->cmd_complete(cmd, mgmt_status(status));
9013 mgmt_pending_remove(cmd);
/* mgmt_user_confirm_request(): ask userspace to confirm a numeric
 * comparison value during pairing (MGMT_EV_USER_CONFIRM_REQUEST).
 */
9016 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9017 u8 link_type, u8 addr_type, u32 value,
9020 struct mgmt_ev_user_confirm_request ev;
9022 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9024 bacpy(&ev.addr.bdaddr, bdaddr);
9025 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9026 ev.confirm_hint = confirm_hint;
9027 ev.value = cpu_to_le32(value);
9029 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* mgmt_user_passkey_request(): ask userspace to enter a passkey
 * (MGMT_EV_USER_PASSKEY_REQUEST).
 */
9033 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9034 u8 link_type, u8 addr_type)
9036 struct mgmt_ev_user_passkey_request ev;
9038 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9040 bacpy(&ev.addr.bdaddr, bdaddr);
9041 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9043 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* user_pairing_resp_complete(): shared helper — complete whichever
 * pending pairing-response command (opcode) matches, with the translated
 * HCI status.
 */
9047 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9048 u8 link_type, u8 addr_type, u8 status,
9051 struct mgmt_pending_cmd *cmd;
9053 cmd = pending_find(opcode, hdev);
9057 cmd->cmd_complete(cmd, mgmt_status(status));
9058 mgmt_pending_remove(cmd);
/* Thin wrappers: one per pairing-response opcode, all delegating to
 * user_pairing_resp_complete().
 */
9063 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9064 u8 link_type, u8 addr_type, u8 status)
9066 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9067 status, MGMT_OP_USER_CONFIRM_REPLY);
9070 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9071 u8 link_type, u8 addr_type, u8 status)
9073 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9075 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9078 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9079 u8 link_type, u8 addr_type, u8 status)
9081 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9082 status, MGMT_OP_USER_PASSKEY_REPLY);
9085 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9086 u8 link_type, u8 addr_type, u8 status)
9088 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9090 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* mgmt_user_passkey_notify(): show the passkey being entered on the
 * remote side (MGMT_EV_PASSKEY_NOTIFY), including keypress progress.
 */
9093 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9094 u8 link_type, u8 addr_type, u32 passkey,
9097 struct mgmt_ev_passkey_notify ev;
9099 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9101 bacpy(&ev.addr.bdaddr, bdaddr);
9102 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9103 ev.passkey = __cpu_to_le32(passkey);
9104 ev.entered = entered;
9106 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* mgmt_auth_failed(): broadcast MGMT_EV_AUTH_FAILED for a failed
 * authentication; if a pairing command is pending for this connection,
 * complete it (and exclude its socket from the broadcast).
 */
9109 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9111 struct mgmt_ev_auth_failed ev;
9112 struct mgmt_pending_cmd *cmd;
9113 u8 status = mgmt_status(hci_status);
9115 bacpy(&ev.addr.bdaddr, &conn->dst);
9116 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9119 cmd = find_pairing(conn);
9121 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9122 cmd ? cmd->sk : NULL);
9125 cmd->cmd_complete(cmd, status);
9126 mgmt_pending_remove(cmd);
/* mgmt_auth_enable_complete(): finish a SET_LINK_SECURITY request —
 * on error fail the pending commands, otherwise sync HCI_LINK_SECURITY
 * with the controller's HCI_AUTH flag and broadcast new settings.
 */
9130 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9132 struct cmd_lookup match = { NULL, hdev };
9136 u8 mgmt_err = mgmt_status(status);
9137 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9138 cmd_status_rsp, &mgmt_err);
9142 if (test_bit(HCI_AUTH, &hdev->flags))
9143 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9145 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9147 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9151 new_settings(hdev, match.sk);
/* clear_eir(): queue a Write Extended Inquiry Response command that
 * zeroes the EIR data (no-op on controllers without EIR support).
 */
9157 static void clear_eir(struct hci_request *req)
9159 struct hci_dev *hdev = req->hdev;
9160 struct hci_cp_write_eir cp;
9162 if (!lmp_ext_inq_capable(hdev))
9165 memset(hdev->eir, 0, sizeof(hdev->eir));
9167 memset(&cp, 0, sizeof(cp));
9169 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* mgmt_ssp_enable_complete(): finish a SET_SSP request — on error roll
 * back the flags and fail pending commands; on success sync
 * HCI_SSP_ENABLED, broadcast new settings and refresh debug-key mode and
 * EIR as needed. NOTE(review): several branch/brace lines appear elided
 * in this extract.
 */
9172 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
9174 struct cmd_lookup match = { NULL, hdev };
9175 struct hci_request req;
9176 bool changed = false;
9179 u8 mgmt_err = mgmt_status(status);
/* Error path: undo the optimistic flag change and notify listeners. */
9181 if (enable && hci_dev_test_and_clear_flag(hdev,
9183 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9184 new_settings(hdev, NULL);
9187 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
9193 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
9195 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
9197 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so it goes down with it. */
9200 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9203 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
9206 new_settings(hdev, match.sk);
9211 hci_req_init(&req, hdev);
9213 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9214 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
9215 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
9216 sizeof(enable), &enable);
9217 __hci_req_update_eir(&req);
9222 hci_req_run(&req, NULL);
/* sk_lookup(): mgmt_pending_foreach callback — capture (and hold a ref
 * on) the first pending command's socket.
 */
9225 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9227 struct cmd_lookup *match = data;
9229 if (match->sk == NULL) {
9230 match->sk = cmd->sk;
9231 sock_hold(match->sk);
/* mgmt_set_class_of_dev_complete(): answer the commands that can change
 * the class of device and broadcast the new value on success.
 */
9235 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9238 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9240 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9241 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9242 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9245 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9246 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9247 ext_info_changed(hdev, NULL);
/* mgmt_set_local_name_complete(): record the controller's confirmed local
 * name and broadcast MGMT_EV_LOCAL_NAME_CHANGED — unless the write was
 * part of powering on, in which case no event is sent.
 */
9254 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9256 struct mgmt_cp_set_local_name ev;
9257 struct mgmt_pending_cmd *cmd;
9262 memset(&ev, 0, sizeof(ev));
9263 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9264 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9266 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9268 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9270 /* If this is a HCI command related to powering on the
9271 * HCI dev don't send any mgmt signals.
9273 if (pending_find(MGMT_OP_SET_POWERED, hdev))
/* Exclude the requester's socket from the broadcast. */
9277 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9278 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9279 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* has_uuid(): linear scan — true if `uuid` appears in the `uuids` array. */
9282 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9286 for (i = 0; i < uuid_count; i++) {
9287 if (!memcmp(uuid, uuids[i], 16))
/* eir_has_uuids(): walk the EIR/adv TLV structure and report whether any
 * advertised service UUID (16-, 32- or 128-bit, expanded against the
 * Bluetooth base UUID) matches the filter list. NOTE(review): loop-exit
 * and return lines appear elided in this extract.
 */
9294 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9298 while (parsed < eir_len) {
9299 u8 field_len = eir[0];
/* Bail out if the declared field length overruns the buffer. */
9306 if (eir_len - parsed < field_len + 1)
9310 case EIR_UUID16_ALL:
9311 case EIR_UUID16_SOME:
/* 16-bit UUIDs occupy bytes 12-13 of the base UUID (little-endian). */
9312 for (i = 0; i + 3 <= field_len; i += 2) {
9313 memcpy(uuid, bluetooth_base_uuid, 16);
9314 uuid[13] = eir[i + 3];
9315 uuid[12] = eir[i + 2];
9316 if (has_uuid(uuid, uuid_count, uuids))
9320 case EIR_UUID32_ALL:
9321 case EIR_UUID32_SOME:
9322 for (i = 0; i + 5 <= field_len; i += 4) {
9323 memcpy(uuid, bluetooth_base_uuid, 16);
9324 uuid[15] = eir[i + 5];
9325 uuid[14] = eir[i + 4];
9326 uuid[13] = eir[i + 3];
9327 uuid[12] = eir[i + 2];
9328 if (has_uuid(uuid, uuid_count, uuids))
9332 case EIR_UUID128_ALL:
9333 case EIR_UUID128_SOME:
/* Full 128-bit UUIDs are copied verbatim. */
9334 for (i = 0; i + 17 <= field_len; i += 16) {
9335 memcpy(uuid, eir + i + 2, 16);
9336 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this TLV field (1 length byte + field_len payload). */
9342 parsed += field_len + 1;
9343 eir += field_len + 1;
/* restart_le_scan(): schedule an LE scan restart (after a small delay)
 * unless the scan is inactive or close to finishing anyway.
 */
9349 static void restart_le_scan(struct hci_dev *hdev)
9351 /* If controller is not scanning we are done. */
9352 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* Skip the restart if the scan window will end before the delay. */
9355 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9356 hdev->discovery.scan_start +
9357 hdev->discovery.scan_duration))
9360 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9361 DISCOV_LE_RESTART_DELAY);
/* is_filter_match(): apply the active service-discovery filter (RSSI
 * threshold + UUID list) to a found-device result; also restarts LE
 * scanning on controllers whose duplicate filter hides RSSI updates.
 */
9364 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9365 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9367 /* If a RSSI threshold has been specified, and
9368 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9369 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9370 * is set, let it through for further processing, as we might need to
9373 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9374 * the results are also dropped.
9376 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9377 (rssi == HCI_RSSI_INVALID ||
9378 (rssi < hdev->discovery.rssi &&
9379 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9382 if (hdev->discovery.uuid_count != 0) {
9383 /* If a list of UUIDs is provided in filter, results with no
9384 * matching UUID should be dropped.
/* A match in either the EIR/adv data or the scan response suffices. */
9386 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9387 hdev->discovery.uuids) &&
9388 !eir_has_uuids(scan_rsp, scan_rsp_len,
9389 hdev->discovery.uuid_count,
9390 hdev->discovery.uuids))
9394 /* If duplicate filtering does not report RSSI changes, then restart
9395 * scanning to ensure updated result with updated RSSI values.
9397 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9398 restart_le_scan(hdev);
9400 /* Validate RSSI value against the RSSI threshold once more. */
9401 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9402 rssi < hdev->discovery.rssi)
/* mgmt_device_found(): build and broadcast MGMT_EV_DEVICE_FOUND for a
 * discovery result, applying active-discovery, service-filter and
 * limited-discoverable gating first. NOTE(review): the buffer declaration
 * and several early-return lines appear elided in this extract.
 */
9409 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9410 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9411 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9414 struct mgmt_ev_device_found *ev = (void *)buf;
9417 /* Don't send events for a non-kernel initiated discovery. With
9418 * LE one exception is if we have pend_le_reports > 0 in which
9419 * case we're doing passive scanning and want these events.
9421 if (!hci_discovery_active(hdev)) {
9422 if (link_type == ACL_LINK)
9424 if (link_type == LE_LINK &&
9425 list_empty(&hdev->pend_le_reports) &&
9426 !hci_is_adv_monitoring(hdev)) {
9431 if (hdev->discovery.result_filtering) {
9432 /* We are using service discovery */
9433 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9438 if (hdev->discovery.limited) {
9439 /* Check for limited discoverable bit */
/* BR/EDR: limited-discoverable is bit 5 of the CoD minor byte. */
9441 if (!(dev_class[1] & 0x20))
/* LE: check LE_AD_LIMITED in the advertised Flags field. */
9444 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9445 if (!flags || !(flags[0] & LE_AD_LIMITED))
9450 /* Make sure that the buffer is big enough. The 5 extra bytes
9451 * are for the potential CoD field.
9453 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9456 memset(buf, 0, sizeof(buf));
9458 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9459 * RSSI value was reported as 0 when not available. This behavior
9460 * is kept when using device discovery. This is required for full
9461 * backwards compatibility with the API.
9463 * However when using service discovery, the value 127 will be
9464 * returned when the RSSI is not available.
9466 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9467 link_type == ACL_LINK)
9470 bacpy(&ev->addr.bdaddr, bdaddr);
9471 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9473 ev->flags = cpu_to_le32(flags);
9476 /* Copy EIR or advertising data into event */
9477 memcpy(ev->eir, eir, eir_len);
/* Append the class of device only if the EIR didn't already carry one. */
9479 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9481 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9484 if (scan_rsp_len > 0)
9485 /* Append scan response data to event */
9486 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9488 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9489 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9491 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* mgmt_remote_name(): report a resolved remote name during discovery as
 * a MGMT_EV_DEVICE_FOUND event whose EIR carries only the complete name.
 */
9494 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9495 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9497 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR length and type bytes preceding the name. */
9498 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9501 ev = (struct mgmt_ev_device_found *) buf;
9503 memset(buf, 0, sizeof(buf));
9505 bacpy(&ev->addr.bdaddr, bdaddr);
9506 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9509 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9512 ev->eir_len = cpu_to_le16(eir_len);
9514 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* mgmt_discovering(): broadcast MGMT_EV_DISCOVERING whenever discovery
 * starts or stops.
 */
9517 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9519 struct mgmt_ev_discovering ev;
9521 bt_dev_dbg(hdev, "discovering %u", discovering);
9523 memset(&ev, 0, sizeof(ev));
9524 ev.type = hdev->discovery.type;
9525 ev.discovering = discovering;
9527 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* mgmt_suspending(): broadcast MGMT_EV_CONTROLLER_SUSPEND with the
 * suspend state the controller entered.
 */
9530 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9532 struct mgmt_ev_controller_suspend ev;
9534 ev.suspend_state = state;
9535 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
/* mgmt_resuming(): broadcast MGMT_EV_CONTROLLER_RESUME with the wake
 * reason and, when a remote device woke us, its address (otherwise the
 * address field is zeroed). NOTE(review): the branch selecting between
 * the two address paths appears elided in this extract.
 */
9538 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9541 struct mgmt_ev_controller_resume ev;
9543 ev.wake_reason = reason;
9545 bacpy(&ev.addr.bdaddr, bdaddr);
9546 ev.addr.type = addr_type;
9548 memset(&ev.addr, 0, sizeof(ev.addr));
9551 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Registration descriptor for the mgmt control channel: routes incoming
 * HCI_CHANNEL_CONTROL packets through the mgmt_handlers table.
 */
9554 static struct hci_mgmt_chan chan = {
9555 .channel = HCI_CHANNEL_CONTROL,
9556 .handler_count = ARRAY_SIZE(mgmt_handlers),
9557 .handlers = mgmt_handlers,
9558 .hdev_init = mgmt_init_hdev,
/* NOTE(review): the enclosing function's opening line is missing from
 * this extract — presumably mgmt_init(); verify against the full source.
 */
9563 return hci_mgmt_chan_register(&chan);
/* mgmt_exit(): module teardown — unregister the control channel. */
9566 void mgmt_exit(void)
9568 hci_mgmt_chan_unregister(&chan);