2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 14
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
105 MGMT_OP_GET_ADV_SIZE_INFO,
106 MGMT_OP_START_LIMITED_DISCOVERY,
107 MGMT_OP_READ_EXT_INFO,
108 MGMT_OP_SET_APPEARANCE,
111 static const u16 mgmt_events[] = {
112 MGMT_EV_CONTROLLER_ERROR,
114 MGMT_EV_INDEX_REMOVED,
115 MGMT_EV_NEW_SETTINGS,
116 MGMT_EV_CLASS_OF_DEV_CHANGED,
117 MGMT_EV_LOCAL_NAME_CHANGED,
118 MGMT_EV_NEW_LINK_KEY,
119 MGMT_EV_NEW_LONG_TERM_KEY,
120 MGMT_EV_DEVICE_CONNECTED,
121 MGMT_EV_DEVICE_DISCONNECTED,
122 MGMT_EV_CONNECT_FAILED,
123 MGMT_EV_PIN_CODE_REQUEST,
124 MGMT_EV_USER_CONFIRM_REQUEST,
125 MGMT_EV_USER_PASSKEY_REQUEST,
127 MGMT_EV_DEVICE_FOUND,
129 MGMT_EV_DEVICE_BLOCKED,
130 MGMT_EV_DEVICE_UNBLOCKED,
131 MGMT_EV_DEVICE_UNPAIRED,
132 MGMT_EV_PASSKEY_NOTIFY,
135 MGMT_EV_DEVICE_ADDED,
136 MGMT_EV_DEVICE_REMOVED,
137 MGMT_EV_NEW_CONN_PARAM,
138 MGMT_EV_UNCONF_INDEX_ADDED,
139 MGMT_EV_UNCONF_INDEX_REMOVED,
140 MGMT_EV_NEW_CONFIG_OPTIONS,
141 MGMT_EV_EXT_INDEX_ADDED,
142 MGMT_EV_EXT_INDEX_REMOVED,
143 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
144 MGMT_EV_ADVERTISING_ADDED,
145 MGMT_EV_ADVERTISING_REMOVED,
146 MGMT_EV_EXT_INFO_CHANGED,
149 static const u16 mgmt_untrusted_commands[] = {
150 MGMT_OP_READ_INDEX_LIST,
152 MGMT_OP_READ_UNCONF_INDEX_LIST,
153 MGMT_OP_READ_CONFIG_INFO,
154 MGMT_OP_READ_EXT_INDEX_LIST,
155 MGMT_OP_READ_EXT_INFO,
158 static const u16 mgmt_untrusted_events[] = {
160 MGMT_EV_INDEX_REMOVED,
161 MGMT_EV_NEW_SETTINGS,
162 MGMT_EV_CLASS_OF_DEV_CHANGED,
163 MGMT_EV_LOCAL_NAME_CHANGED,
164 MGMT_EV_UNCONF_INDEX_ADDED,
165 MGMT_EV_UNCONF_INDEX_REMOVED,
166 MGMT_EV_NEW_CONFIG_OPTIONS,
167 MGMT_EV_EXT_INDEX_ADDED,
168 MGMT_EV_EXT_INDEX_REMOVED,
169 MGMT_EV_EXT_INFO_CHANGED,
172 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
174 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
175 "\x00\x00\x00\x00\x00\x00\x00\x00"
177 /* HCI to MGMT error code conversion table */
178 static u8 mgmt_status_table[] = {
180 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
181 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
182 MGMT_STATUS_FAILED, /* Hardware Failure */
183 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
184 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
185 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
186 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
187 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
188 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
189 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
190 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
191 MGMT_STATUS_BUSY, /* Command Disallowed */
192 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
193 MGMT_STATUS_REJECTED, /* Rejected Security */
194 MGMT_STATUS_REJECTED, /* Rejected Personal */
195 MGMT_STATUS_TIMEOUT, /* Host Timeout */
196 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
197 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
198 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
199 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
200 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
201 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
202 MGMT_STATUS_BUSY, /* Repeated Attempts */
203 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
204 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
205 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
206 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
207 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
208 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
209 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
210 MGMT_STATUS_FAILED, /* Unspecified Error */
211 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
212 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
213 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
214 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
215 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
216 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
217 MGMT_STATUS_FAILED, /* Unit Link Key Used */
218 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
219 MGMT_STATUS_TIMEOUT, /* Instant Passed */
220 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
221 MGMT_STATUS_FAILED, /* Transaction Collision */
222 MGMT_STATUS_FAILED, /* Reserved for future use */
223 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
224 MGMT_STATUS_REJECTED, /* QoS Rejected */
225 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
226 MGMT_STATUS_REJECTED, /* Insufficient Security */
227 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
228 MGMT_STATUS_FAILED, /* Reserved for future use */
229 MGMT_STATUS_BUSY, /* Role Switch Pending */
230 MGMT_STATUS_FAILED, /* Reserved for future use */
231 MGMT_STATUS_FAILED, /* Slot Violation */
232 MGMT_STATUS_FAILED, /* Role Switch Failed */
233 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
234 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
235 MGMT_STATUS_BUSY, /* Host Busy Pairing */
236 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
237 MGMT_STATUS_BUSY, /* Controller Busy */
238 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
239 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
240 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
241 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
242 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
245 static u8 mgmt_status(u8 hci_status)
247 if (hci_status < ARRAY_SIZE(mgmt_status_table))
248 return mgmt_status_table[hci_status];
250 return MGMT_STATUS_FAILED;
253 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
256 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
260 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
261 u16 len, int flag, struct sock *skip_sk)
263 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
267 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
268 struct sock *skip_sk)
270 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
271 HCI_SOCK_TRUSTED, skip_sk);
274 static u8 le_addr_type(u8 mgmt_addr_type)
276 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
277 return ADDR_LE_DEV_PUBLIC;
279 return ADDR_LE_DEV_RANDOM;
282 void mgmt_fill_version_info(void *ver)
284 struct mgmt_rp_read_version *rp = ver;
286 rp->version = MGMT_VERSION;
287 rp->revision = cpu_to_le16(MGMT_REVISION);
290 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
293 struct mgmt_rp_read_version rp;
295 BT_DBG("sock %p", sk);
297 mgmt_fill_version_info(&rp);
299 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
303 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
306 struct mgmt_rp_read_commands *rp;
307 u16 num_commands, num_events;
311 BT_DBG("sock %p", sk);
313 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
314 num_commands = ARRAY_SIZE(mgmt_commands);
315 num_events = ARRAY_SIZE(mgmt_events);
317 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
318 num_events = ARRAY_SIZE(mgmt_untrusted_events);
321 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
323 rp = kmalloc(rp_size, GFP_KERNEL);
327 rp->num_commands = cpu_to_le16(num_commands);
328 rp->num_events = cpu_to_le16(num_events);
330 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
331 __le16 *opcode = rp->opcodes;
333 for (i = 0; i < num_commands; i++, opcode++)
334 put_unaligned_le16(mgmt_commands[i], opcode);
336 for (i = 0; i < num_events; i++, opcode++)
337 put_unaligned_le16(mgmt_events[i], opcode);
339 __le16 *opcode = rp->opcodes;
341 for (i = 0; i < num_commands; i++, opcode++)
342 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
344 for (i = 0; i < num_events; i++, opcode++)
345 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
348 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
355 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
358 struct mgmt_rp_read_index_list *rp;
364 BT_DBG("sock %p", sk);
366 read_lock(&hci_dev_list_lock);
369 list_for_each_entry(d, &hci_dev_list, list) {
370 if (d->dev_type == HCI_PRIMARY &&
371 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
375 rp_len = sizeof(*rp) + (2 * count);
376 rp = kmalloc(rp_len, GFP_ATOMIC);
378 read_unlock(&hci_dev_list_lock);
383 list_for_each_entry(d, &hci_dev_list, list) {
384 if (hci_dev_test_flag(d, HCI_SETUP) ||
385 hci_dev_test_flag(d, HCI_CONFIG) ||
386 hci_dev_test_flag(d, HCI_USER_CHANNEL))
389 /* Devices marked as raw-only are neither configured
390 * nor unconfigured controllers.
392 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
395 if (d->dev_type == HCI_PRIMARY &&
396 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
397 rp->index[count++] = cpu_to_le16(d->id);
398 BT_DBG("Added hci%u", d->id);
402 rp->num_controllers = cpu_to_le16(count);
403 rp_len = sizeof(*rp) + (2 * count);
405 read_unlock(&hci_dev_list_lock);
407 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
415 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
416 void *data, u16 data_len)
418 struct mgmt_rp_read_unconf_index_list *rp;
424 BT_DBG("sock %p", sk);
426 read_lock(&hci_dev_list_lock);
429 list_for_each_entry(d, &hci_dev_list, list) {
430 if (d->dev_type == HCI_PRIMARY &&
431 hci_dev_test_flag(d, HCI_UNCONFIGURED))
435 rp_len = sizeof(*rp) + (2 * count);
436 rp = kmalloc(rp_len, GFP_ATOMIC);
438 read_unlock(&hci_dev_list_lock);
443 list_for_each_entry(d, &hci_dev_list, list) {
444 if (hci_dev_test_flag(d, HCI_SETUP) ||
445 hci_dev_test_flag(d, HCI_CONFIG) ||
446 hci_dev_test_flag(d, HCI_USER_CHANNEL))
449 /* Devices marked as raw-only are neither configured
450 * nor unconfigured controllers.
452 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
455 if (d->dev_type == HCI_PRIMARY &&
456 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
457 rp->index[count++] = cpu_to_le16(d->id);
458 BT_DBG("Added hci%u", d->id);
462 rp->num_controllers = cpu_to_le16(count);
463 rp_len = sizeof(*rp) + (2 * count);
465 read_unlock(&hci_dev_list_lock);
467 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
468 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
475 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
476 void *data, u16 data_len)
478 struct mgmt_rp_read_ext_index_list *rp;
484 BT_DBG("sock %p", sk);
486 read_lock(&hci_dev_list_lock);
489 list_for_each_entry(d, &hci_dev_list, list) {
490 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
494 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
495 rp = kmalloc(rp_len, GFP_ATOMIC);
497 read_unlock(&hci_dev_list_lock);
502 list_for_each_entry(d, &hci_dev_list, list) {
503 if (hci_dev_test_flag(d, HCI_SETUP) ||
504 hci_dev_test_flag(d, HCI_CONFIG) ||
505 hci_dev_test_flag(d, HCI_USER_CHANNEL))
508 /* Devices marked as raw-only are neither configured
509 * nor unconfigured controllers.
511 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
514 if (d->dev_type == HCI_PRIMARY) {
515 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
516 rp->entry[count].type = 0x01;
518 rp->entry[count].type = 0x00;
519 } else if (d->dev_type == HCI_AMP) {
520 rp->entry[count].type = 0x02;
525 rp->entry[count].bus = d->bus;
526 rp->entry[count++].index = cpu_to_le16(d->id);
527 BT_DBG("Added hci%u", d->id);
530 rp->num_controllers = cpu_to_le16(count);
531 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
533 read_unlock(&hci_dev_list_lock);
535 /* If this command is called at least once, then all the
536 * default index and unconfigured index events are disabled
537 * and from now on only extended index events are used.
539 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
540 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
541 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
543 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
544 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
551 static bool is_configured(struct hci_dev *hdev)
553 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
554 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
557 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
558 !bacmp(&hdev->public_addr, BDADDR_ANY))
564 static __le32 get_missing_options(struct hci_dev *hdev)
568 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
569 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
570 options |= MGMT_OPTION_EXTERNAL_CONFIG;
572 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
573 !bacmp(&hdev->public_addr, BDADDR_ANY))
574 options |= MGMT_OPTION_PUBLIC_ADDRESS;
576 return cpu_to_le32(options);
579 static int new_options(struct hci_dev *hdev, struct sock *skip)
581 __le32 options = get_missing_options(hdev);
583 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
584 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
587 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
589 __le32 options = get_missing_options(hdev);
591 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
595 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
596 void *data, u16 data_len)
598 struct mgmt_rp_read_config_info rp;
601 BT_DBG("sock %p %s", sk, hdev->name);
605 memset(&rp, 0, sizeof(rp));
606 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
608 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
609 options |= MGMT_OPTION_EXTERNAL_CONFIG;
611 if (hdev->set_bdaddr)
612 options |= MGMT_OPTION_PUBLIC_ADDRESS;
614 rp.supported_options = cpu_to_le32(options);
615 rp.missing_options = get_missing_options(hdev);
617 hci_dev_unlock(hdev);
619 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
623 static u32 get_supported_phys(struct hci_dev *hdev)
625 u32 supported_phys = 0;
627 if (lmp_bredr_capable(hdev)) {
628 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
630 if (hdev->features[0][0] & LMP_3SLOT)
631 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
633 if (hdev->features[0][0] & LMP_5SLOT)
634 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
636 if (lmp_edr_2m_capable(hdev)) {
637 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
639 if (lmp_edr_3slot_capable(hdev))
640 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
642 if (lmp_edr_5slot_capable(hdev))
643 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
645 if (lmp_edr_3m_capable(hdev)) {
646 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
648 if (lmp_edr_3slot_capable(hdev))
649 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
651 if (lmp_edr_5slot_capable(hdev))
652 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
657 if (lmp_le_capable(hdev)) {
658 supported_phys |= MGMT_PHY_LE_1M_TX;
659 supported_phys |= MGMT_PHY_LE_1M_RX;
661 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
662 supported_phys |= MGMT_PHY_LE_2M_TX;
663 supported_phys |= MGMT_PHY_LE_2M_RX;
666 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
667 supported_phys |= MGMT_PHY_LE_CODED_TX;
668 supported_phys |= MGMT_PHY_LE_CODED_RX;
672 return supported_phys;
675 static u32 get_selected_phys(struct hci_dev *hdev)
677 u32 selected_phys = 0;
679 if (lmp_bredr_capable(hdev)) {
680 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
682 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
683 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
685 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
686 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
688 if (lmp_edr_2m_capable(hdev)) {
689 if (!(hdev->pkt_type & HCI_2DH1))
690 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
692 if (lmp_edr_3slot_capable(hdev) &&
693 !(hdev->pkt_type & HCI_2DH3))
694 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
696 if (lmp_edr_5slot_capable(hdev) &&
697 !(hdev->pkt_type & HCI_2DH5))
698 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
700 if (lmp_edr_3m_capable(hdev)) {
701 if (!(hdev->pkt_type & HCI_3DH1))
702 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
704 if (lmp_edr_3slot_capable(hdev) &&
705 !(hdev->pkt_type & HCI_3DH3))
706 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
708 if (lmp_edr_5slot_capable(hdev) &&
709 !(hdev->pkt_type & HCI_3DH5))
710 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
715 if (lmp_le_capable(hdev)) {
716 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
717 selected_phys |= MGMT_PHY_LE_1M_TX;
719 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
720 selected_phys |= MGMT_PHY_LE_1M_RX;
722 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
723 selected_phys |= MGMT_PHY_LE_2M_TX;
725 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
726 selected_phys |= MGMT_PHY_LE_2M_RX;
728 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
729 selected_phys |= MGMT_PHY_LE_CODED_TX;
731 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
732 selected_phys |= MGMT_PHY_LE_CODED_RX;
735 return selected_phys;
738 static u32 get_configurable_phys(struct hci_dev *hdev)
740 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
741 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
744 static u32 get_supported_settings(struct hci_dev *hdev)
748 settings |= MGMT_SETTING_POWERED;
749 settings |= MGMT_SETTING_BONDABLE;
750 settings |= MGMT_SETTING_DEBUG_KEYS;
751 settings |= MGMT_SETTING_CONNECTABLE;
752 settings |= MGMT_SETTING_DISCOVERABLE;
754 if (lmp_bredr_capable(hdev)) {
755 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
756 settings |= MGMT_SETTING_FAST_CONNECTABLE;
757 settings |= MGMT_SETTING_BREDR;
758 settings |= MGMT_SETTING_LINK_SECURITY;
760 if (lmp_ssp_capable(hdev)) {
761 settings |= MGMT_SETTING_SSP;
762 if (IS_ENABLED(CONFIG_BT_HS))
763 settings |= MGMT_SETTING_HS;
766 if (lmp_sc_capable(hdev))
767 settings |= MGMT_SETTING_SECURE_CONN;
770 if (lmp_le_capable(hdev)) {
771 settings |= MGMT_SETTING_LE;
772 settings |= MGMT_SETTING_ADVERTISING;
773 settings |= MGMT_SETTING_SECURE_CONN;
774 settings |= MGMT_SETTING_PRIVACY;
775 settings |= MGMT_SETTING_STATIC_ADDRESS;
778 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
780 settings |= MGMT_SETTING_CONFIGURATION;
782 settings |= MGMT_SETTING_PHY_CONFIGURATION;
787 static u32 get_current_settings(struct hci_dev *hdev)
791 if (hdev_is_powered(hdev))
792 settings |= MGMT_SETTING_POWERED;
794 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
795 settings |= MGMT_SETTING_CONNECTABLE;
797 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
798 settings |= MGMT_SETTING_FAST_CONNECTABLE;
800 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
801 settings |= MGMT_SETTING_DISCOVERABLE;
803 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
804 settings |= MGMT_SETTING_BONDABLE;
806 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
807 settings |= MGMT_SETTING_BREDR;
809 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
810 settings |= MGMT_SETTING_LE;
812 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
813 settings |= MGMT_SETTING_LINK_SECURITY;
815 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
816 settings |= MGMT_SETTING_SSP;
818 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
819 settings |= MGMT_SETTING_HS;
821 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
822 settings |= MGMT_SETTING_ADVERTISING;
824 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
825 settings |= MGMT_SETTING_SECURE_CONN;
827 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
828 settings |= MGMT_SETTING_DEBUG_KEYS;
830 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
831 settings |= MGMT_SETTING_PRIVACY;
833 /* The current setting for static address has two purposes. The
834 * first is to indicate if the static address will be used and
835 * the second is to indicate if it is actually set.
837 * This means if the static address is not configured, this flag
838 * will never be set. If the address is configured, then if the
839 * address is actually used decides if the flag is set or not.
841 * For single mode LE only controllers and dual-mode controllers
842 * with BR/EDR disabled, the existence of the static address will
845 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
846 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
847 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
848 if (bacmp(&hdev->static_addr, BDADDR_ANY))
849 settings |= MGMT_SETTING_STATIC_ADDRESS;
855 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
857 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
860 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
861 struct hci_dev *hdev,
864 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
867 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
869 struct mgmt_pending_cmd *cmd;
871 /* If there's a pending mgmt command the flags will not yet have
872 * their final values, so check for this first.
874 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
876 struct mgmt_mode *cp = cmd->param;
878 return LE_AD_GENERAL;
879 else if (cp->val == 0x02)
880 return LE_AD_LIMITED;
882 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
883 return LE_AD_LIMITED;
884 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
885 return LE_AD_GENERAL;
891 bool mgmt_get_connectable(struct hci_dev *hdev)
893 struct mgmt_pending_cmd *cmd;
895 /* If there's a pending mgmt command the flag will not yet have
896 * it's final value, so check for this first.
898 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
900 struct mgmt_mode *cp = cmd->param;
905 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
908 static void service_cache_off(struct work_struct *work)
910 struct hci_dev *hdev = container_of(work, struct hci_dev,
912 struct hci_request req;
914 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
917 hci_req_init(&req, hdev);
921 __hci_req_update_eir(&req);
922 __hci_req_update_class(&req);
924 hci_dev_unlock(hdev);
926 hci_req_run(&req, NULL);
929 static void rpa_expired(struct work_struct *work)
931 struct hci_dev *hdev = container_of(work, struct hci_dev,
933 struct hci_request req;
937 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
939 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
942 /* The generation of a new RPA and programming it into the
943 * controller happens in the hci_req_enable_advertising()
946 hci_req_init(&req, hdev);
947 if (ext_adv_capable(hdev))
948 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
950 __hci_req_enable_advertising(&req);
951 hci_req_run(&req, NULL);
954 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
956 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
959 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
960 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
962 /* Non-mgmt controlled devices get this bit set
963 * implicitly so that pairing works for them, however
964 * for mgmt we require user-space to explicitly enable
967 hci_dev_clear_flag(hdev, HCI_BONDABLE);
970 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
971 void *data, u16 data_len)
973 struct mgmt_rp_read_info rp;
975 BT_DBG("sock %p %s", sk, hdev->name);
979 memset(&rp, 0, sizeof(rp));
981 bacpy(&rp.bdaddr, &hdev->bdaddr);
983 rp.version = hdev->hci_ver;
984 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
986 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
987 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
989 memcpy(rp.dev_class, hdev->dev_class, 3);
991 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
992 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
994 hci_dev_unlock(hdev);
996 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1000 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1005 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1006 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1007 hdev->dev_class, 3);
1009 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1010 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1013 name_len = strlen(hdev->dev_name);
1014 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1015 hdev->dev_name, name_len);
1017 name_len = strlen(hdev->short_name);
1018 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1019 hdev->short_name, name_len);
1024 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1025 void *data, u16 data_len)
1028 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1031 BT_DBG("sock %p %s", sk, hdev->name);
1033 memset(&buf, 0, sizeof(buf));
1037 bacpy(&rp->bdaddr, &hdev->bdaddr);
1039 rp->version = hdev->hci_ver;
1040 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1042 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1043 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1046 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1047 rp->eir_len = cpu_to_le16(eir_len);
1049 hci_dev_unlock(hdev);
1051 /* If this command is called at least once, then the events
1052 * for class of device and local name changes are disabled
1053 * and only the new extended controller information event
1056 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1057 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1058 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1060 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1061 sizeof(*rp) + eir_len);
1064 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1067 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1070 memset(buf, 0, sizeof(buf));
1072 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1073 ev->eir_len = cpu_to_le16(eir_len);
1075 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1076 sizeof(*ev) + eir_len,
1077 HCI_MGMT_EXT_INFO_EVENTS, skip);
1080 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1082 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1084 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1088 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1090 BT_DBG("%s status 0x%02x", hdev->name, status);
1092 if (hci_conn_count(hdev) == 0) {
1093 cancel_delayed_work(&hdev->power_off);
1094 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1098 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1100 struct mgmt_ev_advertising_added ev;
1102 ev.instance = instance;
1104 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1107 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1110 struct mgmt_ev_advertising_removed ev;
1112 ev.instance = instance;
1114 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1117 static void cancel_adv_timeout(struct hci_dev *hdev)
1119 if (hdev->adv_instance_timeout) {
1120 hdev->adv_instance_timeout = 0;
1121 cancel_delayed_work(&hdev->adv_instance_expire);
1125 static int clean_up_hci_state(struct hci_dev *hdev)
1127 struct hci_request req;
1128 struct hci_conn *conn;
1129 bool discov_stopped;
1132 hci_req_init(&req, hdev);
1134 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1135 test_bit(HCI_PSCAN, &hdev->flags)) {
1137 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1140 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1142 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1143 __hci_req_disable_advertising(&req);
1145 discov_stopped = hci_req_stop_discovery(&req);
1147 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1148 /* 0x15 == Terminated due to Power Off */
1149 __hci_abort_conn(&req, conn, 0x15);
1152 err = hci_req_run(&req, clean_up_hci_complete);
1153 if (!err && discov_stopped)
1154 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1159 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1162 struct mgmt_mode *cp = data;
1163 struct mgmt_pending_cmd *cmd;
1166 BT_DBG("request for %s", hdev->name);
1168 if (cp->val != 0x00 && cp->val != 0x01)
1169 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1170 MGMT_STATUS_INVALID_PARAMS);
1174 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1175 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1180 if (!!cp->val == hdev_is_powered(hdev)) {
1181 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1185 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1192 queue_work(hdev->req_workqueue, &hdev->power_on);
1195 /* Disconnect connections, stop scans, etc */
1196 err = clean_up_hci_state(hdev);
1198 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1199 HCI_POWER_OFF_TIMEOUT);
1201 /* ENODATA means there were no HCI commands queued */
1202 if (err == -ENODATA) {
1203 cancel_delayed_work(&hdev->power_off);
1204 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1210 hci_dev_unlock(hdev);
1214 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1216 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1218 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1219 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1222 int mgmt_new_settings(struct hci_dev *hdev)
1224 return new_settings(hdev, NULL);
1229 struct hci_dev *hdev;
1233 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1235 struct cmd_lookup *match = data;
1237 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1239 list_del(&cmd->list);
1241 if (match->sk == NULL) {
1242 match->sk = cmd->sk;
1243 sock_hold(match->sk);
1246 mgmt_pending_free(cmd);
1249 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1253 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1254 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: complete a pending command via its
 * cmd_complete hook when one is set, otherwise fall back to a plain
 * status response.
 */
1257 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1259 if (cmd->cmd_complete) {
1262 cmd->cmd_complete(cmd, *status);
1263 mgmt_pending_remove(cmd);
/* No per-command completion handler: send a bare status reply. */
1268 cmd_status_rsp(cmd, data);
1271 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1273 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1274 cmd->param, cmd->param_len);
1277 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1279 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1280 cmd->param, sizeof(struct mgmt_addr_info));
1283 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1285 if (!lmp_bredr_capable(hdev))
1286 return MGMT_STATUS_NOT_SUPPORTED;
1287 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1288 return MGMT_STATUS_REJECTED;
1290 return MGMT_STATUS_SUCCESS;
1293 static u8 mgmt_le_support(struct hci_dev *hdev)
1295 if (!lmp_le_capable(hdev))
1296 return MGMT_STATUS_NOT_SUPPORTED;
1297 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1298 return MGMT_STATUS_REJECTED;
1300 return MGMT_STATUS_SUCCESS;
/* Request-complete callback for the Set Discoverable flow: respond to
 * the pending mgmt command and, on success, (re)arm the discoverable
 * timeout and broadcast New Settings.
 */
1303 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1305 struct mgmt_pending_cmd *cmd;
1307 BT_DBG("status 0x%02x", status);
1311 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* On HCI failure, report the mapped error and drop the limited-
 * discoverable flag that may have been set optimistically.
 */
1316 u8 mgmt_err = mgmt_status(status);
1317 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1318 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the discoverable timeout now that the mode change took effect. */
1322 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1323 hdev->discov_timeout > 0) {
1324 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1325 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1328 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1329 new_settings(hdev, cmd->sk);
1332 mgmt_pending_remove(cmd);
1335 hci_dev_unlock(hdev);
/* MGMT Set Discoverable command handler: 0x00 off, 0x01 general,
 * 0x02 limited, with an optional timeout in seconds.
 */
1338 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1341 struct mgmt_cp_set_discoverable *cp = data;
1342 struct mgmt_pending_cmd *cmd;
1346 BT_DBG("request for %s", hdev->name);
/* Discoverable requires at least one enabled transport. */
1348 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1349 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1351 MGMT_STATUS_REJECTED);
1353 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1355 MGMT_STATUS_INVALID_PARAMS);
1357 timeout = __le16_to_cpu(cp->timeout);
1359 /* Disabling discoverable requires that no timeout is set,
1360 * and enabling limited discoverable requires a timeout.
1362 if ((cp->val == 0x00 && timeout > 0) ||
1363 (cp->val == 0x02 && timeout == 0))
1364 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1365 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while powered off. */
1369 if (!hdev_is_powered(hdev) && timeout > 0) {
1370 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1371 MGMT_STATUS_NOT_POWERED);
/* Serialize against in-flight discoverable/connectable changes. */
1375 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1376 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1377 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires the device to be connectable. */
1382 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1383 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1384 MGMT_STATUS_REJECTED);
/* Powered off: just toggle the flag and ack, no HCI traffic. */
1388 if (!hdev_is_powered(hdev)) {
1389 bool changed = false;
1391 /* Setting limited discoverable when powered off is
1392 * not a valid operation since it requires a timeout
1393 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1395 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1396 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1400 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1405 err = new_settings(hdev, sk);
1410 /* If the current mode is the same, then just update the timeout
1411 * value with the new value. And if only the timeout gets updated,
1412 * then no need for any HCI transactions.
1414 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1415 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1416 HCI_LIMITED_DISCOVERABLE)) {
1417 cancel_delayed_work(&hdev->discov_off);
1418 hdev->discov_timeout = timeout;
1420 if (cp->val && hdev->discov_timeout > 0) {
1421 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1422 queue_delayed_work(hdev->req_workqueue,
1423 &hdev->discov_off, to);
1426 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1430 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1436 /* Cancel any potential discoverable timeout that might be
1437 * still active and store new timeout value. The arming of
1438 * the timeout happens in the complete handler.
1440 cancel_delayed_work(&hdev->discov_off);
1441 hdev->discov_timeout = timeout;
1444 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1446 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1448 /* Limited discoverable mode */
1449 if (cp->val == 0x02)
1450 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1452 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Actual HCI updates happen asynchronously in discoverable_update. */
1454 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1458 hci_dev_unlock(hdev);
/* Request-complete callback for Set Connectable: respond to the
 * pending command and broadcast New Settings on success.
 */
1462 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1464 struct mgmt_pending_cmd *cmd;
1466 BT_DBG("status 0x%02x", status);
1470 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
/* Map the raw HCI status to a mgmt error code on failure. */
1475 u8 mgmt_err = mgmt_status(status);
1476 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1480 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1481 new_settings(hdev, cmd->sk);
1484 mgmt_pending_remove(cmd);
1487 hci_dev_unlock(hdev);
/* Flag-only connectable update used when no HCI commands are needed
 * (e.g. while powered off).  Clearing connectable also clears
 * discoverable, then scanning state is refreshed.
 */
1490 static int set_connectable_update_settings(struct hci_dev *hdev,
1491 struct sock *sk, u8 val)
1493 bool changed = false;
1496 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1500 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1502 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Non-connectable implies non-discoverable. */
1503 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1506 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
/* Refresh page scanning and LE background scanning to match. */
1511 hci_req_update_scan(hdev);
1512 hci_update_background_scan(hdev);
1513 return new_settings(hdev, sk);
/* MGMT Set Connectable command handler. */
1519 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1522 struct mgmt_mode *cp = data;
1523 struct mgmt_pending_cmd *cmd;
1526 BT_DBG("request for %s", hdev->name);
/* Needs at least one enabled transport. */
1528 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1529 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1530 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1531 MGMT_STATUS_REJECTED);
1533 if (cp->val != 0x00 && cp->val != 0x01)
1534 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1535 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only update, no HCI traffic. */
1539 if (!hdev_is_powered(hdev)) {
1540 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Serialize against in-flight discoverable/connectable changes. */
1544 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1545 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1546 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1551 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1558 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Dropping connectable also cancels a pending discoverable timeout
 * and clears both discoverable flags.
 */
1560 if (hdev->discov_timeout > 0)
1561 cancel_delayed_work(&hdev->discov_off);
1563 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1564 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1565 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Actual HCI updates happen asynchronously in connectable_update. */
1568 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1572 hci_dev_unlock(hdev);
/* MGMT Set Bondable command handler: purely a host-side flag, no HCI
 * commands are required.
 */
1576 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1579 struct mgmt_mode *cp = data;
1583 BT_DBG("request for %s", hdev->name);
1585 if (cp->val != 0x00 && cp->val != 0x01)
1586 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1587 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear reports whether the flag actually changed. */
1592 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1594 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1596 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1601 /* In limited privacy mode the change of bondable mode
1602 * may affect the local advertising address.
1604 if (hdev_is_powered(hdev) &&
1605 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1606 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1607 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1608 queue_work(hdev->req_workqueue,
1609 &hdev->discoverable_update);
1611 err = new_settings(hdev, sk);
1615 hci_dev_unlock(hdev);
/* MGMT Set Link Security (legacy BR/EDR authentication) handler. */
1619 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1622 struct mgmt_mode *cp = data;
1623 struct mgmt_pending_cmd *cmd;
1627 BT_DBG("request for %s", hdev->name);
1629 status = mgmt_bredr_support(hdev);
1631 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1634 if (cp->val != 0x00 && cp->val != 0x01)
1635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1636 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only change, no HCI command. */
1640 if (!hdev_is_powered(hdev)) {
1641 bool changed = false;
1643 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1644 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1648 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1653 err = new_settings(hdev, sk);
1658 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1659 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: just ack. */
1666 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1667 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1671 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1677 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
/* Sending failed: drop the pending entry we just queued. */
1679 mgmt_pending_remove(cmd);
1684 hci_dev_unlock(hdev);
/* MGMT Set Secure Simple Pairing command handler. */
1688 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1690 struct mgmt_mode *cp = data;
1691 struct mgmt_pending_cmd *cmd;
1695 BT_DBG("request for %s", hdev->name);
1697 status = mgmt_bredr_support(hdev);
1699 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1701 if (!lmp_ssp_capable(hdev))
1702 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1703 MGMT_STATUS_NOT_SUPPORTED);
1705 if (cp->val != 0x00 && cp->val != 0x01)
1706 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1707 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: adjust the SSP (and dependent) flags only. */
1711 if (!hdev_is_powered(hdev)) {
1715 changed = !hci_dev_test_and_set_flag(hdev,
1718 changed = hci_dev_test_and_clear_flag(hdev,
1721 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so disabling SSP clears it too. */
1724 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1727 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1732 err = new_settings(hdev, sk);
1737 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1738 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
/* Requested mode already active: just ack. */
1743 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1744 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1748 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off also disables SSP debug mode if it was in use. */
1754 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1755 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1756 sizeof(cp->val), &cp->val);
1758 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1760 mgmt_pending_remove(cmd);
1765 hci_dev_unlock(hdev);
/* MGMT Set High Speed command handler; requires CONFIG_BT_HS and an
 * SSP-capable, SSP-enabled BR/EDR controller.
 */
1769 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1771 struct mgmt_mode *cp = data;
1776 BT_DBG("request for %s", hdev->name);
1778 if (!IS_ENABLED(CONFIG_BT_HS))
1779 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1780 MGMT_STATUS_NOT_SUPPORTED);
1782 status = mgmt_bredr_support(hdev);
1784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1786 if (!lmp_ssp_capable(hdev))
1787 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1788 MGMT_STATUS_NOT_SUPPORTED);
/* High Speed is only meaningful with SSP enabled. */
1790 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1791 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1792 MGMT_STATUS_REJECTED);
1794 if (cp->val != 0x00 && cp->val != 0x01)
1795 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1796 MGMT_STATUS_INVALID_PARAMS);
/* Cannot race with a pending Set SSP operation. */
1800 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1801 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1807 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS while powered on is rejected. */
1809 if (hdev_is_powered(hdev)) {
1810 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1811 MGMT_STATUS_REJECTED);
1815 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1818 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1823 err = new_settings(hdev, sk);
1826 hci_dev_unlock(hdev);
/* HCI request callback for Set LE: complete all pending SET_LE
 * commands and, when LE ended up enabled, refresh advertising data and
 * background scanning.
 */
1830 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1832 struct cmd_lookup match = { NULL, hdev };
1837 u8 mgmt_err = mgmt_status(status);
/* Failure: fail every pending SET_LE command with the mapped error. */
1839 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1844 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
/* Skip the socket that triggered the change when broadcasting. */
1846 new_settings(hdev, match.sk);
1851 /* Make sure the controller has a good default for
1852 * advertising data. Restrict the update to when LE
1853 * has actually been enabled. During power on, the
1854 * update in powered_update_hci will take care of it.
1856 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1857 struct hci_request req;
1858 hci_req_init(&req, hdev);
/* Extended advertising controllers use ext adv instance 0. */
1859 if (ext_adv_capable(hdev)) {
1862 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1864 __hci_req_update_scan_rsp_data(&req, 0x00);
1866 __hci_req_update_adv_data(&req, 0x00);
1867 __hci_req_update_scan_rsp_data(&req, 0x00);
1869 hci_req_run(&req, NULL);
1870 hci_update_background_scan(hdev);
1874 hci_dev_unlock(hdev);
/* MGMT Set Low Energy command handler. */
1877 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1879 struct mgmt_mode *cp = data;
1880 struct hci_cp_write_le_host_supported hci_cp;
1881 struct mgmt_pending_cmd *cmd;
1882 struct hci_request req;
1886 BT_DBG("request for %s", hdev->name);
1888 if (!lmp_le_capable(hdev))
1889 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1890 MGMT_STATUS_NOT_SUPPORTED);
1892 if (cp->val != 0x00 && cp->val != 0x01)
1893 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1894 MGMT_STATUS_INVALID_PARAMS);
1896 /* Bluetooth single mode LE only controllers or dual-mode
1897 * controllers configured as LE only devices, do not allow
1898 * switching LE off. These have either LE enabled explicitly
1899 * or BR/EDR has been previously switched off.
1901 * When trying to enable an already enabled LE, then gracefully
1902 * send a positive response. Trying to disable it however will
1903 * result into rejection.
1905 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1906 if (cp->val == 0x01)
1907 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1910 MGMT_STATUS_REJECTED);
1916 enabled = lmp_host_le_capable(hdev);
/* Disabling LE tears down all advertising instances first. */
1919 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
/* No HCI traffic needed: powered off, or host LE support already
 * matches the requested value.
 */
1921 if (!hdev_is_powered(hdev) || val == enabled) {
1922 bool changed = false;
1924 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1925 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1929 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1930 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1934 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1939 err = new_settings(hdev, sk);
1944 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1945 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1946 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1951 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1957 hci_req_init(&req, hdev);
1959 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE + BR/EDR is never advertised as supported. */
1963 hci_cp.simul = 0x00;
/* Stop legacy/extended advertising before turning LE off. */
1965 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1966 __hci_req_disable_advertising(&req);
1968 if (ext_adv_capable(hdev))
1969 __hci_req_clear_ext_adv_sets(&req);
1972 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1975 err = hci_req_run(&req, le_enable_complete);
1977 mgmt_pending_remove(cmd);
1980 hci_dev_unlock(hdev);
1984 /* This is a helper function to test for pending mgmt commands that can
1985 * cause CoD or EIR HCI commands. We can only allow one such pending
1986 * mgmt command at a time since otherwise we cannot easily track what
1987 * the current values are, will be, and based on that calculate if a new
1988 * HCI command needs to be sent and if yes with what value.
1990 static bool pending_eir_or_class(struct hci_dev *hdev)
1992 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that may touch CoD/EIR.
 * NOTE(review): the return statements of this function are on elided
 * lines of the full source.
 */
1994 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1995 switch (cmd->opcode) {
1996 case MGMT_OP_ADD_UUID:
1997 case MGMT_OP_REMOVE_UUID:
1998 case MGMT_OP_SET_DEV_CLASS:
1999 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; 16-/32-bit UUIDs are aliases into this.
 */
2007 static const u8 bluetooth_base_uuid[] = {
2008 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2009 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID value as 16-, 32- or 128-bit for EIR
 * purposes.  The visible logic compares the low 12 bytes against the
 * Bluetooth Base UUID and reads the 32-bit alias at offset 12.
 * NOTE(review): the return statements are on elided lines — confirm
 * against the full source.
 */
2012 static u8 get_uuid_size(const u8 *uuid)
2016 if (memcmp(uuid, bluetooth_base_uuid, 12))
2019 val = get_unaligned_le32(&uuid[12]);
/* Complete a pending class-of-device related command (@mgmt_op) with
 * the current 3-byte dev_class as the response payload.
 */
2026 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2028 struct mgmt_pending_cmd *cmd;
2032 cmd = pending_find(mgmt_op, hdev);
2036 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2037 mgmt_status(status), hdev->dev_class, 3);
2039 mgmt_pending_remove(cmd);
2042 hci_dev_unlock(hdev);
/* HCI request callback for Add UUID. */
2045 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2047 BT_DBG("status 0x%02x", status);
2049 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT Add UUID command handler: record the UUID and refresh the class
 * of device and EIR data.
 */
2052 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2054 struct mgmt_cp_add_uuid *cp = data;
2055 struct mgmt_pending_cmd *cmd;
2056 struct hci_request req;
2057 struct bt_uuid *uuid;
2060 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may be in flight at a time. */
2064 if (pending_eir_or_class(hdev)) {
2065 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2070 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2076 memcpy(uuid->uuid, cp->uuid, 16);
2077 uuid->svc_hint = cp->svc_hint;
2078 uuid->size = get_uuid_size(cp->uuid);
2080 list_add_tail(&uuid->list, &hdev->uuids);
2082 hci_req_init(&req, hdev);
2084 __hci_req_update_class(&req);
2085 __hci_req_update_eir(&req);
2087 err = hci_req_run(&req, add_uuid_complete);
/* -ENODATA means nothing needed updating: complete immediately. */
2089 if (err != -ENODATA)
2092 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2093 hdev->dev_class, 3);
2097 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2106 hci_dev_unlock(hdev);
/* Arm the service-cache timer when powered and not already cached.
 * NOTE(review): the return statements are on elided lines — the result
 * presumably indicates whether caching was (re)armed; confirm against
 * the full source.
 */
2110 static bool enable_service_cache(struct hci_dev *hdev)
2112 if (!hdev_is_powered(hdev))
2115 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2116 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for Remove UUID. */
2124 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2126 BT_DBG("status 0x%02x", status);
2128 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT Remove UUID command handler.  An all-zero UUID clears the whole
 * list; otherwise entries matching the given UUID are removed.
 */
2131 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2134 struct mgmt_cp_remove_uuid *cp = data;
2135 struct mgmt_pending_cmd *cmd;
2136 struct bt_uuid *match, *tmp;
2137 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2138 struct hci_request req;
2141 BT_DBG("request for %s", hdev->name);
2145 if (pending_eir_or_class(hdev)) {
2146 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: drop every stored UUID. */
2151 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2152 hci_uuids_clear(hdev);
2154 if (enable_service_cache(hdev)) {
2155 err = mgmt_cmd_complete(sk, hdev->id,
2156 MGMT_OP_REMOVE_UUID,
2157 0, hdev->dev_class, 3);
/* Remove each list entry matching the requested UUID. */
2166 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2167 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2170 list_del(&match->list);
/* No match found (per the elided found-counter): invalid params. */
2176 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2177 MGMT_STATUS_INVALID_PARAMS);
2182 hci_req_init(&req, hdev);
2184 __hci_req_update_class(&req);
2185 __hci_req_update_eir(&req);
2187 err = hci_req_run(&req, remove_uuid_complete);
/* -ENODATA means nothing needed updating: complete immediately. */
2189 if (err != -ENODATA)
2192 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2193 hdev->dev_class, 3);
2197 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2206 hci_dev_unlock(hdev);
/* HCI request callback for Set Device Class. */
2210 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2212 BT_DBG("status 0x%02x", status);
2214 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT Set Device Class command handler (BR/EDR only). */
2217 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2220 struct mgmt_cp_set_dev_class *cp = data;
2221 struct mgmt_pending_cmd *cmd;
2222 struct hci_request req;
2225 BT_DBG("request for %s", hdev->name);
2227 if (!lmp_bredr_capable(hdev))
2228 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2229 MGMT_STATUS_NOT_SUPPORTED);
2233 if (pending_eir_or_class(hdev)) {
2234 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2239 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2240 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2241 MGMT_STATUS_INVALID_PARAMS);
2245 hdev->major_class = cp->major;
2246 hdev->minor_class = cp->minor;
/* Powered off: store the values and ack without HCI traffic. */
2248 if (!hdev_is_powered(hdev)) {
2249 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2250 hdev->dev_class, 3);
2254 hci_req_init(&req, hdev);
/* Flush the service cache so EIR reflects the real UUID list;
 * unlock around the synchronous cancel to avoid deadlocking with the
 * work item.
 */
2256 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2257 hci_dev_unlock(hdev);
2258 cancel_delayed_work_sync(&hdev->service_cache);
2260 __hci_req_update_eir(&req);
2263 __hci_req_update_class(&req);
2265 err = hci_req_run(&req, set_class_complete);
/* -ENODATA means nothing needed updating: complete immediately. */
2267 if (err != -ENODATA)
2270 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2271 hdev->dev_class, 3);
2275 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2284 hci_dev_unlock(hdev);
/* MGMT Load Link Keys command handler: replace the stored BR/EDR link
 * keys with the supplied list after strict validation.
 */
2288 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2291 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound keeping expected_len within U16_MAX (no u16 overflow). */
2292 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2293 sizeof(struct mgmt_link_key_info));
2294 u16 key_count, expected_len;
2298 BT_DBG("request for %s", hdev->name);
2300 if (!lmp_bredr_capable(hdev))
2301 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2302 MGMT_STATUS_NOT_SUPPORTED);
2304 key_count = __le16_to_cpu(cp->key_count);
2305 if (key_count > max_key_count) {
2306 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2308 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2309 MGMT_STATUS_INVALID_PARAMS);
/* Exact-length check guards against truncated or padded payloads. */
2312 expected_len = sizeof(*cp) + key_count *
2313 sizeof(struct mgmt_link_key_info);
2314 if (expected_len != len) {
2315 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2317 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2318 MGMT_STATUS_INVALID_PARAMS);
2321 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2322 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2323 MGMT_STATUS_INVALID_PARAMS);
2325 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before clearing the existing key store. */
2328 for (i = 0; i < key_count; i++) {
2329 struct mgmt_link_key_info *key = &cp->keys[i];
2331 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2332 return mgmt_cmd_status(sk, hdev->id,
2333 MGMT_OP_LOAD_LINK_KEYS,
2334 MGMT_STATUS_INVALID_PARAMS);
2339 hci_link_keys_clear(hdev);
2342 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2344 changed = hci_dev_test_and_clear_flag(hdev,
2345 HCI_KEEP_DEBUG_KEYS);
2348 new_settings(hdev, NULL);
2350 for (i = 0; i < key_count; i++) {
2351 struct mgmt_link_key_info *key = &cp->keys[i];
2353 /* Always ignore debug keys and require a new pairing if
2354 * the user wants to use them.
2356 if (key->type == HCI_LK_DEBUG_COMBINATION)
2359 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2360 key->type, key->pin_len, NULL);
2363 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2365 hci_dev_unlock(hdev);
2370 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2371 u8 addr_type, struct sock *skip_sk)
2373 struct mgmt_ev_device_unpaired ev;
2375 bacpy(&ev.addr.bdaddr, bdaddr);
2376 ev.addr.type = addr_type;
2378 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device command handler: delete stored keys for a device
 * and optionally terminate an existing connection to it.
 */
2382 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2385 struct mgmt_cp_unpair_device *cp = data;
2386 struct mgmt_rp_unpair_device rp;
2387 struct hci_conn_params *params;
2388 struct mgmt_pending_cmd *cmd;
2389 struct hci_conn *conn;
/* Response always echoes the requested address back. */
2393 memset(&rp, 0, sizeof(rp));
2394 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2395 rp.addr.type = cp->addr.type;
2397 if (!bdaddr_type_is_valid(cp->addr.type))
2398 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2399 MGMT_STATUS_INVALID_PARAMS,
2402 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2403 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2404 MGMT_STATUS_INVALID_PARAMS,
2409 if (!hdev_is_powered(hdev)) {
2410 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2411 MGMT_STATUS_NOT_POWERED, &rp,
2416 if (cp->addr.type == BDADDR_BREDR) {
2417 /* If disconnection is requested, then look up the
2418 * connection. If the remote device is connected, it
2419 * will be later used to terminate the link.
2421 * Setting it to NULL explicitly will cause no
2422 * termination of the link.
2425 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2430 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* No stored link key means the device was not paired. */
2432 err = mgmt_cmd_complete(sk, hdev->id,
2433 MGMT_OP_UNPAIR_DEVICE,
2434 MGMT_STATUS_NOT_PAIRED, &rp,
2442 /* LE address type */
2443 addr_type = le_addr_type(cp->addr.type);
2445 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2446 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2448 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2449 MGMT_STATUS_NOT_PAIRED, &rp,
2454 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
/* Not connected: drop the stored parameters right away. */
2456 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2461 /* Defer clearing up the connection parameters until closing to
2462 * give a chance of keeping them if a repairing happens.
2464 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2466 /* Disable auto-connection parameters if present */
2467 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2469 if (params->explicit_connect)
2470 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2472 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2475 /* If disconnection is not requested, then clear the connection
2476 * variable so that the link is not terminated.
2478 if (!cp->disconnect)
2482 /* If the connection variable is set, then termination of the
2483 * link is requested.
2486 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2488 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2492 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2499 cmd->cmd_complete = addr_cmd_complete;
2501 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2503 mgmt_pending_remove(cmd);
2506 hci_dev_unlock(hdev);
/* MGMT Disconnect command handler. */
2510 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2513 struct mgmt_cp_disconnect *cp = data;
2514 struct mgmt_rp_disconnect rp;
2515 struct mgmt_pending_cmd *cmd;
2516 struct hci_conn *conn;
/* Response always echoes the requested address back. */
2521 memset(&rp, 0, sizeof(rp));
2522 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2523 rp.addr.type = cp->addr.type;
2525 if (!bdaddr_type_is_valid(cp->addr.type))
2526 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2527 MGMT_STATUS_INVALID_PARAMS,
2532 if (!test_bit(HCI_UP, &hdev->flags)) {
2533 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2534 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one Disconnect may be pending at a time. */
2539 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2540 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2541 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Look up the connection on the transport named by addr.type. */
2545 if (cp->addr.type == BDADDR_BREDR)
2546 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2549 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2550 le_addr_type(cp->addr.type));
2552 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2553 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2554 MGMT_STATUS_NOT_CONNECTED, &rp,
2559 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2565 cmd->cmd_complete = generic_cmd_complete;
2567 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2569 mgmt_pending_remove(cmd);
2572 hci_dev_unlock(hdev);
2576 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2578 switch (link_type) {
2580 switch (addr_type) {
2581 case ADDR_LE_DEV_PUBLIC:
2582 return BDADDR_LE_PUBLIC;
2585 /* Fallback to LE Random address type */
2586 return BDADDR_LE_RANDOM;
2590 /* Fallback to BR/EDR type */
2591 return BDADDR_BREDR;
/* MGMT Get Connections command handler: report all mgmt-visible
 * connections, filtering out SCO/eSCO links.
 */
2595 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2598 struct mgmt_rp_get_connections *rp;
2608 if (!hdev_is_powered(hdev)) {
2609 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2610 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the response buffer. */
2615 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2616 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2620 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2621 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
2628 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2629 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2631 bacpy(&rp->addr[i].bdaddr, &c->dst);
2632 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2633 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2638 rp->conn_count = cpu_to_le16(i);
2640 /* Recalculate length in case of filtered SCO connections, etc */
2641 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2643 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2649 hci_dev_unlock(hdev);
/* Queue an HCI PIN Code Negative Reply for the address in @cp, tracked
 * by a pending mgmt command.
 */
2653 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2654 struct mgmt_cp_pin_code_neg_reply *cp)
2656 struct mgmt_pending_cmd *cmd;
2659 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2664 cmd->cmd_complete = addr_cmd_complete;
2666 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2667 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
/* Sending failed: drop the pending entry we just queued. */
2669 mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply command handler. */
2674 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2677 struct hci_conn *conn;
2678 struct mgmt_cp_pin_code_reply *cp = data;
2679 struct hci_cp_pin_code_reply reply;
2680 struct mgmt_pending_cmd *cmd;
2687 if (!hdev_is_powered(hdev)) {
2688 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2689 MGMT_STATUS_NOT_POWERED);
2693 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2695 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2696 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise NAK the
 * controller and tell the caller the parameters were invalid.
 */
2700 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2701 struct mgmt_cp_pin_code_neg_reply ncp;
2703 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2705 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2707 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2709 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2710 MGMT_STATUS_INVALID_PARAMS);
2715 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2721 cmd->cmd_complete = addr_cmd_complete;
2723 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2724 reply.pin_len = cp->pin_len;
2725 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2727 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2729 mgmt_pending_remove(cmd);
2732 hci_dev_unlock(hdev);
/* MGMT Set IO Capability command handler: store the host IO capability
 * used for subsequent pairings.
 */
2736 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2739 struct mgmt_cp_set_io_capability *cp = data;
/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability value. */
2743 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2744 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2745 MGMT_STATUS_INVALID_PARAMS);
2749 hdev->io_capability = cp->io_capability;
2751 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2752 hdev->io_capability);
2754 hci_dev_unlock(hdev);
2756 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending Pair Device command whose user_data is @conn.
 * NOTE(review): the loop-continue and return statements are on elided
 * lines of the full source.
 */
2760 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2762 struct hci_dev *hdev = conn->hdev;
2763 struct mgmt_pending_cmd *cmd;
2765 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2766 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2769 if (cmd->user_data != conn)
/* Finish a Pair Device operation: reply to the caller, detach the
 * pairing callbacks from the connection and drop its reference.
 */
2778 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2780 struct mgmt_rp_pair_device rp;
2781 struct hci_conn *conn = cmd->user_data;
2784 bacpy(&rp.addr.bdaddr, &conn->dst);
2785 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2787 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2788 status, &rp, sizeof(rp));
2790 /* So we don't get further callbacks for this connection */
2791 conn->connect_cfm_cb = NULL;
2792 conn->security_cfm_cb = NULL;
2793 conn->disconn_cfm_cb = NULL;
2795 hci_conn_drop(conn);
2797 /* The device is paired so there is no need to remove
2798 * its connection parameters anymore.
2800 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when LE pairing finishes; resolve any pending
 * PAIR_DEVICE command for this connection with success or failure.
 */
2807 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2809 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2810 struct mgmt_pending_cmd *cmd;
2812 cmd = find_pairing(conn);
2814 cmd->cmd_complete(cmd, status);
2815 mgmt_pending_remove(cmd);
/* BR/EDR connection callback (connect/security/disconnect) used while a
 * PAIR_DEVICE command is pending: map the HCI status to a mgmt status and
 * complete the pending command.
 */
2819 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2821 struct mgmt_pending_cmd *cmd;
2823 BT_DBG("status %u", status);
2825 cmd = find_pairing(conn);
2827 BT_DBG("Unable to find a pending command");
2831 cmd->cmd_complete(cmd, mgmt_status(status));
2832 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb. NOTE(review): the elided lines at the
 * top presumably early-return on success, since for LE a successful
 * connect/security event alone does not mean pairing finished — confirm.
 */
2835 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2837 struct mgmt_pending_cmd *cmd;
2839 BT_DBG("status %u", status);
2844 cmd = find_pairing(conn);
2846 BT_DBG("Unable to find a pending command");
2850 cmd->cmd_complete(cmd, mgmt_status(status));
2851 mgmt_pending_remove(cmd);
/* MGMT Pair Device handler: validate the request, establish a BR/EDR or
 * LE connection to the peer, register pairing callbacks on it and queue a
 * pending PAIR_DEVICE command that completes when pairing finishes.
 */
2854 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2857 struct mgmt_cp_pair_device *cp = data;
2858 struct mgmt_rp_pair_device rp;
2859 struct mgmt_pending_cmd *cmd;
2860 u8 sec_level, auth_type;
2861 struct hci_conn *conn;
/* The reply always echoes the peer address back to userspace. */
2866 memset(&rp, 0, sizeof(rp));
2867 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2868 rp.addr.type = cp->addr.type;
2870 if (!bdaddr_type_is_valid(cp->addr.type))
2871 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2872 MGMT_STATUS_INVALID_PARAMS,
2875 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2876 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2877 MGMT_STATUS_INVALID_PARAMS,
2882 if (!hdev_is_powered(hdev)) {
2883 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2884 MGMT_STATUS_NOT_POWERED, &rp,
2889 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2890 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2891 MGMT_STATUS_ALREADY_PAIRED, &rp,
2896 sec_level = BT_SECURITY_MEDIUM;
2897 auth_type = HCI_AT_DEDICATED_BONDING;
2899 if (cp->addr.type == BDADDR_BREDR) {
2900 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2903 u8 addr_type = le_addr_type(cp->addr.type);
2904 struct hci_conn_params *p;
2906 /* When pairing a new device, it is expected to remember
2907 * this device for future connections. Adding the connection
2908 * parameter information ahead of time allows tracking
2909 * of the slave preferred values and will speed up any
2910 * further connection establishment.
2912 * If connection parameters already exist, then they
2913 * will be kept and this function does nothing.
2915 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2917 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2918 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2920 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2921 addr_type, sec_level,
2922 HCI_LE_CONN_TIMEOUT);
/* Translate the connect failure errno into a mgmt status code. */
2928 if (PTR_ERR(conn) == -EBUSY)
2929 status = MGMT_STATUS_BUSY;
2930 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2931 status = MGMT_STATUS_NOT_SUPPORTED;
2932 else if (PTR_ERR(conn) == -ECONNREFUSED)
2933 status = MGMT_STATUS_REJECTED;
2935 status = MGMT_STATUS_CONNECT_FAILED;
2937 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2938 status, &rp, sizeof(rp));
/* A connection with callbacks already set means another pairing is
 * in progress on it — refuse with BUSY.
 */
2942 if (conn->connect_cfm_cb) {
2943 hci_conn_drop(conn);
2944 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2945 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2949 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2952 hci_conn_drop(conn);
2956 cmd->cmd_complete = pairing_complete;
2958 /* For LE, just connecting isn't a proof that the pairing finished */
2959 if (cp->addr.type == BDADDR_BREDR) {
2960 conn->connect_cfm_cb = pairing_complete_cb;
2961 conn->security_cfm_cb = pairing_complete_cb;
2962 conn->disconn_cfm_cb = pairing_complete_cb;
2964 conn->connect_cfm_cb = le_pairing_complete_cb;
2965 conn->security_cfm_cb = le_pairing_complete_cb;
2966 conn->disconn_cfm_cb = le_pairing_complete_cb;
2969 conn->io_capability = cp->io_cap;
/* Hold a reference on the connection for the pending command. */
2970 cmd->user_data = hci_conn_get(conn);
/* Already connected (or in config) and secure enough: finish now. */
2972 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2973 hci_conn_security(conn, sec_level, auth_type, true)) {
2974 cmd->cmd_complete(cmd, 0);
2975 mgmt_pending_remove(cmd);
2981 hci_dev_unlock(hdev);
/* MGMT Cancel Pair Device handler: abort the pending PAIR_DEVICE command
 * for the given peer address (completing it with CANCELLED status).
 */
2985 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2988 struct mgmt_addr_info *addr = data;
2989 struct mgmt_pending_cmd *cmd;
2990 struct hci_conn *conn;
2997 if (!hdev_is_powered(hdev)) {
2998 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2999 MGMT_STATUS_NOT_POWERED);
3003 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3005 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3006 MGMT_STATUS_INVALID_PARAMS);
3010 conn = cmd->user_data;
/* The address in the cancel request must match the ongoing pairing. */
3012 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3013 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3014 MGMT_STATUS_INVALID_PARAMS);
3018 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3019 mgmt_pending_remove(cmd);
3021 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3022 addr, sizeof(*addr));
3024 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN/confirm/passkey,
 * positive and negative). For LE the reply is routed through SMP; for
 * BR/EDR the matching HCI command (hci_op) is sent and a pending mgmt
 * command is queued until the controller answers.
 */
3028 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3029 struct mgmt_addr_info *addr, u16 mgmt_op,
3030 u16 hci_op, __le32 passkey)
3032 struct mgmt_pending_cmd *cmd;
3033 struct hci_conn *conn;
3038 if (!hdev_is_powered(hdev)) {
3039 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3040 MGMT_STATUS_NOT_POWERED, addr,
3045 if (addr->type == BDADDR_BREDR)
3046 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3048 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3049 le_addr_type(addr->type));
3052 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3053 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go through SMP, not HCI. */
3058 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3059 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3061 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3062 MGMT_STATUS_SUCCESS, addr,
3065 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3066 MGMT_STATUS_FAILED, addr,
3072 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3078 cmd->cmd_complete = addr_cmd_complete;
3080 /* Continue with pairing via HCI */
/* Only the passkey reply carries a payload beyond the address. */
3081 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3082 struct hci_cp_user_passkey_reply cp;
3084 bacpy(&cp.bdaddr, &addr->bdaddr);
3085 cp.passkey = passkey;
3086 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3088 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3092 mgmt_pending_remove(cmd);
3095 hci_dev_unlock(hdev);
/* MGMT PIN Code Negative Reply: thin wrapper around user_pairing_resp(). */
3099 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3100 void *data, u16 len)
3102 struct mgmt_cp_pin_code_neg_reply *cp = data;
3106 return user_pairing_resp(sk, hdev, &cp->addr,
3107 MGMT_OP_PIN_CODE_NEG_REPLY,
3108 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT User Confirm Reply: validates the fixed-size payload and forwards
 * to user_pairing_resp().
 */
3111 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3114 struct mgmt_cp_user_confirm_reply *cp = data;
3118 if (len != sizeof(*cp))
3119 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3120 MGMT_STATUS_INVALID_PARAMS);
3122 return user_pairing_resp(sk, hdev, &cp->addr,
3123 MGMT_OP_USER_CONFIRM_REPLY,
3124 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT User Confirm Negative Reply: wrapper around user_pairing_resp(). */
3127 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3128 void *data, u16 len)
3130 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3134 return user_pairing_resp(sk, hdev, &cp->addr,
3135 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3136 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT User Passkey Reply: forwards the passkey via user_pairing_resp(). */
3139 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3142 struct mgmt_cp_user_passkey_reply *cp = data;
3146 return user_pairing_resp(sk, hdev, &cp->addr,
3147 MGMT_OP_USER_PASSKEY_REPLY,
3148 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT User Passkey Negative Reply: wrapper around user_pairing_resp(). */
3151 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3152 void *data, u16 len)
3154 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3158 return user_pairing_resp(sk, hdev, &cp->addr,
3159 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3160 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Force the current advertising instance to be rescheduled when one of
 * the given flags (e.g. local-name or appearance) it advertises has
 * changed, so the new data gets broadcast.
 */
3163 static void adv_expire(struct hci_dev *hdev, u32 flags)
3165 struct adv_info *adv_instance;
3166 struct hci_request req;
3169 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3173 /* stop if current instance doesn't need to be changed */
3174 if (!(adv_instance->flags & flags))
3177 cancel_adv_timeout(hdev);
3179 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3183 hci_req_init(&req, hdev);
3184 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3189 hci_req_run(&req, NULL);
/* HCI request completion for Set Local Name: report status back to the
 * pending mgmt command and, on success with LE advertising active,
 * refresh advertising instances that carry the local name.
 */
3192 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3194 struct mgmt_cp_set_local_name *cp;
3195 struct mgmt_pending_cmd *cmd;
3197 BT_DBG("status 0x%02x", status);
3201 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3208 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3209 mgmt_status(status));
3211 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3214 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3215 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME)
3218 mgmt_pending_remove(cmd);
3221 hci_dev_unlock(hdev);
/* MGMT Set Local Name handler: stores the new complete and short names
 * and, when powered, updates the controller (name, EIR, scan response)
 * via an HCI request completed by set_name_complete().
 */
3224 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3227 struct mgmt_cp_set_local_name *cp = data;
3228 struct mgmt_pending_cmd *cmd;
3229 struct hci_request req;
3236 /* If the old values are the same as the new ones just return a
3237 * direct command complete event.
3239 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3240 !memcmp(hdev->short_name, cp->short_name,
3241 sizeof(hdev->short_name))) {
3242 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3247 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Not powered: just store the name and notify listeners; the
 * controller will be programmed on power-on.
 */
3249 if (!hdev_is_powered(hdev)) {
3250 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3252 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3257 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3258 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3259 ext_info_changed(hdev, sk);
3264 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3270 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3272 hci_req_init(&req, hdev);
3274 if (lmp_bredr_capable(hdev)) {
3275 __hci_req_update_name(&req);
3276 __hci_req_update_eir(&req);
3279 /* The name is stored in the scan response data and so
3280 * no need to update the advertising data here.
3282 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3283 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3285 err = hci_req_run(&req, set_name_complete);
3287 mgmt_pending_remove(cmd);
3290 hci_dev_unlock(hdev);
3294 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3297 struct mgmt_cp_set_appearance *cp = data;
3303 if (!lmp_le_capable(hdev))
3304 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3305 MGMT_STATUS_NOT_SUPPORTED);
3307 apperance = le16_to_cpu(cp->appearance);
3311 if (hdev->appearance != apperance) {
3312 hdev->appearance = apperance;
3314 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3315 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3317 ext_info_changed(hdev, sk);
3320 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3323 hci_dev_unlock(hdev);
/* MGMT Get PHY Configuration handler: report supported, selected and
 * configurable PHYs. NOTE(review): "confguration" in the struct name is a
 * typo baked into the mgmt UAPI header and must not be changed here.
 */
3328 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3329 void *data, u16 len)
3331 struct mgmt_rp_get_phy_confguration rp;
3333 BT_DBG("sock %p %s", sk, hdev->name);
3337 memset(&rp, 0, sizeof(rp));
3339 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3340 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3341 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3343 hci_dev_unlock(hdev);
3345 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event with the currently
 * selected PHYs to all mgmt listeners except @skip.
 */
3349 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3351 struct mgmt_ev_phy_configuration_changed ev;
3353 memset(&ev, 0, sizeof(ev));
3355 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3357 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion handler for the LE Set Default PHY HCI request issued by
 * set_phy_configuration(): resolve the pending mgmt command and, on
 * success, broadcast the configuration change.
 */
3361 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3362 u16 opcode, struct sk_buff *skb)
3364 struct mgmt_pending_cmd *cmd;
3366 BT_DBG("status 0x%02x", status);
3370 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3375 mgmt_cmd_status(cmd->sk, hdev->id,
3376 MGMT_OP_SET_PHY_CONFIGURATION,
3377 mgmt_status(status));
3379 mgmt_cmd_complete(cmd->sk, hdev->id,
3380 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3383 mgmt_phy_configuration_changed(hdev, cmd->sk);
3386 mgmt_pending_remove(cmd);
3389 hci_dev_unlock(hdev);
/* MGMT Set PHY Configuration handler: validate the requested PHY set
 * against supported/configurable PHYs, translate the BR/EDR portion into
 * ACL packet-type bits applied directly to hdev->pkt_type, and program
 * the LE portion via HCI LE Set Default PHY (completed asynchronously in
 * set_default_phy_complete()).
 */
3392 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3393 void *data, u16 len)
3395 struct mgmt_cp_set_phy_confguration *cp = data;
3396 struct hci_cp_le_set_default_phy cp_phy;
3397 struct mgmt_pending_cmd *cmd;
3398 struct hci_request req;
3399 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3400 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3401 bool changed = false;
3404 BT_DBG("sock %p %s", sk, hdev->name);
3406 configurable_phys = get_configurable_phys(hdev);
3407 supported_phys = get_supported_phys(hdev);
3408 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Selecting an unsupported PHY is invalid. */
3410 if (selected_phys & ~supported_phys)
3411 return mgmt_cmd_status(sk, hdev->id,
3412 MGMT_OP_SET_PHY_CONFIGURATION,
3413 MGMT_STATUS_INVALID_PARAMS);
3415 unconfigure_phys = supported_phys & ~configurable_phys;
/* Non-configurable PHYs must always stay selected. */
3417 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3418 return mgmt_cmd_status(sk, hdev->id,
3419 MGMT_OP_SET_PHY_CONFIGURATION,
3420 MGMT_STATUS_INVALID_PARAMS);
3422 if (selected_phys == get_selected_phys(hdev))
3423 return mgmt_cmd_complete(sk, hdev->id,
3424 MGMT_OP_SET_PHY_CONFIGURATION,
3429 if (!hdev_is_powered(hdev)) {
3430 err = mgmt_cmd_status(sk, hdev->id,
3431 MGMT_OP_SET_PHY_CONFIGURATION,
3432 MGMT_STATUS_REJECTED);
3436 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3437 err = mgmt_cmd_status(sk, hdev->id,
3438 MGMT_OP_SET_PHY_CONFIGURATION,
/* Map BR/EDR PHY selections to ACL packet types. Note the EDR bits
 * are inverted: a set HCI_2DHx/HCI_3DHx bit DISABLES that packet type.
 */
3443 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3444 pkt_type |= (HCI_DH3 | HCI_DM3);
3446 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3448 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3449 pkt_type |= (HCI_DH5 | HCI_DM5);
3451 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3453 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3454 pkt_type &= ~HCI_2DH1;
3456 pkt_type |= HCI_2DH1;
3458 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3459 pkt_type &= ~HCI_2DH3;
3461 pkt_type |= HCI_2DH3;
3463 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3464 pkt_type &= ~HCI_2DH5;
3466 pkt_type |= HCI_2DH5;
3468 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3469 pkt_type &= ~HCI_3DH1;
3471 pkt_type |= HCI_3DH1;
3473 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3474 pkt_type &= ~HCI_3DH3;
3476 pkt_type |= HCI_3DH3;
3478 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3479 pkt_type &= ~HCI_3DH5;
3481 pkt_type |= HCI_3DH5;
3483 if (pkt_type != hdev->pkt_type) {
3484 hdev->pkt_type = pkt_type;
/* If the LE part is unchanged there is nothing to send to the
 * controller; answer immediately.
 */
3488 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3489 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3491 mgmt_phy_configuration_changed(hdev, sk);
3493 err = mgmt_cmd_complete(sk, hdev->id,
3494 MGMT_OP_SET_PHY_CONFIGURATION,
3500 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3507 hci_req_init(&req, hdev);
3509 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" for TX/RX. */
3511 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3512 cp_phy.all_phys |= 0x01;
3514 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3515 cp_phy.all_phys |= 0x02;
3517 if (selected_phys & MGMT_PHY_LE_1M_TX)
3518 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3520 if (selected_phys & MGMT_PHY_LE_2M_TX)
3521 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3523 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3524 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3526 if (selected_phys & MGMT_PHY_LE_1M_RX)
3527 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3529 if (selected_phys & MGMT_PHY_LE_2M_RX)
3530 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3532 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3533 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3535 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3537 err = hci_req_run_skb(&req, set_default_phy_complete);
3539 mgmt_pending_remove(cmd);
3542 hci_dev_unlock(hdev);
/* Completion handler for Read Local OOB (Extended) Data: copy the hash
 * and randomizer values from the controller reply into the mgmt response.
 * For the non-extended command only the P-192 values exist, so the reply
 * is shrunk accordingly.
 */
3547 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3548 u16 opcode, struct sk_buff *skb)
3550 struct mgmt_rp_read_local_oob_data mgmt_rp;
3551 size_t rp_size = sizeof(mgmt_rp);
3552 struct mgmt_pending_cmd *cmd;
3554 BT_DBG("%s status %u", hdev->name, status);
3556 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3560 if (status || !skb) {
3561 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3562 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3566 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
3568 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3569 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a truncated controller reply. */
3571 if (skb->len < sizeof(*rp)) {
3572 mgmt_cmd_status(cmd->sk, hdev->id,
3573 MGMT_OP_READ_LOCAL_OOB_DATA,
3574 MGMT_STATUS_FAILED);
3578 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3579 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* No P-256 values from the legacy command: trim them off. */
3581 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
3583 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3585 if (skb->len < sizeof(*rp)) {
3586 mgmt_cmd_status(cmd->sk, hdev->id,
3587 MGMT_OP_READ_LOCAL_OOB_DATA,
3588 MGMT_STATUS_FAILED);
3592 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3593 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3595 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3596 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3599 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3600 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3603 mgmt_pending_remove(cmd);
/* MGMT Read Local OOB Data handler: issue the (extended, when Secure
 * Connections is enabled) Read Local OOB Data HCI command and queue a
 * pending mgmt command resolved in read_local_oob_data_complete().
 */
3606 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3607 void *data, u16 data_len)
3609 struct mgmt_pending_cmd *cmd;
3610 struct hci_request req;
3613 BT_DBG("%s", hdev->name);
3617 if (!hdev_is_powered(hdev)) {
3618 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3619 MGMT_STATUS_NOT_POWERED);
3623 if (!lmp_ssp_capable(hdev)) {
3624 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3625 MGMT_STATUS_NOT_SUPPORTED);
3629 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3630 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3635 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3641 hci_req_init(&req, hdev);
3643 if (bredr_sc_enabled(hdev))
3644 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3646 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3648 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3650 mgmt_pending_remove(cmd);
3653 hci_dev_unlock(hdev);
/* MGMT Add Remote OOB Data handler: store remote OOB pairing data for a
 * peer. Two payload sizes are accepted: the legacy one with only P-192
 * hash/randomizer and the extended one with both P-192 and P-256 values;
 * any other length is rejected.
 */
3657 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3658 void *data, u16 len)
3660 struct mgmt_addr_info *addr = data;
3663 BT_DBG("%s ", hdev->name);
3665 if (!bdaddr_type_is_valid(addr->type))
3666 return mgmt_cmd_complete(sk, hdev->id,
3667 MGMT_OP_ADD_REMOTE_OOB_DATA,
3668 MGMT_STATUS_INVALID_PARAMS,
3669 addr, sizeof(*addr));
3673 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3674 struct mgmt_cp_add_remote_oob_data *cp = data;
/* The legacy payload only makes sense for BR/EDR. */
3677 if (cp->addr.type != BDADDR_BREDR) {
3678 err = mgmt_cmd_complete(sk, hdev->id,
3679 MGMT_OP_ADD_REMOTE_OOB_DATA,
3680 MGMT_STATUS_INVALID_PARAMS,
3681 &cp->addr, sizeof(cp->addr));
3685 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3686 cp->addr.type, cp->hash,
3687 cp->rand, NULL, NULL);
3689 status = MGMT_STATUS_FAILED;
3691 status = MGMT_STATUS_SUCCESS;
3693 err = mgmt_cmd_complete(sk, hdev->id,
3694 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3695 &cp->addr, sizeof(cp->addr));
3696 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3697 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3698 u8 *rand192, *hash192, *rand256, *hash256;
3701 if (bdaddr_type_is_le(cp->addr.type)) {
3702 /* Enforce zero-valued 192-bit parameters as
3703 * long as legacy SMP OOB isn't implemented.
3705 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3706 memcmp(cp->hash192, ZERO_KEY, 16)) {
3707 err = mgmt_cmd_complete(sk, hdev->id,
3708 MGMT_OP_ADD_REMOTE_OOB_DATA,
3709 MGMT_STATUS_INVALID_PARAMS,
3710 addr, sizeof(*addr));
3717 /* In case one of the P-192 values is set to zero,
3718 * then just disable OOB data for P-192.
3720 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3721 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3725 rand192 = cp->rand192;
3726 hash192 = cp->hash192;
3730 /* In case one of the P-256 values is set to zero, then just
3731 * disable OOB data for P-256.
3733 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3734 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3738 rand256 = cp->rand256;
3739 hash256 = cp->hash256;
3742 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3743 cp->addr.type, hash192, rand192,
3746 status = MGMT_STATUS_FAILED;
3748 status = MGMT_STATUS_SUCCESS;
3750 err = mgmt_cmd_complete(sk, hdev->id,
3751 MGMT_OP_ADD_REMOTE_OOB_DATA,
3752 status, &cp->addr, sizeof(cp->addr));
3754 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
3756 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3757 MGMT_STATUS_INVALID_PARAMS);
3761 hci_dev_unlock(hdev);
/* MGMT Remove Remote OOB Data handler: delete stored OOB data for one
 * BR/EDR peer, or all peers when BDADDR_ANY is given.
 */
3765 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3766 void *data, u16 len)
3768 struct mgmt_cp_remove_remote_oob_data *cp = data;
3772 BT_DBG("%s", hdev->name);
3774 if (cp->addr.type != BDADDR_BREDR)
3775 return mgmt_cmd_complete(sk, hdev->id,
3776 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3777 MGMT_STATUS_INVALID_PARAMS,
3778 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard: clear everything. */
3782 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3783 hci_remote_oob_data_clear(hdev);
3784 status = MGMT_STATUS_SUCCESS;
3788 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3790 status = MGMT_STATUS_INVALID_PARAMS;
3792 status = MGMT_STATUS_SUCCESS;
3795 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3796 status, &cp->addr, sizeof(cp->addr));
3798 hci_dev_unlock(hdev);
/* Resolve whichever start-discovery variant (regular, service, limited)
 * is pending once the discovery HCI work has completed.
 */
3802 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3804 struct mgmt_pending_cmd *cmd;
3806 BT_DBG("status %d", status);
3810 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3812 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3815 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3818 cmd->cmd_complete(cmd, mgmt_status(status));
3819 mgmt_pending_remove(cmd);
3822 hci_dev_unlock(hdev);
/* Check that the requested discovery type is supported by this
 * controller, writing the mgmt error code to *mgmt_status when not.
 * INTERLEAVED requires both LE and BR/EDR support (hence the
 * fall-through into the BR/EDR check).
 */
3825 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3826 uint8_t *mgmt_status)
3829 case DISCOV_TYPE_LE:
3830 *mgmt_status = mgmt_le_support(hdev);
3834 case DISCOV_TYPE_INTERLEAVED:
3835 *mgmt_status = mgmt_le_support(hdev);
3838 /* Intentional fall-through */
3839 case DISCOV_TYPE_BREDR:
3840 *mgmt_status = mgmt_bredr_support(hdev);
3845 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for Start Discovery and Start Limited Discovery:
 * validate state and type, reset the discovery filter, record the
 * discovery parameters and kick the discovery workqueue item.
 */
3852 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
3853 u16 op, void *data, u16 len)
3855 struct mgmt_cp_start_discovery *cp = data;
3856 struct mgmt_pending_cmd *cmd;
3860 BT_DBG("%s", hdev->name);
3864 if (!hdev_is_powered(hdev)) {
3865 err = mgmt_cmd_complete(sk, hdev->id, op,
3866 MGMT_STATUS_NOT_POWERED,
3867 &cp->type, sizeof(cp->type));
/* Only one discovery session at a time; periodic inquiry also blocks. */
3871 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3872 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3873 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
3874 &cp->type, sizeof(cp->type));
3878 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3879 err = mgmt_cmd_complete(sk, hdev->id, op, status,
3880 &cp->type, sizeof(cp->type));
3884 /* Clear the discovery filter first to free any previously
3885 * allocated memory for the UUID list.
3887 hci_discovery_filter_clear(hdev);
3889 hdev->discovery.type = cp->type;
3890 hdev->discovery.report_invalid_rssi = false;
3891 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
3892 hdev->discovery.limited = true;
3894 hdev->discovery.limited = false;
3896 cmd = mgmt_pending_add(sk, op, hdev, data, len);
3902 cmd->cmd_complete = generic_cmd_complete;
3904 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3905 queue_work(hdev->req_workqueue, &hdev->discov_update);
3909 hci_dev_unlock(hdev);
/* MGMT Start Discovery: wrapper around start_discovery_internal(). */
3913 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3914 void *data, u16 len)
3916 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT Start Limited Discovery: wrapper around start_discovery_internal(). */
3920 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
3921 void *data, u16 len)
3923 return start_discovery_internal(sk, hdev,
3924 MGMT_OP_START_LIMITED_DISCOVERY,
/* Completion helper for Start Service Discovery pending commands. */
3928 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
3931 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT Start Service Discovery handler: like start_discovery but with an
 * RSSI threshold and a UUID filter list that is validated against the
 * command length and copied for use during result filtering.
 */
3935 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3936 void *data, u16 len)
3938 struct mgmt_cp_start_service_discovery *cp = data;
3939 struct mgmt_pending_cmd *cmd;
/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16. */
3940 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3941 u16 uuid_count, expected_len;
3945 BT_DBG("%s", hdev->name);
3949 if (!hdev_is_powered(hdev)) {
3950 err = mgmt_cmd_complete(sk, hdev->id,
3951 MGMT_OP_START_SERVICE_DISCOVERY,
3952 MGMT_STATUS_NOT_POWERED,
3953 &cp->type, sizeof(cp->type));
3957 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3958 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3959 err = mgmt_cmd_complete(sk, hdev->id,
3960 MGMT_OP_START_SERVICE_DISCOVERY,
3961 MGMT_STATUS_BUSY, &cp->type,
3966 uuid_count = __le16_to_cpu(cp->uuid_count);
3967 if (uuid_count > max_uuid_count) {
3968 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
3970 err = mgmt_cmd_complete(sk, hdev->id,
3971 MGMT_OP_START_SERVICE_DISCOVERY,
3972 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The command length must match the advertised UUID count exactly. */
3977 expected_len = sizeof(*cp) + uuid_count * 16;
3978 if (expected_len != len) {
3979 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
3981 err = mgmt_cmd_complete(sk, hdev->id,
3982 MGMT_OP_START_SERVICE_DISCOVERY,
3983 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3988 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3989 err = mgmt_cmd_complete(sk, hdev->id,
3990 MGMT_OP_START_SERVICE_DISCOVERY,
3991 status, &cp->type, sizeof(cp->type));
3995 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4002 cmd->cmd_complete = service_discovery_cmd_complete;
4004 /* Clear the discovery filter first to free any previously
4005 * allocated memory for the UUID list.
4007 hci_discovery_filter_clear(hdev);
4009 hdev->discovery.result_filtering = true;
4010 hdev->discovery.type = cp->type;
4011 hdev->discovery.rssi = cp->rssi;
4012 hdev->discovery.uuid_count = uuid_count;
4014 if (uuid_count > 0) {
4015 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4017 if (!hdev->discovery.uuids) {
4018 err = mgmt_cmd_complete(sk, hdev->id,
4019 MGMT_OP_START_SERVICE_DISCOVERY,
4021 &cp->type, sizeof(cp->type));
4022 mgmt_pending_remove(cmd);
4027 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4028 queue_work(hdev->req_workqueue, &hdev->discov_update);
4032 hci_dev_unlock(hdev);
/* Resolve the pending Stop Discovery command once the HCI work that
 * stops discovery has completed.
 */
4036 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4038 struct mgmt_pending_cmd *cmd;
4040 BT_DBG("status %d", status);
4044 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4046 cmd->cmd_complete(cmd, mgmt_status(status));
4047 mgmt_pending_remove(cmd);
4050 hci_dev_unlock(hdev);
/* MGMT Stop Discovery handler: validate that a discovery of the given
 * type is active, then transition to STOPPING and let the workqueue do
 * the actual HCI teardown.
 */
4053 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4056 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4057 struct mgmt_pending_cmd *cmd;
4060 BT_DBG("%s", hdev->name);
4064 if (!hci_discovery_active(hdev)) {
4065 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4066 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4067 sizeof(mgmt_cp->type));
/* The type must match the discovery that was started. */
4071 if (hdev->discovery.type != mgmt_cp->type) {
4072 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4073 MGMT_STATUS_INVALID_PARAMS,
4074 &mgmt_cp->type, sizeof(mgmt_cp->type));
4078 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4084 cmd->cmd_complete = generic_cmd_complete;
4086 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4087 queue_work(hdev->req_workqueue, &hdev->discov_update);
4091 hci_dev_unlock(hdev);
/* MGMT Confirm Name handler: mark an inquiry-cache entry's name as known
 * or needed; in the latter case request name resolution for it.
 */
4095 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4098 struct mgmt_cp_confirm_name *cp = data;
4099 struct inquiry_entry *e;
4102 BT_DBG("%s", hdev->name);
4106 if (!hci_discovery_active(hdev)) {
4107 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4108 MGMT_STATUS_FAILED, &cp->addr,
4113 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4115 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4116 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4121 if (cp->name_known) {
4122 e->name_state = NAME_KNOWN;
4125 e->name_state = NAME_NEEDED;
4126 hci_inquiry_cache_update_resolve(hdev, e);
4129 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4130 &cp->addr, sizeof(cp->addr));
4133 hci_dev_unlock(hdev);
/* MGMT Block Device handler: add the address to the blacklist and emit a
 * Device Blocked event to other mgmt sockets.
 */
4137 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4140 struct mgmt_cp_block_device *cp = data;
4144 BT_DBG("%s", hdev->name);
4146 if (!bdaddr_type_is_valid(cp->addr.type))
4147 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4148 MGMT_STATUS_INVALID_PARAMS,
4149 &cp->addr, sizeof(cp->addr));
4153 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4156 status = MGMT_STATUS_FAILED;
4160 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4162 status = MGMT_STATUS_SUCCESS;
4165 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4166 &cp->addr, sizeof(cp->addr));
4168 hci_dev_unlock(hdev);
/* MGMT Unblock Device handler: remove the address from the blacklist and
 * emit a Device Unblocked event to other mgmt sockets.
 */
4173 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4176 struct mgmt_cp_unblock_device *cp = data;
4180 BT_DBG("%s", hdev->name);
4182 if (!bdaddr_type_is_valid(cp->addr.type))
4183 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4184 MGMT_STATUS_INVALID_PARAMS,
4185 &cp->addr, sizeof(cp->addr));
4189 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
/* Deleting a non-existent entry means the caller's address was bogus. */
4192 status = MGMT_STATUS_INVALID_PARAMS;
4196 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4198 status = MGMT_STATUS_SUCCESS;
4201 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4202 &cp->addr, sizeof(cp->addr));
4204 hci_dev_unlock(hdev);
/* MGMT Set Device ID handler: store the DI (Device ID) source, vendor,
 * product and version, then refresh the EIR so the DI record is
 * advertised. Source values above 0x0002 are not defined and rejected.
 */
4209 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4212 struct mgmt_cp_set_device_id *cp = data;
4213 struct hci_request req;
4217 BT_DBG("%s", hdev->name);
4219 source = __le16_to_cpu(cp->source);
4221 if (source > 0x0002)
4222 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4223 MGMT_STATUS_INVALID_PARAMS);
4227 hdev->devid_source = source;
4228 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4229 hdev->devid_product = __le16_to_cpu(cp->product);
4230 hdev->devid_version = __le16_to_cpu(cp->version);
4232 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4235 hci_req_init(&req, hdev);
4236 __hci_req_update_eir(&req);
4237 hci_req_run(&req, NULL);
4239 hci_dev_unlock(hdev);
/* Log-only completion callback used when re-enabling instance advertising. */
4244 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4247 BT_DBG("status %d", status);
/* Completion handler for Set Advertising: report status/settings to all
 * pending SET_ADVERTISING commands, sync the HCI_ADVERTISING flag with
 * the controller state, and if "Set Advertising" was just turned off
 * while advertising instances exist, reschedule instance advertising.
 */
4250 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4253 struct cmd_lookup match = { NULL, hdev };
4254 struct hci_request req;
4256 struct adv_info *adv_instance;
4262 u8 mgmt_err = mgmt_status(status);
4264 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4265 cmd_status_rsp, &mgmt_err);
4269 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4270 hci_dev_set_flag(hdev, HCI_ADVERTISING);
4272 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4274 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4277 new_settings(hdev, match.sk);
4282 /* If "Set Advertising" was just disabled and instance advertising was
4283 * set up earlier, then re-enable multi-instance advertising.
4285 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4286 list_empty(&hdev->adv_instances))
4289 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one. */
4291 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
4292 struct adv_info, list);
4296 instance = adv_instance->instance;
4299 hci_req_init(&req, hdev);
4301 err = __hci_req_schedule_adv_instance(&req, instance, true);
4304 err = hci_req_run(&req, enable_advertising_instance);
4307 bt_dev_err(hdev, "failed to re-configure advertising");
4310 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING: val 0x00 = off, 0x01 = on,
 * 0x02 = connectable advertising. When no HCI traffic is required
 * (powered off, no state change, LE connections present, or active
 * scanning) only the setting flags are toggled and a response is
 * sent; otherwise an HCI request is built to (re)program advertising.
 */
4313 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4316 struct mgmt_mode *cp = data;
4317 struct mgmt_pending_cmd *cmd;
4318 struct hci_request req;
4322 BT_DBG("request for %s", hdev->name);
4324 status = mgmt_le_support(hdev);
4326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4329 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4330 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4331 MGMT_STATUS_INVALID_PARAMS);
4337 /* The following conditions are ones which mean that we should
4338 * not do any HCI communication but directly send a mgmt
4339 * response to user space (after toggling the flag if
4342 if (!hdev_is_powered(hdev) ||
4343 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4344 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4345 hci_conn_num(hdev, LE_LINK) > 0 ||
4346 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4347 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
/* Flags-only path: switch to instance 0 and toggle the
 * ADVERTISING / ADVERTISING_CONNECTABLE flags directly.
 */
4351 hdev->cur_adv_instance = 0x00;
4352 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4353 if (cp->val == 0x02)
4354 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4356 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4358 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4359 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4362 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4367 err = new_settings(hdev, sk);
/* Only one Set Advertising / Set LE operation may be in flight */
4372 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4373 pending_find(MGMT_OP_SET_LE, hdev)) {
4374 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4379 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4385 hci_req_init(&req, hdev);
4387 if (cp->val == 0x02)
4388 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4390 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4392 cancel_adv_timeout(hdev);
4395 /* Switch to instance "0" for the Set Advertising setting.
4396 * We cannot use update_[adv|scan_rsp]_data() here as the
4397 * HCI_ADVERTISING flag is not yet set.
4399 hdev->cur_adv_instance = 0x00;
/* Extended advertising capable controllers take a different path */
4401 if (ext_adv_capable(hdev)) {
4402 __hci_req_start_ext_adv(&req, 0x00);
4404 __hci_req_update_adv_data(&req, 0x00);
4405 __hci_req_update_scan_rsp_data(&req, 0x00);
4406 __hci_req_enable_advertising(&req);
4409 __hci_req_disable_advertising(&req);
4412 err = hci_req_run(&req, set_advertising_complete);
4414 mgmt_pending_remove(cmd);
4417 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: only allowed on LE-capable,
 * powered-off controllers. A non-ANY address must be a valid static
 * random address (not BDADDR_NONE, top two bits set per Core Spec).
 */
4421 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4422 void *data, u16 len)
4424 struct mgmt_cp_set_static_address *cp = data;
4427 BT_DBG("%s", hdev->name);
4429 if (!lmp_le_capable(hdev))
4430 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4431 MGMT_STATUS_NOT_SUPPORTED);
/* The static address can only be changed while powered off */
4433 if (hdev_is_powered(hdev))
4434 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4435 MGMT_STATUS_REJECTED);
4437 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4438 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4439 return mgmt_cmd_status(sk, hdev->id,
4440 MGMT_OP_SET_STATIC_ADDRESS,
4441 MGMT_STATUS_INVALID_PARAMS);
4443 /* Two most significant bits shall be set */
4444 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4445 return mgmt_cmd_status(sk, hdev->id,
4446 MGMT_OP_SET_STATIC_ADDRESS,
4447 MGMT_STATUS_INVALID_PARAMS);
4452 bacpy(&hdev->static_addr, &cp->bdaddr);
4454 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4458 err = new_settings(hdev, sk);
4461 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: validate and store the LE scan
 * interval and window (each in 0x0004..0x4000, window <= interval per
 * the HCI spec) and restart background scanning so the new parameters
 * take effect immediately.
 */
4465 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4466 void *data, u16 len)
4468 struct mgmt_cp_set_scan_params *cp = data;
4469 __u16 interval, window;
4472 BT_DBG("%s", hdev->name);
4474 if (!lmp_le_capable(hdev))
4475 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4476 MGMT_STATUS_NOT_SUPPORTED);
4478 interval = __le16_to_cpu(cp->interval);
4480 if (interval < 0x0004 || interval > 0x4000)
4481 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4482 MGMT_STATUS_INVALID_PARAMS);
4484 window = __le16_to_cpu(cp->window);
4486 if (window < 0x0004 || window > 0x4000)
4487 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4488 MGMT_STATUS_INVALID_PARAMS);
/* The scan window must not exceed the scan interval */
4490 if (window > interval)
4491 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4492 MGMT_STATUS_INVALID_PARAMS);
4496 hdev->le_scan_interval = interval;
4497 hdev->le_scan_window = window;
4499 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4502 /* If background scan is running, restart it so new parameters are
/* Only restart passive (background) scanning; an active discovery
 * in progress is left untouched.
 */
4505 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4506 hdev->discovery.state == DISCOVERY_STOPPED) {
4507 struct hci_request req;
4509 hci_req_init(&req, hdev);
4511 hci_req_add_le_scan_disable(&req);
4512 hci_req_add_le_passive_scan(&req);
4514 hci_req_run(&req, NULL);
4517 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_FAST_CONNECTABLE: on error
 * report the status to the pending command's socket; on success
 * toggle HCI_FAST_CONNECTABLE to match the requested mode and notify
 * userspace of the new settings.
 */
4522 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4525 struct mgmt_pending_cmd *cmd;
4527 BT_DBG("status 0x%02x", status);
4531 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4536 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4537 mgmt_status(status));
/* Success path: the original request mode is kept in cmd->param */
4539 struct mgmt_mode *cp = cmd->param;
4542 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4544 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4546 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4547 new_settings(hdev, cmd->sk);
4550 mgmt_pending_remove(cmd);
4553 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: requires BR/EDR enabled
 * and HCI version >= 1.2 (page-scan parameter support). When powered
 * off only the flag is flipped; when powered on an HCI request is
 * issued to reprogram the page-scan parameters.
 */
4556 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4557 void *data, u16 len)
4559 struct mgmt_mode *cp = data;
4560 struct mgmt_pending_cmd *cmd;
4561 struct hci_request req;
4564 BT_DBG("%s", hdev->name);
4566 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4567 hdev->hci_ver < BLUETOOTH_VER_1_2)
4568 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4569 MGMT_STATUS_NOT_SUPPORTED);
4571 if (cp->val != 0x00 && cp->val != 0x01)
4572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4573 MGMT_STATUS_INVALID_PARAMS);
/* Reject if a Set Fast Connectable operation is already in flight */
4577 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4578 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested mode matches the current flag state */
4583 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4584 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4589 if (!hdev_is_powered(hdev)) {
4590 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4591 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4593 new_settings(hdev, sk);
4597 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4604 hci_req_init(&req, hdev);
4606 __hci_req_write_fast_connectable(&req, cp->val);
4608 err = hci_req_run(&req, fast_connectable_complete);
4610 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4611 MGMT_STATUS_FAILED);
4612 mgmt_pending_remove(cmd);
4616 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_BREDR: on failure, roll back
 * the HCI_BREDR_ENABLED flag that set_bredr() flipped optimistically
 * and report the error; on success, send the updated settings.
 */
4621 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4623 struct mgmt_pending_cmd *cmd;
4625 BT_DBG("status 0x%02x", status);
4629 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4634 u8 mgmt_err = mgmt_status(status);
4636 /* We need to restore the flag if related HCI commands
4639 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4641 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4643 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4644 new_settings(hdev, cmd->sk);
4647 mgmt_pending_remove(cmd);
4650 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BREDR: enable/disable the BR/EDR transport
 * on a dual-mode controller. LE must remain enabled (LE-only builds
 * reject this). Disabling while powered on is rejected, and re-enabling
 * is refused when a static address or Secure Connections is in use,
 * since dual-mode operation requires the public address as identity.
 */
4653 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4655 struct mgmt_mode *cp = data;
4656 struct mgmt_pending_cmd *cmd;
4657 struct hci_request req;
4660 BT_DBG("request for %s", hdev->name);
4662 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4663 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4664 MGMT_STATUS_NOT_SUPPORTED);
4666 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4667 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4668 MGMT_STATUS_REJECTED);
4670 if (cp->val != 0x00 && cp->val != 0x01)
4671 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4672 MGMT_STATUS_INVALID_PARAMS);
/* No state change requested — just reply with current settings */
4676 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4677 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4681 if (!hdev_is_powered(hdev)) {
/* Powered-off disable clears all BR/EDR-only related flags */
4683 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4684 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4685 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4686 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4687 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4690 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4692 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4696 err = new_settings(hdev, sk);
4700 /* Reject disabling when powered on */
4702 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4703 MGMT_STATUS_REJECTED);
4706 /* When configuring a dual-mode controller to operate
4707 * with LE only and using a static address, then switching
4708 * BR/EDR back on is not allowed.
4710 * Dual-mode controllers shall operate with the public
4711 * address as its identity address for BR/EDR and LE. So
4712 * reject the attempt to create an invalid configuration.
4714 * The same restrictions applies when secure connections
4715 * has been enabled. For BR/EDR this is a controller feature
4716 * while for LE it is a host stack feature. This means that
4717 * switching BR/EDR back on when secure connections has been
4718 * enabled is not a supported transaction.
4720 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4721 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4722 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4723 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4724 MGMT_STATUS_REJECTED);
4729 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4730 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4735 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4741 /* We need to flip the bit already here so that
4742 * hci_req_update_adv_data generates the correct flags.
4744 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4746 hci_req_init(&req, hdev);
4748 __hci_req_write_fast_connectable(&req, false);
4749 __hci_req_update_scan(&req);
4751 /* Since only the advertising data flags will change, there
4752 * is no need to update the scan response data.
4754 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
4756 err = hci_req_run(&req, set_bredr_complete);
4758 mgmt_pending_remove(cmd);
4761 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_SECURE_CONN: on error report
 * the failure; on success set HCI_SC_ENABLED/HCI_SC_ONLY according to
 * the requested mode (0x00 off, 0x01 SC enabled, 0x02 SC-only) and
 * notify userspace of the new settings.
 */
4765 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4767 struct mgmt_pending_cmd *cmd;
4768 struct mgmt_mode *cp;
4770 BT_DBG("%s status %u", hdev->name, status);
4774 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4779 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4780 mgmt_status(status));
/* Requested mode 0x00: secure connections fully disabled */
4788 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4789 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Requested mode 0x01: SC enabled but not SC-only */
4792 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4793 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Requested mode 0x02: SC-only mode */
4796 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4797 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4801 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4802 new_settings(hdev, cmd->sk);
4805 mgmt_pending_remove(cmd);
4807 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SECURE_CONN: val 0x00 = off, 0x01 = on,
 * 0x02 = SC-only. For LE-only/powered-off controllers the flags are
 * toggled directly; otherwise a Write Secure Connections Host Support
 * HCI command is issued, completed by sc_enable_complete().
 */
4810 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4811 void *data, u16 len)
4813 struct mgmt_mode *cp = data;
4814 struct mgmt_pending_cmd *cmd;
4815 struct hci_request req;
4819 BT_DBG("request for %s", hdev->name);
/* SC needs either controller support or (for LE) host support */
4821 if (!lmp_sc_capable(hdev) &&
4822 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4823 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4824 MGMT_STATUS_NOT_SUPPORTED);
/* On BR/EDR, SC requires SSP to be enabled first */
4826 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4827 lmp_sc_capable(hdev) &&
4828 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4829 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4830 MGMT_STATUS_REJECTED);
4832 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4834 MGMT_STATUS_INVALID_PARAMS);
/* Flags-only path: powered off, no controller SC support, or
 * BR/EDR disabled — no HCI command needed.
 */
4838 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4839 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4843 changed = !hci_dev_test_and_set_flag(hdev,
4845 if (cp->val == 0x02)
4846 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4848 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4850 changed = hci_dev_test_and_clear_flag(hdev,
4852 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4855 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4860 err = new_settings(hdev, sk);
4865 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4866 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op if both the enable state and SC-only state already match */
4873 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4874 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4875 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4879 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4885 hci_req_init(&req, hdev);
4886 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4887 err = hci_req_run(&req, sc_enable_complete);
4889 mgmt_pending_remove(cmd);
4894 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS: val 0x00 = discard debug keys,
 * 0x01 = keep debug keys, 0x02 = keep and actively use SSP debug mode.
 * Toggles HCI_KEEP_DEBUG_KEYS / HCI_USE_DEBUG_KEYS and, when the use
 * mode changed on a powered SSP controller, writes SSP debug mode.
 */
4898 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4899 void *data, u16 len)
4901 struct mgmt_mode *cp = data;
4902 bool changed, use_changed;
4905 BT_DBG("request for %s", hdev->name);
4907 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4909 MGMT_STATUS_INVALID_PARAMS);
4914 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4916 changed = hci_dev_test_and_clear_flag(hdev,
4917 HCI_KEEP_DEBUG_KEYS);
4919 if (cp->val == 0x02)
4920 use_changed = !hci_dev_test_and_set_flag(hdev,
4921 HCI_USE_DEBUG_KEYS);
4923 use_changed = hci_dev_test_and_clear_flag(hdev,
4924 HCI_USE_DEBUG_KEYS);
/* Only touch the controller when the use-mode actually changed and
 * SSP is active; the command result is intentionally not awaited.
 */
4926 if (hdev_is_powered(hdev) && use_changed &&
4927 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4928 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4929 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4930 sizeof(mode), &mode);
4933 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4938 err = new_settings(hdev, sk);
4941 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY: privacy 0x00 = off, 0x01 = on,
 * 0x02 = limited privacy. Only allowed while powered off. Stores the
 * supplied IRK, marks the RPA as expired so a fresh one is generated,
 * and enables RPA resolving since userspace demonstrably handles IRKs.
 */
4945 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4948 struct mgmt_cp_set_privacy *cp = cp_data;
4952 BT_DBG("request for %s", hdev->name);
4954 if (!lmp_le_capable(hdev))
4955 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4956 MGMT_STATUS_NOT_SUPPORTED);
4958 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
4959 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4960 MGMT_STATUS_INVALID_PARAMS);
/* Privacy configuration can only change while powered off */
4962 if (hdev_is_powered(hdev))
4963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4964 MGMT_STATUS_REJECTED);
4968 /* If user space supports this command it is also expected to
4969 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4971 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
4974 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
4975 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4976 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4977 hci_adv_instances_set_rpa_expired(hdev, true);
4978 if (cp->privacy == 0x02)
4979 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
4981 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: wipe the stored IRK and all privacy-related flags */
4983 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
4984 memset(hdev->irk, 0, sizeof(hdev->irk));
4985 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
4986 hci_adv_instances_set_rpa_expired(hdev, false);
4987 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4990 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4995 err = new_settings(hdev, sk);
4998 hci_dev_unlock(hdev);
/* Validate an IRK entry from MGMT_OP_LOAD_IRKS: public LE addresses
 * are always acceptable; random LE addresses must be static random
 * (two most significant address bits set, per Core Spec).
 */
5002 static bool irk_is_valid(struct mgmt_irk_info *irk)
5004 switch (irk->addr.type) {
5005 case BDADDR_LE_PUBLIC:
5008 case BDADDR_LE_RANDOM:
5009 /* Two most significant bits shall be set */
5010 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS: validate the variable-length IRK
 * list (count bounded so the size math cannot overflow u16, payload
 * length must match exactly, every entry must pass irk_is_valid),
 * then atomically replace the SMP IRK store and enable RPA resolving.
 */
5018 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5021 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound guarantees expected_len below cannot exceed/wrap u16 */
5022 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5023 sizeof(struct mgmt_irk_info));
5024 u16 irk_count, expected_len;
5027 BT_DBG("request for %s", hdev->name);
5029 if (!lmp_le_capable(hdev))
5030 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5031 MGMT_STATUS_NOT_SUPPORTED);
5033 irk_count = __le16_to_cpu(cp->irk_count);
5034 if (irk_count > max_irk_count) {
5035 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5037 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5038 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must exactly match the received payload size */
5041 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5042 if (expected_len != len) {
5043 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5045 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5046 MGMT_STATUS_INVALID_PARAMS);
5049 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry up front so the store is replaced all-or-nothing */
5051 for (i = 0; i < irk_count; i++) {
5052 struct mgmt_irk_info *key = &cp->irks[i];
5054 if (!irk_is_valid(key))
5055 return mgmt_cmd_status(sk, hdev->id,
5057 MGMT_STATUS_INVALID_PARAMS);
5062 hci_smp_irks_clear(hdev);
5064 for (i = 0; i < irk_count; i++) {
5065 struct mgmt_irk_info *irk = &cp->irks[i];
5067 hci_add_irk(hdev, &irk->addr.bdaddr,
5068 le_addr_type(irk->addr.type), irk->val,
5072 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5074 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5076 hci_dev_unlock(hdev);
/* Validate an LTK entry from MGMT_OP_LOAD_LONG_TERM_KEYS: the master
 * field must be a strict boolean, and random LE addresses must be
 * static random (two most significant address bits set).
 */
5081 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5083 if (key->master != 0x00 && key->master != 0x01)
5086 switch (key->addr.type) {
5087 case BDADDR_LE_PUBLIC:
5090 case BDADDR_LE_RANDOM:
5091 /* Two most significant bits shall be set */
5092 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: validate the variable-
 * length LTK list (count bound, exact payload length, per-entry
 * ltk_is_valid), then atomically replace the SMP LTK store, mapping
 * each mgmt key type to the corresponding SMP type/authentication.
 */
5100 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5101 void *cp_data, u16 len)
5103 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound guarantees expected_len below cannot exceed/wrap u16 */
5104 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5105 sizeof(struct mgmt_ltk_info));
5106 u16 key_count, expected_len;
5109 BT_DBG("request for %s", hdev->name);
5111 if (!lmp_le_capable(hdev))
5112 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5113 MGMT_STATUS_NOT_SUPPORTED);
5115 key_count = __le16_to_cpu(cp->key_count);
5116 if (key_count > max_key_count) {
5117 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
5119 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5120 MGMT_STATUS_INVALID_PARAMS);
5123 expected_len = sizeof(*cp) + key_count *
5124 sizeof(struct mgmt_ltk_info);
5125 if (expected_len != len) {
5126 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
5128 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5129 MGMT_STATUS_INVALID_PARAMS);
5132 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before clearing, so loading is all-or-nothing */
5134 for (i = 0; i < key_count; i++) {
5135 struct mgmt_ltk_info *key = &cp->keys[i];
5137 if (!ltk_is_valid(key))
5138 return mgmt_cmd_status(sk, hdev->id,
5139 MGMT_OP_LOAD_LONG_TERM_KEYS,
5140 MGMT_STATUS_INVALID_PARAMS);
5145 hci_smp_ltks_clear(hdev);
5147 for (i = 0; i < key_count; i++) {
5148 struct mgmt_ltk_info *key = &cp->keys[i];
5149 u8 type, authenticated;
/* Map the mgmt key type to SMP key type + authentication level;
 * legacy (non-P256) keys also encode the master/slave role.
 */
5151 switch (key->type) {
5152 case MGMT_LTK_UNAUTHENTICATED:
5153 authenticated = 0x00;
5154 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5156 case MGMT_LTK_AUTHENTICATED:
5157 authenticated = 0x01;
5158 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5160 case MGMT_LTK_P256_UNAUTH:
5161 authenticated = 0x00;
5162 type = SMP_LTK_P256;
5164 case MGMT_LTK_P256_AUTH:
5165 authenticated = 0x01;
5166 type = SMP_LTK_P256;
5168 case MGMT_LTK_P256_DEBUG:
5169 authenticated = 0x00;
5170 type = SMP_LTK_P256_DEBUG;
5176 hci_add_ltk(hdev, &key->addr.bdaddr,
5177 le_addr_type(key->addr.type), type, authenticated,
5178 key->val, key->enc_size, key->ediv, key->rand);
5181 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5184 hci_dev_unlock(hdev);
/* cmd_complete callback for MGMT_OP_GET_CONN_INFO: build the reply
 * from the cached RSSI/TX-power values in the hci_conn on success,
 * or the documented "invalid" sentinels on failure, send it, and
 * drop the connection reference taken by get_conn_info().
 */
5189 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5191 struct hci_conn *conn = cmd->user_data;
5192 struct mgmt_rp_get_conn_info rp;
/* The address portion of the reply is the original command params */
5195 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5197 if (status == MGMT_STATUS_SUCCESS) {
5198 rp.rssi = conn->rssi;
5199 rp.tx_power = conn->tx_power;
5200 rp.max_tx_power = conn->max_tx_power;
5202 rp.rssi = HCI_RSSI_INVALID;
5203 rp.tx_power = HCI_TX_POWER_INVALID;
5204 rp.max_tx_power = HCI_TX_POWER_INVALID;
5207 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5208 status, &rp, sizeof(rp));
5210 hci_conn_drop(conn);
/* HCI request completion for the Get Conn Info refresh request:
 * recover the connection handle from the last-sent command (Read RSSI
 * or Read TX Power — both start with the handle field), look up the
 * matching pending command and complete it.
 */
5216 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5219 struct hci_cp_read_rssi *cp;
5220 struct mgmt_pending_cmd *cmd;
5221 struct hci_conn *conn;
5225 BT_DBG("status 0x%02x", hci_status);
5229 /* Commands sent in request are either Read RSSI or Read Transmit Power
5230 * Level so we check which one was last sent to retrieve connection
5231 * handle. Both commands have handle as first parameter so it's safe to
5232 * cast data on the same command struct.
5234 * First command sent is always Read RSSI and we fail only if it fails.
5235 * In other case we simply override error to indicate success as we
5236 * already remembered if TX power value is actually valid.
5238 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5240 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5241 status = MGMT_STATUS_SUCCESS;
5243 status = mgmt_status(hci_status);
5247 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
5251 handle = __le16_to_cpu(cp->handle);
5252 conn = hci_conn_hash_lookup_handle(hdev, handle);
5254 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
/* Match on the connection pointer: the pending cmd stored it as data */
5259 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5263 cmd->cmd_complete(cmd, status);
5264 mgmt_pending_remove(cmd);
5267 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO: return RSSI / TX power / max TX
 * power for a connected device. Values are served from the hci_conn
 * cache when fresh; otherwise an HCI request (Read RSSI + Read TX
 * Power) refreshes them and the reply is deferred to the completion.
 */
5270 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5273 struct mgmt_cp_get_conn_info *cp = data;
5274 struct mgmt_rp_get_conn_info rp;
5275 struct hci_conn *conn;
5276 unsigned long conn_info_age;
5279 BT_DBG("%s", hdev->name);
5281 memset(&rp, 0, sizeof(rp));
5282 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5283 rp.addr.type = cp->addr.type;
5285 if (!bdaddr_type_is_valid(cp->addr.type))
5286 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5287 MGMT_STATUS_INVALID_PARAMS,
5292 if (!hdev_is_powered(hdev)) {
5293 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5294 MGMT_STATUS_NOT_POWERED, &rp,
/* Resolve the connection on the transport the address type implies */
5299 if (cp->addr.type == BDADDR_BREDR)
5300 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5303 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5305 if (!conn || conn->state != BT_CONNECTED) {
5306 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5307 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one Get Conn Info per connection may be pending at a time */
5312 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5314 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5318 /* To avoid client trying to guess when to poll again for information we
5319 * calculate conn info age as random value between min/max set in hdev.
5321 conn_info_age = hdev->conn_info_min_age +
5322 prandom_u32_max(hdev->conn_info_max_age -
5323 hdev->conn_info_min_age);
5325 /* Query controller to refresh cached values if they are too old or were
5328 if (time_after(jiffies, conn->conn_info_timestamp +
5329 msecs_to_jiffies(conn_info_age)) ||
5330 !conn->conn_info_timestamp) {
5331 struct hci_request req;
5332 struct hci_cp_read_tx_power req_txp_cp;
5333 struct hci_cp_read_rssi req_rssi_cp;
5334 struct mgmt_pending_cmd *cmd;
5336 hci_req_init(&req, hdev);
5337 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5338 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5341 /* For LE links TX power does not change thus we don't need to
5342 * query for it once value is known.
5344 if (!bdaddr_type_is_le(cp->addr.type) ||
5345 conn->tx_power == HCI_TX_POWER_INVALID) {
5346 req_txp_cp.handle = cpu_to_le16(conn->handle);
/* type 0x00 = current TX power level */
5347 req_txp_cp.type = 0x00;
5348 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5349 sizeof(req_txp_cp), &req_txp_cp);
5352 /* Max TX power needs to be read only once per connection */
5353 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5354 req_txp_cp.handle = cpu_to_le16(conn->handle);
/* type 0x01 = maximum TX power level */
5355 req_txp_cp.type = 0x01;
5356 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5357 sizeof(req_txp_cp), &req_txp_cp);
5360 err = hci_req_run(&req, conn_info_refresh_complete);
5364 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold a reference for the duration of the async refresh; it is
 * dropped again in conn_info_cmd_complete().
 */
5371 hci_conn_hold(conn);
5372 cmd->user_data = hci_conn_get(conn);
5373 cmd->cmd_complete = conn_info_cmd_complete;
5375 conn->conn_info_timestamp = jiffies;
5377 /* Cache is valid, just reply with values cached in hci_conn */
5378 rp.rssi = conn->rssi;
5379 rp.tx_power = conn->tx_power;
5380 rp.max_tx_power = conn->max_tx_power;
5382 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5383 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5387 hci_dev_unlock(hdev);
/* cmd_complete callback for MGMT_OP_GET_CLOCK_INFO: fill in the local
 * clock (and, when a connection was involved, the piconet clock and
 * its accuracy), send the reply and release the references taken by
 * get_clock_info().
 */
5391 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5393 struct hci_conn *conn = cmd->user_data;
5394 struct mgmt_rp_get_clock_info rp;
5395 struct hci_dev *hdev;
5398 memset(&rp, 0, sizeof(rp));
5399 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
/* Look the device up by index since the callback carries no hdev */
5404 hdev = hci_dev_get(cmd->index);
5406 rp.local_clock = cpu_to_le32(hdev->clock);
5411 rp.piconet_clock = cpu_to_le32(conn->clock);
5412 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5416 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5420 hci_conn_drop(conn);
/* HCI request completion for Get Clock Info: recover the connection
 * (if the Read Clock command targeted a piconet clock rather than the
 * local one) from the sent command data, then complete the matching
 * pending command with the translated status.
 */
5427 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5429 struct hci_cp_read_clock *hci_cp;
5430 struct mgmt_pending_cmd *cmd;
5431 struct hci_conn *conn;
5433 BT_DBG("%s status %u", hdev->name, status);
5437 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means a piconet clock read — resolve its connection */
5441 if (hci_cp->which) {
5442 u16 handle = __le16_to_cpu(hci_cp->handle);
5443 conn = hci_conn_hash_lookup_handle(hdev, handle);
5448 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5452 cmd->cmd_complete(cmd, mgmt_status(status));
5453 mgmt_pending_remove(cmd);
5456 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local Bluetooth clock
 * and, when a BR/EDR peer address is given, additionally that
 * connection's piconet clock. BR/EDR only; the reply is deferred to
 * get_clock_info_complete()/clock_info_cmd_complete().
 */
5459 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5462 struct mgmt_cp_get_clock_info *cp = data;
5463 struct mgmt_rp_get_clock_info rp;
5464 struct hci_cp_read_clock hci_cp;
5465 struct mgmt_pending_cmd *cmd;
5466 struct hci_request req;
5467 struct hci_conn *conn;
5470 BT_DBG("%s", hdev->name);
5472 memset(&rp, 0, sizeof(rp));
5473 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5474 rp.addr.type = cp->addr.type;
/* Clock information only exists for the BR/EDR transport */
5476 if (cp->addr.type != BDADDR_BREDR)
5477 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5478 MGMT_STATUS_INVALID_PARAMS,
5483 if (!hdev_is_powered(hdev)) {
5484 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5485 MGMT_STATUS_NOT_POWERED, &rp,
/* A non-ANY address selects a specific connected peer */
5490 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5491 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5493 if (!conn || conn->state != BT_CONNECTED) {
5494 err = mgmt_cmd_complete(sk, hdev->id,
5495 MGMT_OP_GET_CLOCK_INFO,
5496 MGMT_STATUS_NOT_CONNECTED,
5504 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5510 cmd->cmd_complete = clock_info_cmd_complete;
5512 hci_req_init(&req, hdev);
/* First Read Clock: zeroed params select the local clock */
5514 memset(&hci_cp, 0, sizeof(hci_cp));
5515 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold a connection reference across the async request; dropped in
 * clock_info_cmd_complete().
 */
5518 hci_conn_hold(conn);
5519 cmd->user_data = hci_conn_get(conn);
5521 hci_cp.handle = cpu_to_le16(conn->handle);
5522 hci_cp.which = 0x01; /* Piconet clock */
5523 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5526 err = hci_req_run(&req, get_clock_info_complete);
5528 mgmt_pending_remove(cmd);
5531 hci_dev_unlock(hdev);
/* Check whether an LE connection to the given address/type exists and
 * is fully established (BT_CONNECTED).
 */
5535 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5537 struct hci_conn *conn;
5539 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5543 if (conn->dst_type != type)
5546 if (conn->state != BT_CONNECTED)
5552 /* This function requires the caller holds hdev->lock */
/* Create (or reuse) the connection parameters entry for addr/addr_type
 * and set its auto-connect policy, moving the entry onto the matching
 * action list (pend_le_conns for connect policies, pend_le_reports for
 * report policy, neither when disabled).
 */
5553 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
5554 u8 addr_type, u8 auto_connect)
5556 struct hci_conn_params *params;
5558 params = hci_conn_params_add(hdev, addr, addr_type);
/* Already configured with the requested policy — nothing to do */
5562 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry is currently on */
5565 list_del_init(¶ms->action);
5567 switch (auto_connect) {
5568 case HCI_AUTO_CONN_DISABLED:
5569 case HCI_AUTO_CONN_LINK_LOSS:
5570 /* If auto connect is being disabled when we're trying to
5571 * connect to device, keep connecting.
5573 if (params->explicit_connect)
5574 list_add(¶ms->action, &hdev->pend_le_conns);
5576 case HCI_AUTO_CONN_REPORT:
5577 if (params->explicit_connect)
5578 list_add(¶ms->action, &hdev->pend_le_conns);
5580 list_add(¶ms->action, &hdev->pend_le_reports);
5582 case HCI_AUTO_CONN_DIRECT:
5583 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if not already connected */
5584 if (!is_connected(hdev, addr, addr_type))
5585 list_add(¶ms->action, &hdev->pend_le_conns);
5589 params->auto_connect = auto_connect;
5591 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT_EV_DEVICE_ADDED event to all mgmt sockets except the
 * one (sk) that issued the Add Device command.
 */
5597 static void device_added(struct sock *sk, struct hci_dev *hdev,
5598 bdaddr_t *bdaddr, u8 type, u8 action)
5600 struct mgmt_ev_device_added ev;
5602 bacpy(&ev.addr.bdaddr, bdaddr);
5603 ev.addr.type = type;
5606 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_ADD_DEVICE: action 0x00 = background scan
 * (report only), 0x01 = allow incoming connection, 0x02 = auto-connect.
 * BR/EDR addresses go on the whitelist (incoming only); LE identity
 * addresses get connection parameters with the matching auto-connect
 * policy and trigger a background scan update.
 */
5609 static int add_device(struct sock *sk, struct hci_dev *hdev,
5610 void *data, u16 len)
5612 struct mgmt_cp_add_device *cp = data;
5613 u8 auto_conn, addr_type;
5616 BT_DBG("%s", hdev->name);
5618 if (!bdaddr_type_is_valid(cp->addr.type) ||
5619 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5620 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5621 MGMT_STATUS_INVALID_PARAMS,
5622 &cp->addr, sizeof(cp->addr));
5624 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5625 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5626 MGMT_STATUS_INVALID_PARAMS,
5627 &cp->addr, sizeof(cp->addr));
5631 if (cp->addr.type == BDADDR_BREDR) {
5632 /* Only incoming connections action is supported for now */
5633 if (cp->action != 0x01) {
5634 err = mgmt_cmd_complete(sk, hdev->id,
5636 MGMT_STATUS_INVALID_PARAMS,
5637 &cp->addr, sizeof(cp->addr));
5641 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Page-scan settings may need updating for the new whitelist entry */
5646 hci_req_update_scan(hdev);
5651 addr_type = le_addr_type(cp->addr.type);
/* Map mgmt action to the kernel's auto-connect policy */
5653 if (cp->action == 0x02)
5654 auto_conn = HCI_AUTO_CONN_ALWAYS;
5655 else if (cp->action == 0x01)
5656 auto_conn = HCI_AUTO_CONN_DIRECT;
5658 auto_conn = HCI_AUTO_CONN_REPORT;
5660 /* Kernel internally uses conn_params with resolvable private
5661 * address, but Add Device allows only identity addresses.
5662 * Make sure it is enforced before calling
5663 * hci_conn_params_lookup.
5665 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5666 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5667 MGMT_STATUS_INVALID_PARAMS,
5668 &cp->addr, sizeof(cp->addr));
5672 /* If the connection parameters don't exist for this device,
5673 * they will be created and configured with defaults.
5675 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5677 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5678 MGMT_STATUS_FAILED, &cp->addr,
5683 hci_update_background_scan(hdev);
/* Notify other mgmt sockets, then confirm to the caller */
5686 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5688 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5689 MGMT_STATUS_SUCCESS, &cp->addr,
5693 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event to all mgmt sockets except
 * the one (sk) that issued the Remove Device command.
 */
5697 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5698 bdaddr_t *bdaddr, u8 type)
5700 struct mgmt_ev_device_removed ev;
5702 bacpy(&ev.addr.bdaddr, bdaddr);
5703 ev.addr.type = type;
5705 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT Remove Device command handler.
 *
 * Two modes, selected by the address in the request:
 *  - a specific address: remove it from the BR/EDR whitelist, or delete
 *    its LE connection parameters (identity addresses only);
 *  - BDADDR_ANY (address type must be 0): flush the whole whitelist and
 *    every removable LE conn_params entry.
 *
 * NOTE(review): this extract has gaps in the original line numbering,
 * so some brace/else/goto lines are not visible here.
 */
5708 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5709 void *data, u16 len)
5711 struct mgmt_cp_remove_device *cp = data;
5714 BT_DBG("%s", hdev->name);
/* Non-wildcard address: remove one specific device. */
5718 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5719 struct hci_conn_params *params;
5722 if (!bdaddr_type_is_valid(cp->addr.type)) {
5723 err = mgmt_cmd_complete(sk, hdev->id,
5724 MGMT_OP_REMOVE_DEVICE,
5725 MGMT_STATUS_INVALID_PARAMS,
5726 &cp->addr, sizeof(cp->addr));
/* BR/EDR devices live on the whitelist; dropping one may require a
 * scan-state update.
 */
5730 if (cp->addr.type == BDADDR_BREDR) {
5731 err = hci_bdaddr_list_del(&hdev->whitelist,
5735 err = mgmt_cmd_complete(sk, hdev->id,
5736 MGMT_OP_REMOVE_DEVICE,
5737 MGMT_STATUS_INVALID_PARAMS,
5743 hci_req_update_scan(hdev);
5745 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path: operate on stored connection parameters. */
5750 addr_type = le_addr_type(cp->addr.type);
5752 /* Kernel internally uses conn_params with resolvable private
5753 * address, but Remove Device allows only identity addresses.
5754 * Make sure it is enforced before calling
5755 * hci_conn_params_lookup.
5757 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5758 err = mgmt_cmd_complete(sk, hdev->id,
5759 MGMT_OP_REMOVE_DEVICE,
5760 MGMT_STATUS_INVALID_PARAMS,
5761 &cp->addr, sizeof(cp->addr));
5765 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5768 err = mgmt_cmd_complete(sk, hdev->id,
5769 MGMT_OP_REMOVE_DEVICE,
5770 MGMT_STATUS_INVALID_PARAMS,
5771 &cp->addr, sizeof(cp->addr));
/* Disabled and explicit-connect-only entries are rejected here with
 * INVALID_PARAMS rather than deleted.
 */
5775 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
5776 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
5777 err = mgmt_cmd_complete(sk, hdev->id,
5778 MGMT_OP_REMOVE_DEVICE,
5779 MGMT_STATUS_INVALID_PARAMS,
5780 &cp->addr, sizeof(cp->addr));
5784 list_del(¶ms->action);
5785 list_del(¶ms->list);
5787 hci_update_background_scan(hdev);
5789 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* Wildcard (BDADDR_ANY) branch: remove everything. */
5791 struct hci_conn_params *p, *tmp;
5792 struct bdaddr_list *b, *btmp;
/* For the wildcard address only type 0 is valid. */
5794 if (cp->addr.type) {
5795 err = mgmt_cmd_complete(sk, hdev->id,
5796 MGMT_OP_REMOVE_DEVICE,
5797 MGMT_STATUS_INVALID_PARAMS,
5798 &cp->addr, sizeof(cp->addr));
5802 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5803 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5808 hci_req_update_scan(hdev);
5810 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
/* Skip kernel-internal (disabled) entries. */
5811 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5813 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Params with a pending explicit connect are kept but
 * downgraded to HCI_AUTO_CONN_EXPLICIT instead of deleted.
 */
5814 if (p->explicit_connect) {
5815 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
5818 list_del(&p->action);
5823 BT_DBG("All LE connection parameters were removed");
5825 hci_update_background_scan(hdev);
5829 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5830 MGMT_STATUS_SUCCESS, &cp->addr,
5833 hci_dev_unlock(hdev);
/* MGMT Load Connection Parameters command handler (LE only).
 * Replaces the stored LE connection parameter sets with the list
 * supplied by userspace.  Individual invalid entries are logged and
 * skipped rather than failing the whole command.
 */
5837 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5840 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on param_count so the expected_len computation below
 * cannot overflow its u16.
 */
5841 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5842 sizeof(struct mgmt_conn_param));
5843 u16 param_count, expected_len;
5846 if (!lmp_le_capable(hdev))
5847 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5848 MGMT_STATUS_NOT_SUPPORTED);
5850 param_count = __le16_to_cpu(cp->param_count);
5851 if (param_count > max_param_count) {
5852 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
5854 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5855 MGMT_STATUS_INVALID_PARAMS);
/* The command length must match the advertised entry count exactly. */
5858 expected_len = sizeof(*cp) + param_count *
5859 sizeof(struct mgmt_conn_param);
5860 if (expected_len != len) {
5861 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
5863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5864 MGMT_STATUS_INVALID_PARAMS);
5867 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop previously auto-added (disabled) entries before loading. */
5871 hci_conn_params_clear_disabled(hdev);
5873 for (i = 0; i < param_count; i++) {
5874 struct mgmt_conn_param *param = &cp->params[i];
5875 struct hci_conn_params *hci_param;
5876 u16 min, max, latency, timeout;
5879 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
/* Only LE public/random identity address types are accepted. */
5882 if (param->addr.type == BDADDR_LE_PUBLIC) {
5883 addr_type = ADDR_LE_DEV_PUBLIC;
5884 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5885 addr_type = ADDR_LE_DEV_RANDOM;
5887 bt_dev_err(hdev, "ignoring invalid connection parameters");
5891 min = le16_to_cpu(param->min_interval);
5892 max = le16_to_cpu(param->max_interval);
5893 latency = le16_to_cpu(param->latency);
5894 timeout = le16_to_cpu(param->timeout);
5896 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5897 min, max, latency, timeout);
/* Range-check the set; bad entries are skipped, not fatal. */
5899 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5900 bt_dev_err(hdev, "ignoring invalid connection parameters");
5904 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
5907 bt_dev_err(hdev, "failed to add connection parameters");
5911 hci_param->conn_min_interval = min;
5912 hci_param->conn_max_interval = max;
5913 hci_param->conn_latency = latency;
5914 hci_param->supervision_timeout = timeout;
5917 hci_dev_unlock(hdev);
5919 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT Set External Configuration command handler.
 * Toggles HCI_EXT_CONFIGURED on controllers with the EXTERNAL_CONFIG
 * quirk; only allowed while the controller is powered off.  When the
 * resulting configured state changes, the index migrates between the
 * configured and unconfigured index lists.
 */
5923 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5924 void *data, u16 len)
5926 struct mgmt_cp_set_external_config *cp = data;
5930 BT_DBG("%s", hdev->name);
5932 if (hdev_is_powered(hdev))
5933 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5934 MGMT_STATUS_REJECTED);
/* Only 0x00 (off) and 0x01 (on) are valid config values. */
5936 if (cp->config != 0x00 && cp->config != 0x01)
5937 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5938 MGMT_STATUS_INVALID_PARAMS);
5940 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5941 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5942 MGMT_STATUS_NOT_SUPPORTED);
/* changed records whether the flag actually flipped. */
5947 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
5949 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
5951 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5958 err = new_options(hdev, sk);
/* If the configured state now disagrees with HCI_UNCONFIGURED, move
 * the controller to the other index list and kick off setup.
 */
5960 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
5961 mgmt_index_removed(hdev);
5963 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
5964 hci_dev_set_flag(hdev, HCI_CONFIG);
5965 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5967 queue_work(hdev->req_workqueue, &hdev->power_on);
5969 set_bit(HCI_RAW, &hdev->flags);
5970 mgmt_index_added(hdev);
5975 hci_dev_unlock(hdev);
/* MGMT Set Public Address command handler.
 * Stores a public address for controllers that need one supplied by
 * userspace; requires a driver set_bdaddr callback and a powered-off
 * controller.  If this completes the configuration, the index is
 * re-registered as configured and power-on setup is queued.
 */
5979 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5980 void *data, u16 len)
5982 struct mgmt_cp_set_public_address *cp = data;
5986 BT_DBG("%s", hdev->name);
5988 if (hdev_is_powered(hdev))
5989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5990 MGMT_STATUS_REJECTED);
/* BDADDR_ANY is not a usable public address. */
5992 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5993 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5994 MGMT_STATUS_INVALID_PARAMS);
5996 if (!hdev->set_bdaddr)
5997 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5998 MGMT_STATUS_NOT_SUPPORTED);
6002 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6003 bacpy(&hdev->public_addr, &cp->bdaddr);
6005 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6012 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6013 err = new_options(hdev, sk);
/* Last missing configuration item supplied: flip the index from the
 * unconfigured list to the configured one and start power-on setup.
 */
6015 if (is_configured(hdev)) {
6016 mgmt_index_removed(hdev);
6018 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6020 hci_dev_set_flag(hdev, HCI_CONFIG);
6021 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6023 queue_work(hdev->req_workqueue, &hdev->power_on);
6027 hci_dev_unlock(hdev);
/* Completion callback for the HCI request issued by
 * read_local_ssp_oob_req().  Parses the controller's OOB data reply —
 * legacy (P-192 only) or extended (P-192 + P-256), chosen by @opcode —
 * packs class-of-device plus hash/randomizer values into an EIR blob,
 * answers the pending Read Local OOB Extended Data command and emits
 * the Local OOB Data Updated event.
 */
6031 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6032 u16 opcode, struct sk_buff *skb)
6034 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6035 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6036 u8 *h192, *r192, *h256, *r256;
6037 struct mgmt_pending_cmd *cmd;
6041 BT_DBG("%s status %u", hdev->name, status);
6043 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6047 mgmt_cp = cmd->param;
6050 status = mgmt_status(status);
/* Legacy reply: only the P-192 hash/randomizer pair is available. */
6057 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6058 struct hci_rp_read_local_oob_data *rp;
6060 if (skb->len != sizeof(*rp)) {
6061 status = MGMT_STATUS_FAILED;
6064 status = MGMT_STATUS_SUCCESS;
6065 rp = (void *)skb->data;
/* 5 + 18 + 18 — presumably class-of-device element plus one
 * hash and one randomizer element; verify against eir_append_data.
 */
6067 eir_len = 5 + 18 + 18;
/* Extended reply: both pairs, unless SC-only restricts to P-256. */
6074 struct hci_rp_read_local_oob_ext_data *rp;
6076 if (skb->len != sizeof(*rp)) {
6077 status = MGMT_STATUS_FAILED;
6080 status = MGMT_STATUS_SUCCESS;
6081 rp = (void *)skb->data;
6083 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6084 eir_len = 5 + 18 + 18;
6088 eir_len = 5 + 18 + 18 + 18 + 18;
6098 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6105 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6106 hdev->dev_class, 3);
6109 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6110 EIR_SSP_HASH_C192, h192, 16);
6111 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6112 EIR_SSP_RAND_R192, r192, 16);
6116 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6117 EIR_SSP_HASH_C256, h256, 16);
6118 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6119 EIR_SSP_RAND_R256, r256, 16);
6123 mgmt_rp->type = mgmt_cp->type;
6124 mgmt_rp->eir_len = cpu_to_le16(eir_len);
6126 err = mgmt_cmd_complete(cmd->sk, hdev->id,
6127 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
6128 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
6129 if (err < 0 || status)
/* Notify sockets subscribed to OOB data events; cmd->sk is passed as
 * the skip socket — presumably the requester is excluded.
 */
6132 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
6134 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6135 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
6136 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
6139 mgmt_pending_remove(cmd);
6142 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6143 struct mgmt_cp_read_local_oob_ext_data *cp)
6145 struct mgmt_pending_cmd *cmd;
6146 struct hci_request req;
6149 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6154 hci_req_init(&req, hdev);
6156 if (bredr_sc_enabled(hdev))
6157 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6159 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6161 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6163 mgmt_pending_remove(cmd);
/* MGMT Read Local OOB Extended Data command handler.
 * For BR/EDR the controller is queried asynchronously (via
 * read_local_ssp_oob_req); for LE the OOB blob (address, role, SC
 * confirm/random values, flags) is assembled inline.  When not powered
 * the reply carries the appropriate failure status instead.
 */
6170 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6171 void *data, u16 data_len)
6173 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6174 struct mgmt_rp_read_local_oob_ext_data *rp;
6177 u8 status, flags, role, addr[7], hash[16], rand[16];
6180 BT_DBG("%s", hdev->name);
/* cp->type is a bitmask of BDADDR_* transports; only BR/EDR alone or
 * LE public+random together are accepted.
 */
6182 if (hdev_is_powered(hdev)) {
6184 case BIT(BDADDR_BREDR):
6185 status = mgmt_bredr_support(hdev);
6191 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6192 status = mgmt_le_support(hdev);
6196 eir_len = 9 + 3 + 18 + 18 + 3;
6199 status = MGMT_STATUS_INVALID_PARAMS;
6204 status = MGMT_STATUS_NOT_POWERED;
6208 rp_len = sizeof(*rp) + eir_len;
6209 rp = kmalloc(rp_len, GFP_ATOMIC);
/* BR/EDR with SSP: defer to the async controller query and return;
 * the pending command is answered from the completion callback.
 */
6220 case BIT(BDADDR_BREDR):
6221 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6222 err = read_local_ssp_oob_req(hdev, sk, cp);
6223 hci_dev_unlock(hdev);
6227 status = MGMT_STATUS_FAILED;
6230 eir_len = eir_append_data(rp->eir, eir_len,
6232 hdev->dev_class, 3);
6235 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6236 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6237 smp_generate_oob(hdev, hash, rand) < 0) {
6238 hci_dev_unlock(hdev);
6239 status = MGMT_STATUS_FAILED;
6243 /* This should return the active RPA, but since the RPA
6244 * is only programmed on demand, it is really hard to fill
6245 * this in at the moment. For now disallow retrieving
6246 * local out-of-band data when privacy is in use.
6248 * Returning the identity address will not help here since
6249 * pairing happens before the identity resolving key is
6250 * known and thus the connection establishment happens
6251 * based on the RPA and not the identity address.
6253 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6254 hci_dev_unlock(hdev);
6255 status = MGMT_STATUS_REJECTED;
/* Choose the advertised address: the static address when forced, when
 * no public address exists, or when LE-only with a static address set;
 * otherwise the public address.
 */
6259 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6260 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6261 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6262 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6263 memcpy(addr, &hdev->static_addr, 6);
6266 memcpy(addr, &hdev->bdaddr, 6);
6270 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6271 addr, sizeof(addr));
/* Role selection depends on whether we are currently advertising. */
6273 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6278 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6279 &role, sizeof(role));
/* SC confirm/random values only when Secure Connections is enabled. */
6281 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6282 eir_len = eir_append_data(rp->eir, eir_len,
6284 hash, sizeof(hash));
6286 eir_len = eir_append_data(rp->eir, eir_len,
6288 rand, sizeof(rand));
6291 flags = mgmt_get_adv_discov_flags(hdev);
6293 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6294 flags |= LE_AD_NO_BREDR;
6296 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6297 &flags, sizeof(flags));
6301 hci_dev_unlock(hdev);
/* Requester is now interested in OOB data update events. */
6303 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6305 status = MGMT_STATUS_SUCCESS;
6308 rp->type = cp->type;
6309 rp->eir_len = cpu_to_le16(eir_len);
6311 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6312 status, rp, sizeof(*rp) + eir_len);
6313 if (err < 0 || status)
6316 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6317 rp, sizeof(*rp) + eir_len,
6318 HCI_MGMT_OOB_DATA_EVENTS, sk);
6326 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6330 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6331 flags |= MGMT_ADV_FLAG_DISCOV;
6332 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6333 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6334 flags |= MGMT_ADV_FLAG_APPEARANCE;
6335 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6337 /* In extended adv TX_POWER returned from Set Adv Param
6338 * will be always valid.
6340 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6341 ext_adv_capable(hdev))
6342 flags |= MGMT_ADV_FLAG_TX_POWER;
6344 if (ext_adv_capable(hdev)) {
6345 flags |= MGMT_ADV_FLAG_SEC_1M;
6347 if (hdev->le_features[1] & HCI_LE_PHY_2M)
6348 flags |= MGMT_ADV_FLAG_SEC_2M;
6350 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
6351 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT Read Advertising Features command handler.
 * Returns the supported advertising flags, data-size limits and the
 * identifiers of all currently registered advertising instances.
 */
6357 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6358 void *data, u16 data_len)
6360 struct mgmt_rp_read_adv_features *rp;
6363 struct adv_info *adv_instance;
6364 u32 supported_flags;
6367 BT_DBG("%s", hdev->name);
6369 if (!lmp_le_capable(hdev))
6370 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6371 MGMT_STATUS_REJECTED);
/* One trailing byte per registered instance id. */
6375 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
6376 rp = kmalloc(rp_len, GFP_ATOMIC);
6378 hci_dev_unlock(hdev);
6382 supported_flags = get_supported_adv_flags(hdev);
6384 rp->supported_flags = cpu_to_le32(supported_flags);
6385 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6386 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6387 rp->max_instances = HCI_MAX_ADV_INSTANCES;
6388 rp->num_instances = hdev->adv_instance_cnt;
/* Fill the variable-length tail with the instance ids. */
6390 instance = rp->instance;
6391 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
6392 *instance = adv_instance->instance;
6396 hci_dev_unlock(hdev);
6398 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6399 MGMT_STATUS_SUCCESS, rp, rp_len);
6406 static u8 calculate_name_len(struct hci_dev *hdev)
6408 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6410 return append_local_name(hdev, buf, 0);
/* Maximum TLV payload (advertising data or scan response) userspace may
 * supply for the given adv_flags: starts from HCI_MAX_AD_LENGTH and
 * subtracts the space the kernel itself will consume for the managed
 * flags, TX power, local name and appearance elements.
 * NOTE(review): the exact byte counts subtracted for flags/TX-power/
 * appearance sit on lines elided from this extract.
 */
6413 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6416 u8 max_len = HCI_MAX_AD_LENGTH;
6419 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6420 MGMT_ADV_FLAG_LIMITED_DISCOV |
6421 MGMT_ADV_FLAG_MANAGED_FLAGS))
6424 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6427 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6428 max_len -= calculate_name_len(hdev)6430 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6437 static bool flags_managed(u32 adv_flags)
6439 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6440 MGMT_ADV_FLAG_LIMITED_DISCOV |
6441 MGMT_ADV_FLAG_MANAGED_FLAGS);
6444 static bool tx_power_managed(u32 adv_flags)
6446 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6449 static bool name_managed(u32 adv_flags)
6451 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6454 static bool appearance_managed(u32 adv_flags)
6456 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate a userspace-supplied advertising/scan-response TLV blob:
 * it must fit within tlv_data_max_len() for the given flags and must
 * not contain elements (flags, TX power, name, appearance) that the
 * kernel manages itself.
 */
6459 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6460 u8 len, bool is_adv_data)
6465 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6470 /* Make sure that the data is correctly formatted. */
6471 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* data[i + 1] is the element type; cur_len presumably comes from
 * data[i] (the element length) on a line elided from this extract.
 */
6477 if (data[i + 1] == EIR_FLAGS &&
6478 (!is_adv_data || flags_managed(adv_flags)))
6481 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6484 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6487 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6490 if (data[i + 1] == EIR_APPEARANCE &&
6491 appearance_managed(adv_flags))
6494 /* If the current field length would exceed the total data
6495 * length, then it's invalid.
6497 if (i + cur_len >= len)
/* Completion callback for the Add Advertising HCI request.
 * Sweeps all instances still marked pending — clearing the flag, and
 * (on the failure path, partly elided here) removing the instance and
 * announcing its removal — then answers the pending mgmt command with
 * the mapped status.
 */
6504 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6507 struct mgmt_pending_cmd *cmd;
6508 struct mgmt_cp_add_advertising *cp;
6509 struct mgmt_rp_add_advertising rp;
6510 struct adv_info *adv_instance, *n;
6513 BT_DBG("status %d", status);
6517 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
6519 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
6520 if (!adv_instance->pending)
6524 adv_instance->pending = false;
6528 instance = adv_instance->instance;
/* Stop the instance timer before tearing down the active instance. */
6530 if (hdev->cur_adv_instance == instance)
6531 cancel_adv_timeout(hdev);
6533 hci_remove_adv_instance(hdev, instance);
6534 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
6541 rp.instance = cp->instance;
/* Error -> plain status reply; success -> complete with instance. */
6544 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6545 mgmt_status(status));
6547 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6548 mgmt_status(status), &rp, sizeof(rp));
6550 mgmt_pending_remove(cmd);
6553 hci_dev_unlock(hdev);
/* MGMT Add Advertising command handler.
 * Validates the requested instance number, flags and TLV payloads,
 * registers (or overwrites) the advertising instance and — when the
 * controller is powered and not in legacy HCI_ADVERTISING mode —
 * schedules it via an HCI request completed by
 * add_advertising_complete().
 */
6556 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6557 void *data, u16 data_len)
6559 struct mgmt_cp_add_advertising *cp = data;
6560 struct mgmt_rp_add_advertising rp;
6562 u32 supported_flags, phy_flags;
6564 u16 timeout, duration;
6565 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
6566 u8 schedule_instance = 0;
6567 struct adv_info *next_instance;
6569 struct mgmt_pending_cmd *cmd;
6570 struct hci_request req;
6572 BT_DBG("%s", hdev->name);
6574 status = mgmt_le_support(hdev);
6576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Instance ids are 1..HCI_MAX_ADV_INSTANCES. */
6579 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6580 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6581 MGMT_STATUS_INVALID_PARAMS);
/* Command length must match the two variable-length payloads. */
6583 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
6584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6585 MGMT_STATUS_INVALID_PARAMS);
6587 flags = __le32_to_cpu(cp->flags);
6588 timeout = __le16_to_cpu(cp->timeout);
6589 duration = __le16_to_cpu(cp->duration);
6591 /* The current implementation only supports a subset of the specified
6592 * flags. Also need to check mutual exclusiveness of sec flags.
6594 supported_flags = get_supported_adv_flags(hdev);
6595 phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than one
 * secondary-PHY bit is set, which is invalid.
 */
6596 if (flags & ~supported_flags ||
6597 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
6598 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6599 MGMT_STATUS_INVALID_PARAMS);
/* An instance timeout is rejected while the controller is off. */
6603 if (timeout && !hdev_is_powered(hdev)) {
6604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6605 MGMT_STATUS_REJECTED);
/* Serialize against other in-flight advertising/LE state changes. */
6609 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6610 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6611 pending_find(MGMT_OP_SET_LE, hdev)) {
6612 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6617 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6618 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6619 cp->scan_rsp_len, false)) {
6620 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6621 MGMT_STATUS_INVALID_PARAMS);
6625 err = hci_add_adv_instance(hdev, cp->instance, flags,
6626 cp->adv_data_len, cp->data,
6628 cp->data + cp->adv_data_len,
6631 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6632 MGMT_STATUS_FAILED);
6636 /* Only trigger an advertising added event if a new instance was
6639 if (hdev->adv_instance_cnt > prev_instance_cnt)
6640 mgmt_advertising_added(sk, hdev, cp->instance);
6642 if (hdev->cur_adv_instance == cp->instance) {
/* If the currently advertised instance is being changed then
 * cancel the current advertising and schedule the next
 * instance. If there is only one instance then the overridden
 * advertising data will be visible right away.
 */
6648 cancel_adv_timeout(hdev);
6650 next_instance = hci_get_next_instance(hdev, cp->instance);
6652 schedule_instance = next_instance->instance;
6653 } else if (!hdev->adv_instance_timeout) {
/* Immediately advertise the new instance if no other
 * instance is currently being advertised.
 */
6657 schedule_instance = cp->instance;
/* If the HCI_ADVERTISING flag is set or the device isn't powered or
 * there is no instance to be advertised then we have no HCI
 * communication to make. Simply return.
 */
6664 if (!hdev_is_powered(hdev) ||
6665 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6666 !schedule_instance) {
6667 rp.instance = cp->instance;
6668 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6669 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6673 /* We're good to go, update advertising data, parameters, and start
6676 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6683 hci_req_init(&req, hdev);
6685 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
6688 err = hci_req_run(&req, add_advertising_complete);
6691 mgmt_pending_remove(cmd);
6694 hci_dev_unlock(hdev);
/* Completion callback for the Remove Advertising HCI request.
 * The instance removal itself has already happened, so the pending
 * command is always answered with MGMT_STATUS_SUCCESS regardless of
 * the HCI status.
 */
6699 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6702 struct mgmt_pending_cmd *cmd;
6703 struct mgmt_cp_remove_advertising *cp;
6704 struct mgmt_rp_remove_advertising rp;
6706 BT_DBG("status %d", status);
6710 /* A failure status here only means that we failed to disable
6711 * advertising. Otherwise, the advertising instance has been removed,
6712 * so report success.
6714 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6719 rp.instance = cp->instance;
6721 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6723 mgmt_pending_remove(cmd);
6726 hci_dev_unlock(hdev);
/* MGMT Remove Advertising command handler.
 * Removes one advertising instance (cp->instance != 0) or all of them
 * (cp->instance == 0); advertising is disabled when the last instance
 * goes away.  The HCI request is only run when there is actual
 * controller communication to perform.
 */
6729 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6730 void *data, u16 data_len)
6732 struct mgmt_cp_remove_advertising *cp = data;
6733 struct mgmt_rp_remove_advertising rp;
6734 struct mgmt_pending_cmd *cmd;
6735 struct hci_request req;
6738 BT_DBG("%s", hdev->name);
/* A specific instance id must refer to an existing instance. */
6742 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
6743 err = mgmt_cmd_status(sk, hdev->id,
6744 MGMT_OP_REMOVE_ADVERTISING,
6745 MGMT_STATUS_INVALID_PARAMS);
/* Serialize against other in-flight advertising/LE state changes. */
6749 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6750 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6751 pending_find(MGMT_OP_SET_LE, hdev)) {
6752 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6757 if (list_empty(&hdev->adv_instances)) {
6758 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6759 MGMT_STATUS_INVALID_PARAMS);
6763 hci_req_init(&req, hdev);
6765 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
/* Last instance gone: queue the advertising-disable command too. */
6767 if (list_empty(&hdev->adv_instances))
6768 __hci_req_disable_advertising(&req);
6770 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
6771 * flag is set or the device isn't powered then we have no HCI
6772 * communication to make. Simply return.
6774 if (skb_queue_empty(&req.cmd_q) ||
6775 !hdev_is_powered(hdev) ||
6776 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6777 hci_req_purge(&req);
6778 rp.instance = cp->instance;
6779 err = mgmt_cmd_complete(sk, hdev->id,
6780 MGMT_OP_REMOVE_ADVERTISING,
6781 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6785 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6792 err = hci_req_run(&req, remove_advertising_complete);
6794 mgmt_pending_remove(cmd);
6797 hci_dev_unlock(hdev);
6802 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6803 void *data, u16 data_len)
6805 struct mgmt_cp_get_adv_size_info *cp = data;
6806 struct mgmt_rp_get_adv_size_info rp;
6807 u32 flags, supported_flags;
6810 BT_DBG("%s", hdev->name);
6812 if (!lmp_le_capable(hdev))
6813 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6814 MGMT_STATUS_REJECTED);
6816 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6817 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6818 MGMT_STATUS_INVALID_PARAMS);
6820 flags = __le32_to_cpu(cp->flags);
6822 /* The current implementation only supports a subset of the specified
6825 supported_flags = get_supported_adv_flags(hdev);
6826 if (flags & ~supported_flags)
6827 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6828 MGMT_STATUS_INVALID_PARAMS);
6830 rp.instance = cp->instance;
6831 rp.flags = cp->flags;
6832 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
6833 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
6835 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6836 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* MGMT opcode dispatch table, indexed by opcode (entry 0 is unused).
 * Each entry carries the handler, the fixed (or minimum) parameter
 * size and optional HCI_MGMT_* flags such as HCI_MGMT_UNTRUSTED and
 * HCI_MGMT_UNCONFIGURED.  NOTE(review): some flag continuation lines
 * are elided from this extract.
 */
6841 static const struct hci_mgmt_handler mgmt_handlers[] = {
6842 { NULL }, /* 0x0000 (no command) */
6843 { read_version, MGMT_READ_VERSION_SIZE,
6845 HCI_MGMT_UNTRUSTED },
6846 { read_commands, MGMT_READ_COMMANDS_SIZE,
6848 HCI_MGMT_UNTRUSTED },
6849 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6851 HCI_MGMT_UNTRUSTED },
6852 { read_controller_info, MGMT_READ_INFO_SIZE,
6853 HCI_MGMT_UNTRUSTED },
6854 { set_powered, MGMT_SETTING_SIZE },
6855 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6856 { set_connectable, MGMT_SETTING_SIZE },
6857 { set_fast_connectable, MGMT_SETTING_SIZE },
6858 { set_bondable, MGMT_SETTING_SIZE },
6859 { set_link_security, MGMT_SETTING_SIZE },
6860 { set_ssp, MGMT_SETTING_SIZE },
6861 { set_hs, MGMT_SETTING_SIZE },
6862 { set_le, MGMT_SETTING_SIZE },
6863 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6864 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6865 { add_uuid, MGMT_ADD_UUID_SIZE },
6866 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6867 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6869 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6871 { disconnect, MGMT_DISCONNECT_SIZE },
6872 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6873 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6874 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6875 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6876 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6877 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6878 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6879 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6880 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6881 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6882 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6883 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6884 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6886 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6887 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6888 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6889 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6890 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6891 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6892 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6893 { set_advertising, MGMT_SETTING_SIZE },
6894 { set_bredr, MGMT_SETTING_SIZE },
6895 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6896 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6897 { set_secure_conn, MGMT_SETTING_SIZE },
6898 { set_debug_keys, MGMT_SETTING_SIZE },
6899 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6900 { load_irks, MGMT_LOAD_IRKS_SIZE,
6902 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6903 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6904 { add_device, MGMT_ADD_DEVICE_SIZE },
6905 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6906 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6908 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6910 HCI_MGMT_UNTRUSTED },
6911 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6912 HCI_MGMT_UNCONFIGURED |
6913 HCI_MGMT_UNTRUSTED },
6914 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6915 HCI_MGMT_UNCONFIGURED },
6916 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6917 HCI_MGMT_UNCONFIGURED },
6918 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6920 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6921 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
6923 HCI_MGMT_UNTRUSTED },
6924 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
6925 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
6927 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
6928 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
6929 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
6930 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
6931 HCI_MGMT_UNTRUSTED },
6932 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
6933 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
6934 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
/* Announce a newly registered controller index to mgmt sockets,
 * choosing between the legacy INDEX_ADDED / UNCONF_INDEX_ADDED events
 * and the extended EXT_INDEX_ADDED event based on controller state and
 * device type.  Raw-quirk devices are never announced.
 * NOTE(review): switch-case labels and the ev field setup are on lines
 * elided from this extract.
 */
6937 void mgmt_index_added(struct hci_dev *hdev)
6939 struct mgmt_ev_ext_index ev;
6941 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6944 switch (hdev->dev_type) {
6946 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6947 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6948 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6951 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6952 HCI_MGMT_INDEX_EVENTS);
6965 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6966 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce removal of a controller index.  All pending mgmt commands
 * for the index are completed with MGMT_STATUS_INVALID_INDEX, then the
 * matching legacy / unconfigured / extended removal event is emitted.
 * Raw-quirk devices are never announced.
 */
6969 void mgmt_index_removed(struct hci_dev *hdev)
6971 struct mgmt_ev_ext_index ev;
6972 u8 status = MGMT_STATUS_INVALID_INDEX;
6974 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6977 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command. */
6979 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6981 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6982 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6983 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6986 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6987 HCI_MGMT_INDEX_EVENTS);
7000 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7001 HCI_MGMT_EXT_INDEX_EVENTS);
7004 /* This function requires the caller holds hdev->lock */
7005 static void restart_le_actions(struct hci_dev *hdev)
7007 struct hci_conn_params *p;
7009 list_for_each_entry(p, &hdev->le_conn_params, list) {
7010 /* Needed for AUTO_OFF case where might not "really"
7011 * have been powered off.
7013 list_del_init(&p->action);
7015 switch (p->auto_connect) {
7016 case HCI_AUTO_CONN_DIRECT:
7017 case HCI_AUTO_CONN_ALWAYS:
7018 list_add(&p->action, &hdev->pend_le_conns);
7020 case HCI_AUTO_CONN_REPORT:
7021 list_add(&p->action, &hdev->pend_le_reports);
/* Called when a power-on attempt finishes (err is the outcome).
 * Re-arms stored LE auto-connect actions, refreshes background
 * scanning, answers pending Set Powered commands and broadcasts the
 * new settings.
 */
7029 void mgmt_power_on(struct hci_dev *hdev, int err)
7031 struct cmd_lookup match = { NULL, hdev };
7033 BT_DBG("err %d", err);
7038 restart_le_actions(hdev);
7039 hci_update_background_scan(hdev);
/* settings_rsp collects the requesting socket into match.sk. */
7042 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7044 new_settings(hdev, match.sk);
7049 hci_dev_unlock(hdev);
/* Finalize a power-off: answer pending Set Powered commands, fail all
 * other pending commands with NOT_POWERED (or INVALID_INDEX during
 * unregistration), broadcast a zeroed class of device if one was set,
 * and emit the updated settings.
 */
7052 void __mgmt_power_off(struct hci_dev *hdev)
7054 struct cmd_lookup match = { NULL, hdev };
7055 u8 status, zero_cod[] = { 0, 0, 0 };
7057 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7059 /* If the power off is because of hdev unregistration let
7060 * use the appropriate INVALID_INDEX status. Otherwise use
7061 * NOT_POWERED. We cover both scenarios here since later in
7062 * mgmt_index_removed() any hci_conn callbacks will have already
7063 * been triggered, potentially causing misleading DISCONNECTED
7066 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7067 status = MGMT_STATUS_INVALID_INDEX;
7069 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0 matches every remaining pending command. */
7071 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7073 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
7074 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7075 zero_cod, sizeof(zero_cod),
7076 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
7077 ext_info_changed(hdev, NULL);
7080 new_settings(hdev, match.sk);
/* Report a failed power-on attempt to the socket that issued the pending
 * SET_POWERED command.  -ERFKILL is mapped to MGMT_STATUS_RFKILLED, any
 * other error to MGMT_STATUS_FAILED.
 */
7086 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7088 struct mgmt_pending_cmd *cmd;
7091 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7095 if (err == -ERFKILL)
7096 status = MGMT_STATUS_RFKILLED;
7098 status = MGMT_STATUS_FAILED;
7100 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7102 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a newly created BR/EDR link key so that
 * userspace can decide whether to store it (store_hint taken from the
 * persistent flag — trailing parameter not visible in this extract).
 */
7105 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7108 struct mgmt_ev_new_link_key ev;
7110 memset(&ev, 0, sizeof(ev));
7112 ev.store_hint = persistent;
7113 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
/* Link keys only exist for BR/EDR links */
7114 ev.key.addr.type = BDADDR_BREDR;
7115 ev.key.type = key->type;
7116 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7117 ev.key.pin_len = key->pin_len;
7119 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long term key's type/authenticated fields to the MGMT_LTK_*
 * constant reported to userspace.  NOTE(review): the case labels for the
 * non-P256 and P256 key types were dropped by extraction; the visible
 * fallthrough default is MGMT_LTK_UNAUTHENTICATED.
 */
7122 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7124 switch (ltk->type) {
7127 if (ltk->authenticated)
7128 return MGMT_LTK_AUTHENTICATED;
7129 return MGMT_LTK_UNAUTHENTICATED;
7131 if (ltk->authenticated)
7132 return MGMT_LTK_P256_AUTH;
7133 return MGMT_LTK_P256_UNAUTH;
7134 case SMP_LTK_P256_DEBUG:
7135 return MGMT_LTK_P256_DEBUG;
7138 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a newly distributed LTK.  The
 * store hint is cleared for non-identity random addresses since those
 * keys cannot be looked up again after the address changes.
 */
7141 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7143 struct mgmt_ev_new_long_term_key ev;
7145 memset(&ev, 0, sizeof(ev));
7147 /* Devices using resolvable or non-resolvable random addresses
7148 * without providing an identity resolving key don't require
7149 * to store long term keys. Their addresses will change the
7152 * Only when a remote device provides an identity address
7153 * make sure the long term key is stored. If the remote
7154 * identity is known, the long term keys are internally
7155 * mapped to the identity address. So allow static random
7156 * and public addresses here.
/* Top two address bits 11 identify a static random (identity) address;
 * anything else random is resolvable/non-resolvable -> don't store. */
7158 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7159 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7160 ev.store_hint = 0x00;
7162 ev.store_hint = persistent;
7164 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7165 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7166 ev.key.type = mgmt_ltk_type(key);
7167 ev.key.enc_size = key->enc_size;
7168 ev.key.ediv = key->ediv;
7169 ev.key.rand = key->rand;
/* NOTE(review): the branch body for SMP_LTK (master flag) was dropped
 * by extraction here. */
7171 if (key->type == SMP_LTK)
7174 /* Make sure we copy only the significant bytes based on the
7175 * encryption key size, and set the rest of the value to zeroes.
7177 memcpy(ev.key.val, key->val, key->enc_size);
7178 memset(ev.key.val + key->enc_size, 0,
7179 sizeof(ev.key.val) - key->enc_size);
7181 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a newly distributed identity resolving key,
 * including the resolvable private address (rpa) it was received under.
 */
7184 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7186 struct mgmt_ev_new_irk ev;
7188 memset(&ev, 0, sizeof(ev));
7190 ev.store_hint = persistent;
7192 bacpy(&ev.rpa, &irk->rpa);
7193 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7194 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7195 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7197 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a newly distributed signature resolving key.
 * Like LTKs, keys for non-identity random addresses are flagged as not
 * worth storing (trailing persistent parameter not visible here).
 */
7200 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7203 struct mgmt_ev_new_csrk ev;
7205 memset(&ev, 0, sizeof(ev));
7207 /* Devices using resolvable or non-resolvable random addresses
7208 * without providing an identity resolving key don't require
7209 * to store signature resolving keys. Their addresses will change
7210 * the next time around.
7212 * Only when a remote device provides an identity address
7213 * make sure the signature resolving key is stored. So allow
7214 * static random and public addresses here.
/* 0xc0 top bits mark a static random (identity) address */
7216 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7217 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7218 ev.store_hint = 0x00;
7220 ev.store_hint = persistent;
7222 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7223 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7224 ev.key.type = csrk->type;
7225 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7227 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with remote-suggested connection
 * parameters.  Only identity addresses are reported since parameters
 * for changing private addresses cannot be re-applied later.
 */
7230 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7231 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7232 u16 max_interval, u16 latency, u16 timeout)
7234 struct mgmt_ev_new_conn_param ev;
7236 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7239 memset(&ev, 0, sizeof(ev));
7240 bacpy(&ev.addr.bdaddr, bdaddr);
7241 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7242 ev.store_hint = store_hint;
/* Event fields are little-endian on the wire */
7243 ev.min_interval = cpu_to_le16(min_interval);
7244 ev.max_interval = cpu_to_le16(max_interval);
7245 ev.latency = cpu_to_le16(latency);
7246 ev.timeout = cpu_to_le16(timeout);
7248 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED with EIR data for the new connection:
 * either the cached LE advertising data, or (for BR/EDR) the remote name
 * plus class of device.  NOTE(review): the buf declaration and some
 * branch structure were dropped by extraction.
 */
7251 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7252 u32 flags, u8 *name, u8 name_len)
7255 struct mgmt_ev_device_connected *ev = (void *) buf;
7258 bacpy(&ev->addr.bdaddr, &conn->dst);
7259 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7261 ev->flags = __cpu_to_le32(flags);
7263 /* We must ensure that the EIR Data fields are ordered and
7264 * unique. Keep it simple for now and avoid the problem by not
7265 * adding any BR/EDR data to the LE adv.
7267 if (conn->le_adv_data_len > 0) {
7268 memcpy(&ev->eir[eir_len],
7269 conn->le_adv_data, conn->le_adv_data_len);
7270 eir_len = conn->le_adv_data_len;
/* No LE adv data: append the remote name and, if known, the CoD */
7273 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
7276 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7277 eir_len = eir_append_data(ev->eir, eir_len,
7279 conn->dev_class, 3);
7282 ev->eir_len = cpu_to_le16(eir_len);
7284 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7285 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT command
 * with success and report the issuing socket back through *data so the
 * caller can skip it when broadcasting the disconnect event.
 */
7288 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7290 struct sock **sk = data;
7292 cmd->cmd_complete(cmd, 0);
7297 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: emit the device-unpaired event for a
 * pending UNPAIR_DEVICE command and complete it with success.
 */
7300 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7302 struct hci_dev *hdev = data;
7303 struct mgmt_cp_unpair_device *cp = cmd->param;
7305 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7307 cmd->cmd_complete(cmd, 0);
7308 mgmt_pending_remove(cmd);
/* Return whether a SET_POWERED(off) command is currently pending, i.e.
 * the device is in the process of powering down.  NOTE(review): the
 * checks on cmd/cp and the return statements were dropped by extraction.
 */
7311 bool mgmt_powering_down(struct hci_dev *hdev)
7313 struct mgmt_pending_cmd *cmd;
7314 struct mgmt_mode *cp;
7316 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Handle a connection going away: queue the final power-off work when
 * this was the last connection during power-down, complete pending
 * DISCONNECT commands, and emit MGMT_EV_DEVICE_DISCONNECTED (skipping
 * the socket that requested the disconnect).
 */
7327 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7328 u8 link_type, u8 addr_type, u8 reason,
7329 bool mgmt_connected)
7331 struct mgmt_ev_device_disconnected ev;
7332 struct sock *sk = NULL;
7334 /* The connection is still in hci_conn_hash so test for 1
7335 * instead of 0 to know if this is the last one.
7337 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7338 cancel_delayed_work(&hdev->power_off);
7339 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Only report connections that were announced to mgmt userspace */
7342 if (!mgmt_connected)
7345 if (link_type != ACL_LINK && link_type != LE_LINK)
/* disconnect_rsp stores the requesting socket in sk */
7348 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7350 bacpy(&ev.addr.bdaddr, bdaddr);
7351 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7354 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7359 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Report a failed disconnect attempt: complete pending UNPAIR_DEVICE
 * commands and, if a pending DISCONNECT command matches this address and
 * type, complete it with the translated HCI status.
 */
7363 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7364 u8 link_type, u8 addr_type, u8 status)
7366 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7367 struct mgmt_cp_disconnect *cp;
7368 struct mgmt_pending_cmd *cmd;
7370 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7373 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only complete the command if it targets this exact address/type */
7379 if (bacmp(bdaddr, &cp->addr.bdaddr))
7382 if (cp->addr.type != bdaddr_type)
7385 cmd->cmd_complete(cmd, mgmt_status(status));
7386 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED for an outgoing connection attempt that
 * failed; also queue the final power-off work if this was the last
 * connection while the device is powering down.
 */
7389 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7390 u8 addr_type, u8 status)
7392 struct mgmt_ev_connect_failed ev;
7394 /* The connection is still in hci_conn_hash so test for 1
7395 * instead of 0 to know if this is the last one.
7397 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7398 cancel_delayed_work(&hdev->power_off);
7399 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7402 bacpy(&ev.addr.bdaddr, bdaddr);
7403 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7404 ev.status = mgmt_status(status);
7406 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST asking userspace for a PIN code for the
 * given BR/EDR address (the secure flag assignment is not visible in
 * this extract).
 */
7409 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7411 struct mgmt_ev_pin_code_request ev;
7413 bacpy(&ev.addr.bdaddr, bdaddr);
7414 ev.addr.type = BDADDR_BREDR;
7417 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the translated HCI
 * status once the controller has processed the reply.
 */
7420 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7423 struct mgmt_pending_cmd *cmd;
7425 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7429 cmd->cmd_complete(cmd, mgmt_status(status));
7430 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the translated HCI
 * status once the controller has processed the negative reply.
 */
7433 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7436 struct mgmt_pending_cmd *cmd;
7438 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7442 cmd->cmd_complete(cmd, mgmt_status(status));
7443 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking userspace to confirm a
 * numeric-comparison value during pairing.  Returns the mgmt_event()
 * result.
 */
7446 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7447 u8 link_type, u8 addr_type, u32 value,
7450 struct mgmt_ev_user_confirm_request ev;
7452 BT_DBG("%s", hdev->name);
7454 bacpy(&ev.addr.bdaddr, bdaddr);
7455 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7456 ev.confirm_hint = confirm_hint;
7457 ev.value = cpu_to_le32(value);
7459 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking userspace to enter a passkey
 * during pairing.  Returns the mgmt_event() result.
 */
7463 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7464 u8 link_type, u8 addr_type)
7466 struct mgmt_ev_user_passkey_request ev;
7468 BT_DBG("%s", hdev->name);
7470 bacpy(&ev.addr.bdaddr, bdaddr);
7471 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7473 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey reply
 * commands: find the pending command by opcode and complete it with the
 * translated HCI status.
 */
7477 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7478 u8 link_type, u8 addr_type, u8 status,
7481 struct mgmt_pending_cmd *cmd;
7483 cmd = pending_find(opcode, hdev);
7487 cmd->cmd_complete(cmd, mgmt_status(status));
7488 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY command. */
7493 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7494 u8 link_type, u8 addr_type, u8 status)
7496 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7497 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY command. */
7500 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7501 u8 link_type, u8 addr_type, u8 status)
7503 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7505 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY command. */
7508 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7509 u8 link_type, u8 addr_type, u8 status)
7511 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7512 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY command. */
7515 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7516 u8 link_type, u8 addr_type, u8 status)
7518 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7520 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey the
 * remote side must enter; entered counts keypresses so far.  Returns the
 * mgmt_event() result.
 */
7523 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7524 u8 link_type, u8 addr_type, u32 passkey,
7527 struct mgmt_ev_passkey_notify ev;
7529 BT_DBG("%s", hdev->name);
7531 bacpy(&ev.addr.bdaddr, bdaddr);
7532 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7533 ev.passkey = __cpu_to_le32(passkey);
7534 ev.entered = entered;
7536 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure on a connection: emit
 * MGMT_EV_AUTH_FAILED (skipping the socket of a pending pairing command,
 * if any) and complete that pairing command with the translated status.
 */
7539 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7541 struct mgmt_ev_auth_failed ev;
7542 struct mgmt_pending_cmd *cmd;
7543 u8 status = mgmt_status(hci_status);
7545 bacpy(&ev.addr.bdaddr, &conn->dst);
7546 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7549 cmd = find_pairing(conn);
7551 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7552 cmd ? cmd->sk : NULL);
/* NOTE(review): the if (cmd) guard around these two lines was dropped
 * by extraction. */
7555 cmd->cmd_complete(cmd, status);
7556 mgmt_pending_remove(cmd);
/* Handle completion of enabling/disabling link-level authentication:
 * on error, fail pending SET_LINK_SECURITY commands; on success, sync
 * the HCI_LINK_SECURITY flag with the HCI_AUTH hardware flag, answer the
 * pending commands and emit new settings if anything changed.
 */
7560 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7562 struct cmd_lookup match = { NULL, hdev };
7566 u8 mgmt_err = mgmt_status(status);
7567 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7568 cmd_status_rsp, &mgmt_err);
7572 if (test_bit(HCI_AUTH, &hdev->flags))
7573 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7575 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7577 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7581 new_settings(hdev, match.sk);
/* Queue an HCI Write Extended Inquiry Response command that clears the
 * controller's EIR data; also clears the cached copy in hdev->eir.
 * No-op if the controller lacks extended inquiry support.
 */
7587 static void clear_eir(struct hci_request *req)
7589 struct hci_dev *hdev = req->hdev;
7590 struct hci_cp_write_eir cp;
7592 if (!lmp_ext_inq_capable(hdev))
7595 memset(hdev->eir, 0, sizeof(hdev->eir));
7597 memset(&cp, 0, sizeof(cp));
7599 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Handle completion of toggling Simple Secure Pairing: on error, roll
 * back the SSP (and dependent HS) flags and fail pending SET_SSP
 * commands; on success, sync flags, answer pending commands, emit new
 * settings if changed, and refresh debug-key mode and EIR data.
 * NOTE(review): several branch/guard lines were dropped by extraction.
 */
7602 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7604 struct cmd_lookup match = { NULL, hdev };
7605 struct hci_request req;
7606 bool changed = false;
7609 u8 mgmt_err = mgmt_status(status);
7611 if (enable && hci_dev_test_and_clear_flag(hdev,
/* HS depends on SSP, so it is cleared alongside it */
7613 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7614 new_settings(hdev, NULL);
7617 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7623 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7625 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7627 changed = hci_dev_test_and_clear_flag(hdev,
7630 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7633 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7636 new_settings(hdev, match.sk);
7641 hci_req_init(&req, hdev);
7643 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7644 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7645 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7646 sizeof(enable), &enable);
7647 __hci_req_update_eir(&req);
7652 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: remember the first pending command's
 * socket in the cmd_lookup, taking a reference on it.
 */
7655 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7657 struct cmd_lookup *match = data;
7659 if (match->sk == NULL) {
7660 match->sk = cmd->sk;
/* Reference dropped later by the caller via sock_put() */
7661 sock_hold(match->sk);
/* Handle completion of a class-of-device update: locate the socket of
 * any pending SET_DEV_CLASS/ADD_UUID/REMOVE_UUID command so it can be
 * excluded, then broadcast the class-of-device-changed event.
 */
7665 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7668 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7670 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7671 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7672 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7675 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
7676 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
7677 ext_info_changed(hdev, NULL);
/* Handle completion of a local-name update: store the new name, and
 * broadcast the local-name-changed event (excluding the requester's
 * socket) unless this happened as part of powering on.
 */
7684 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7686 struct mgmt_cp_set_local_name ev;
7687 struct mgmt_pending_cmd *cmd;
7692 memset(&ev, 0, sizeof(ev));
7693 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7694 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7696 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* NOTE(review): upstream only copies the name when no command is
 * pending; the guard was dropped by extraction. */
7698 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7700 /* If this is a HCI command related to powering on the
7701 * HCI dev don't send any mgmt signals.
7703 if (pending_find(MGMT_OP_SET_POWERED, hdev))
7707 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7708 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
7709 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Return whether the 128-bit uuid appears in the uuids array of
 * uuid_count entries (byte-wise comparison).
 */
7712 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7716 for (i = 0; i < uuid_count; i++) {
7717 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data and return whether any contained UUID
 * (16-, 32- or 128-bit, expanded against the Bluetooth base UUID) is in
 * the given filter list.  Stops at a zero-length field or when the data
 * would be overrun.  NOTE(review): the return statements and some loop
 * exits were dropped by extraction.
 */
7724 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7728 while (parsed < eir_len) {
7729 u8 field_len = eir[0];
/* Malformed field running past the buffer: stop parsing */
7736 if (eir_len - parsed < field_len + 1)
7740 case EIR_UUID16_ALL:
7741 case EIR_UUID16_SOME:
/* 16-bit UUIDs occupy bytes 12-13 of the base UUID (little-endian) */
7742 for (i = 0; i + 3 <= field_len; i += 2) {
7743 memcpy(uuid, bluetooth_base_uuid, 16);
7744 uuid[13] = eir[i + 3];
7745 uuid[12] = eir[i + 2];
7746 if (has_uuid(uuid, uuid_count, uuids))
7750 case EIR_UUID32_ALL:
7751 case EIR_UUID32_SOME:
/* 32-bit UUIDs occupy bytes 12-15 of the base UUID */
7752 for (i = 0; i + 5 <= field_len; i += 4) {
7753 memcpy(uuid, bluetooth_base_uuid, 16);
7754 uuid[15] = eir[i + 5];
7755 uuid[14] = eir[i + 4];
7756 uuid[13] = eir[i + 3];
7757 uuid[12] = eir[i + 2];
7758 if (has_uuid(uuid, uuid_count, uuids))
7762 case EIR_UUID128_ALL:
7763 case EIR_UUID128_SOME:
/* 128-bit UUIDs are copied verbatim */
7764 for (i = 0; i + 17 <= field_len; i += 16) {
7765 memcpy(uuid, eir + i + 2, 16);
7766 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + field_len payload) */
7772 parsed += field_len + 1;
7773 eir += field_len + 1;
/* Schedule an LE scan restart (after DISCOV_LE_RESTART_DELAY) so that
 * strict duplicate filtering reports fresh RSSI values.  Skipped when
 * not scanning or when the current scan window is about to end anyway.
 */
7779 static void restart_le_scan(struct hci_dev *hdev)
7781 /* If controller is not scanning we are done. */
7782 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7785 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7786 hdev->discovery.scan_start +
7787 hdev->discovery.scan_duration))
7790 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
7791 DISCOV_LE_RESTART_DELAY);
/* Apply the service-discovery filter to a found-device result: check the
 * RSSI threshold and the UUID filter list (against both EIR and scan
 * response data), restarting the LE scan when the strict-duplicate-
 * filter quirk hides RSSI updates.  Returns whether the result should be
 * reported.  NOTE(review): the return statements were dropped by
 * extraction.
 */
7794 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7795 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7797 /* If a RSSI threshold has been specified, and
7798 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7799 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7800 * is set, let it through for further processing, as we might need to
7803 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7804 * the results are also dropped.
7806 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7807 (rssi == HCI_RSSI_INVALID ||
7808 (rssi < hdev->discovery.rssi &&
7809 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7812 if (hdev->discovery.uuid_count != 0) {
7813 /* If a list of UUIDs is provided in filter, results with no
7814 * matching UUID should be dropped.
7816 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7817 hdev->discovery.uuids) &&
7818 !eir_has_uuids(scan_rsp, scan_rsp_len,
7819 hdev->discovery.uuid_count,
7820 hdev->discovery.uuids))
7824 /* If duplicate filtering does not report RSSI changes, then restart
7825 * scanning to ensure updated result with updated RSSI values.
7827 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7828 restart_le_scan(hdev);
7830 /* Validate RSSI value against the RSSI threshold once more. */
7831 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7832 rssi < hdev->discovery.rssi)
/* Build and emit MGMT_EV_DEVICE_FOUND for an inquiry/scan result after
 * applying discovery, service-filter and limited-discoverable checks.
 * The event carries the EIR data, an appended class of device (when not
 * already present) and the scan response data.
 * NOTE(review): the buf declaration and several early-return/guard
 * lines were dropped by extraction.
 */
7839 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7840 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7841 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7844 struct mgmt_ev_device_found *ev = (void *)buf;
7847 /* Don't send events for a non-kernel initiated discovery. With
7848 * LE one exception is if we have pend_le_reports > 0 in which
7849 * case we're doing passive scanning and want these events.
7851 if (!hci_discovery_active(hdev)) {
7852 if (link_type == ACL_LINK)
7854 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7858 if (hdev->discovery.result_filtering) {
7859 /* We are using service discovery */
7860 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
7865 if (hdev->discovery.limited) {
7866 /* Check for limited discoverable bit */
/* BR/EDR: bit 5 of the second CoD byte; LE: LE_AD_LIMITED flag */
7868 if (!(dev_class[1] & 0x20))
7871 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
7872 if (!flags || !(flags[0] & LE_AD_LIMITED))
7877 /* Make sure that the buffer is big enough. The 5 extra bytes
7878 * are for the potential CoD field.
7880 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7883 memset(buf, 0, sizeof(buf));
7885 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7886 * RSSI value was reported as 0 when not available. This behavior
7887 * is kept when using device discovery. This is required for full
7888 * backwards compatibility with the API.
7890 * However when using service discovery, the value 127 will be
7891 * returned when the RSSI is not available.
7893 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7894 link_type == ACL_LINK)
7897 bacpy(&ev->addr.bdaddr, bdaddr);
7898 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7900 ev->flags = cpu_to_le32(flags);
7903 /* Copy EIR or advertising data into event */
7904 memcpy(ev->eir, eir, eir_len);
/* Append CoD only if the EIR data does not already contain one */
7906 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7908 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7911 if (scan_rsp_len > 0)
7912 /* Append scan response data to event */
7913 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len)
7915 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7916 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7918 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit MGMT_EV_DEVICE_FOUND carrying only the remote device's name
 * (as an EIR_NAME_COMPLETE field) after a remote name request resolves.
 */
7921 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7922 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7924 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR field length and type bytes for the name */
7925 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7928 ev = (struct mgmt_ev_device_found *) buf;
7930 memset(buf, 0, sizeof(buf));
7932 bacpy(&ev->addr.bdaddr, bdaddr);
7933 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7936 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7939 ev->eir_len = cpu_to_le16(eir_len);
7941 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit MGMT_EV_DISCOVERING to report a change of the discovery state
 * (discovering is 0 or 1) along with the current discovery type.
 */
7944 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7946 struct mgmt_ev_discovering ev;
7948 BT_DBG("%s discovering %u", hdev->name, discovering);
7950 memset(&ev, 0, sizeof(ev));
7951 ev.type = hdev->discovery.type;
7952 ev.discovering = discovering;
7954 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Management channel descriptor registered with the HCI socket layer:
 * routes HCI_CHANNEL_CONTROL messages to the mgmt_handlers table and
 * initializes per-hdev mgmt state via mgmt_init_hdev.
 */
7957 static struct hci_mgmt_chan chan = {
7958 .channel = HCI_CHANNEL_CONTROL,
7959 .handler_count = ARRAY_SIZE(mgmt_handlers),
7960 .handlers = mgmt_handlers,
7961 .hdev_init = mgmt_init_hdev,
7966 return hci_mgmt_chan_register(&chan);
/* Unregister the management channel on module/subsystem teardown. */
7969 void mgmt_exit(void)
7971 hci_mgmt_chan_unregister(&chan);