/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u8              cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
	__u16             mtu;
};
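
/* Illustrative sketch (userspace, not part of the kernel build): opening
 * a raw HCI socket bound to hci0 through the interface implemented in
 * this file. struct sockaddr_hci and the constants come from the UAPI
 * headers; error handling is omitted.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 */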
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
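
/* Each ocf_mask[] row above is a 128-bit bitmap (four __u32 words) indexed
 * by OCF: bit (ocf & 31) of word (ocf >> 5), as decoded by hci_test_bit().
 * hci_sock_sendmsg() consults the row for a command's OGF before queuing a
 * command from a sender without CAP_NET_RAW; commands whose bit is clear
 * are rejected with -EPERM. type_mask and event_mask play the same role
 * for the receive filter of unprivileged raw sockets.
 */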
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
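
/* Illustrative sketch (userspace, not part of this file): programming the
 * per-socket filter that is_filtered_packet() evaluates. SOL_HCI and
 * HCI_FILTER are UAPI constants; the hci_filter_* helpers are the macros
 * from BlueZ's bluetooth/hci.h and are assumed here.
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(HCI_EV_CMD_COMPLETE, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */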
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
			       strnlen(hdev->name, sizeof(ni->name)), '\0');

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
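
/* Every record queued to HCI_CHANNEL_MONITOR sockets, including the ones
 * built above, is prefixed with struct hci_mon_hdr: a 16-bit opcode
 * (HCI_MON_*), a 16-bit controller index and a 16-bit payload length, all
 * little endian. This is the framing that btmon reads off the socket.
 */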
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
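
/* The HCI_MON_CTRL_OPEN payload built above is, in order: the 32-bit
 * socket cookie, a 16-bit format (0x0000 raw, 0x0001 user, 0x0002
 * control), three version bytes, 32-bit flags (bit 0 = trusted), and the
 * TASK_COMM_LEN-byte comm string preceded by its length byte.
 */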
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
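
/* A stack-internal event is delivered to raw sockets as a regular HCI
 * event frame: hci_event_hdr with evt == HCI_EV_STACK_INTERNAL, followed
 * by hci_ev_stack_internal carrying the sub-type (e.g. HCI_EV_SI_DEVICE)
 * and its type-specific data, so readers can parse it like any other
 * event packet.
 */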
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
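
/* Illustrative sketch (hypothetical names, mirroring the shape of what
 * net/bluetooth/mgmt.c registers): a caller provides a command table for
 * one channel number; hci_mgmt_cmd() below indexes .handlers by opcode.
 *
 *	static const struct hci_mgmt_handler example_handlers[] = {
 *		...
 *	};
 *
 *	static struct hci_mgmt_chan example_chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(example_handlers),
 *		.handlers	= example_handlers,
 *	};
 *
 *	err = hci_mgmt_chan_register(&example_chan);
 */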
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered since all the cleanup will have already
			 * been complete and hdev will get released when we put
			 * below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will only ever be generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}
	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has already been an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;
	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;
	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;
	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;
	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}
	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);

#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
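
/* Wire layout enforced above for HCI_CHANNEL_LOGGING writes:
 *
 *	struct hci_mon_hdr  (opcode 0x0000, index, len)
 *	__u8  priority      (0-7)
 *	__u8  ident_len
 *	char  ident[]       (NUL terminated, may be empty)
 *	char  message[]     (NUL terminated)
 *
 * The opcode is rewritten to HCI_MON_USER_LOGGING before the frame is
 * forwarded to the monitor channel.
 */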
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       len);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * traffic only.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}