2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "mgmt_util.h"
/* List of registered HCI management channels (see hci_mgmt_chan_register). */
37 static LIST_HEAD(mgmt_chan_list);
/* Serializes lookup/add/remove on mgmt_chan_list. */
38 static DEFINE_MUTEX(mgmt_chan_list_lock);
/* Number of open monitor sockets; non-zero enables traffic mirroring
 * in hci_send_to_monitor() and monitor events in hci_sock_dev_event().
 */
40 static atomic_t monitor_promisc = ATOMIC_INIT(0);
42 /* ----- HCI socket interface ----- */
/* Access the HCI-specific per-socket state overlaid on struct sock.
 * NOTE(review): only part of struct hci_pinfo is visible in this excerpt.
 */
45 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* Per-socket packet filter, used for the RAW channel only. */
50 struct hci_filter filter;
/* Channel this socket is bound to (HCI_CHANNEL_RAW/USER/MONITOR/...). */
52 unsigned short channel;
/* Return the hci_dev bound to @sk, or an ERR_PTR:
 * -EBADFD if no device is bound, -EPIPE if the device is unregistering.
 */
56 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
58 struct hci_dev *hdev = hci_pi(sk)->hdev;
/* Socket was never bound to a device (NULL check elided in excerpt). */
61 return ERR_PTR(-EBADFD);
62 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
/* Device is going away; callers must not use it any more. */
63 return ERR_PTR(-EPIPE);
/* Set per-socket flag bit @nr (e.g. HCI_SOCK_TRUSTED). */
67 void hci_sock_set_flag(struct sock *sk, int nr)
69 set_bit(nr, &hci_pi(sk)->flags);
/* Clear per-socket flag bit @nr. */
72 void hci_sock_clear_flag(struct sock *sk, int nr)
74 clear_bit(nr, &hci_pi(sk)->flags);
/* Test per-socket flag bit @nr; returns non-zero if set. */
77 int hci_sock_test_flag(struct sock *sk, int nr)
79 return test_bit(nr, &hci_pi(sk)->flags);
/* Return the HCI channel this socket is bound to. */
82 unsigned short hci_sock_get_channel(struct sock *sk)
84 return hci_pi(sk)->channel;
/* Test bit @nr in a bitmask laid out as an array of 32-bit words
 * (used for the event mask in struct hci_filter, which is wider than
 * one word). Word index is nr/32, bit index is nr%32.
 */
87 static inline int hci_test_bit(int nr, const void *addr)
89 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Highest OGF covered by the security filter for unprivileged sockets. */
93 #define HCI_SFLT_MAX_OGF 5
95 struct hci_sec_filter {
/* Per-OGF bitmap (4 x 32 bits of OCFs) of commands allowed without
 * CAP_NET_RAW.
 */
98 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
/* Default security filter: which events and commands an unprivileged
 * (no CAP_NET_RAW) RAW socket may receive/send. Consulted in
 * hci_sock_setsockopt() and hci_sock_sendmsg().
 */
101 static const struct hci_sec_filter hci_sec_filter = {
/* Allowed event mask (two 32-bit words). */
105 { 0x1000d9fe, 0x0000b00c },
/* OGF_LINK_CTL (ordering implied by the OGF comments below). */
110 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
111 /* OGF_LINK_POLICY */
112 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
/* OGF_HOST_CTL — presumably; confirm against upstream table. */
114 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
/* OGF_INFO_PARAM — presumably; confirm against upstream table. */
116 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
117 /* OGF_STATUS_PARAM */
118 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all open HCI sockets, protected by its rwlock. */
122 static struct bt_sock_list hci_sk_list = {
123 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Decide whether @skb must be withheld from RAW socket @sk according to
 * the socket's hci_filter (packet-type mask, event mask, and optional
 * opcode match for CMD_COMPLETE/CMD_STATUS events).
 * NOTE(review): several return statements are elided in this excerpt.
 */
126 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
128 struct hci_filter *flt;
129 int flt_type, flt_event;
132 flt = &hci_pi(sk)->filter;
/* Packet type reduced to the range covered by type_mask. */
134 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
136 if (!test_bit(flt_type, &flt->type_mask))
139 /* Extra filter for event packets only */
140 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
/* First payload byte of an HCI event is the event code. */
143 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
145 if (!hci_test_bit(flt_event, &flt->event_mask))
148 /* Check filter only when opcode is set */
/* Opcode sits at different offsets in CMD_COMPLETE vs CMD_STATUS. */
152 if (flt_event == HCI_EV_CMD_COMPLETE &&
153 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
156 if (flt_event == HCI_EV_CMD_STATUS &&
157 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
163 /* Send frame to RAW socket */
/* Deliver @skb to every bound RAW/USER socket attached to @hdev,
 * applying per-channel visibility rules and the RAW packet filter.
 * A single private copy (with the type byte pushed in front) is cloned
 * per receiving socket.
 */
164 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
167 struct sk_buff *skb_copy = NULL;
169 BT_DBG("hdev %p len %d", hdev, skb->len);
171 read_lock(&hci_sk_list.lock);
173 sk_for_each(sk, &hci_sk_list.head) {
174 struct sk_buff *nskb;
176 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
179 /* Don't send frame to the socket it came from */
183 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
/* RAW sockets see only the four core packet types... */
184 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
185 bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
186 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
187 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
/* ...and only what passes their hci_filter. */
189 if (is_filtered_packet(sk, skb))
191 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
/* USER channel sockets see incoming traffic only. */
192 if (!bt_cb(skb)->incoming)
194 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
195 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
196 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
199 /* Don't send frame to other channel types */
204 /* Create a private copy with headroom */
/* Lazily created once, then cloned per socket below. */
205 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
209 /* Put type byte before the data */
210 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
213 nskb = skb_clone(skb_copy, GFP_ATOMIC);
/* Queue failure: drop the clone (kfree_skb elided in excerpt). */
217 if (sock_queue_rcv_skb(sk, nskb))
221 read_unlock(&hci_sk_list.lock);
226 /* Send frame to sockets with specific channel */
/* Clone @skb to every bound socket on @channel that has flag bit @flag
 * set, skipping @skip_sk (the originator). Runs under the socket-list
 * read lock, hence GFP_ATOMIC cloning.
 */
227 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
228 int flag, struct sock *skip_sk)
232 BT_DBG("channel %u len %d", channel, skb->len);
234 read_lock(&hci_sk_list.lock);
236 sk_for_each(sk, &hci_sk_list.head) {
237 struct sk_buff *nskb;
239 /* Ignore socket without the flag set */
240 if (!hci_sock_test_flag(sk, flag))
243 /* Skip the original socket */
247 if (sk->sk_state != BT_BOUND)
250 if (hci_pi(sk)->channel != channel)
253 nskb = skb_clone(skb, GFP_ATOMIC);
/* Drop the clone if the receive queue rejects it. */
257 if (sock_queue_rcv_skb(sk, nskb))
261 read_unlock(&hci_sk_list.lock);
264 /* Send frame to monitor socket */
/* Mirror @skb to all HCI_CHANNEL_MONITOR sockets, prefixed with a
 * hci_mon_hdr carrying a direction-specific monitor opcode, the device
 * index, and payload length. No-op when no monitor socket is open.
 */
265 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
267 struct sk_buff *skb_copy = NULL;
268 struct hci_mon_hdr *hdr;
/* Fast path: nothing to do without an open monitor socket. */
271 if (!atomic_read(&monitor_promisc))
274 BT_DBG("hdev %p len %d", hdev, skb->len);
/* Map the HCI packet type (and direction for data) to a monitor opcode. */
276 switch (bt_cb(skb)->pkt_type) {
277 case HCI_COMMAND_PKT:
278 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
281 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
283 case HCI_ACLDATA_PKT:
284 if (bt_cb(skb)->incoming)
285 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
287 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
289 case HCI_SCODATA_PKT:
290 if (bt_cb(skb)->incoming)
291 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
293 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
/* Any other packet type is reported as vendor diagnostic data. */
296 opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
302 /* Create a private copy with headroom */
303 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
307 /* Put header before the data */
308 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
309 hdr->opcode = opcode;
310 hdr->index = cpu_to_le16(hdev->id);
/* Original payload length (header excluded). */
311 hdr->len = cpu_to_le16(skb->len);
313 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
314 HCI_SOCK_TRUSTED, NULL);
/* Build a monitor-channel control skb describing a device lifecycle
 * @event (register/unregister, open/close, index info). Returns the skb
 * with a timestamped hci_mon_hdr pushed in front, or NULL on failure.
 * NOTE(review): the switch's case labels are elided in this excerpt;
 * the branches below correspond to the event being encoded.
 */
318 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
320 struct hci_mon_hdr *hdr;
321 struct hci_mon_new_index *ni;
322 struct hci_mon_index_info *ii;
/* HCI_DEV_REG: full new-index record (type, bus, address, name). */
328 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
332 ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
333 ni->type = hdev->dev_type;
335 bacpy(&ni->bdaddr, &hdev->bdaddr);
336 memcpy(ni->name, hdev->name, 8);
338 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
/* HCI_DEV_UNREG: empty payload. */
342 skb = bt_skb_alloc(0, GFP_ATOMIC);
346 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
/* Index info is only meaningful once a manufacturer is known. */
350 if (hdev->manufacturer == 0xffff)
356 skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
360 ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
361 bacpy(&ii->bdaddr, &hdev->bdaddr);
362 ii->manufacturer = cpu_to_le16(hdev->manufacturer);
364 opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
/* HCI_DEV_OPEN: empty payload. */
368 skb = bt_skb_alloc(0, GFP_ATOMIC);
372 opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
/* HCI_DEV_CLOSE: empty payload. */
376 skb = bt_skb_alloc(0, GFP_ATOMIC);
380 opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
/* Timestamp, then prepend the monitor header common to all events. */
387 __net_timestamp(skb);
389 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
390 hdr->opcode = opcode;
391 hdr->index = cpu_to_le16(hdev->id);
392 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay the current state of every registered controller to a freshly
 * bound monitor socket @sk: NEW_INDEX, then OPEN (if running), then
 * UP/SETUP status, so the monitor starts with a consistent view.
 */
397 static void send_monitor_replay(struct sock *sk)
399 struct hci_dev *hdev;
401 read_lock(&hci_dev_list_lock);
403 list_for_each_entry(hdev, &hci_dev_list, list) {
406 skb = create_monitor_event(hdev, HCI_DEV_REG);
/* On queue failure the skb is dropped (kfree_skb elided). */
410 if (sock_queue_rcv_skb(sk, skb))
/* Only running devices get OPEN and state events. */
413 if (!test_bit(HCI_RUNNING, &hdev->flags))
416 skb = create_monitor_event(hdev, HCI_DEV_OPEN);
420 if (sock_queue_rcv_skb(sk, skb))
423 if (test_bit(HCI_UP, &hdev->flags))
424 skb = create_monitor_event(hdev, HCI_DEV_UP);
425 else if (hci_dev_test_flag(hdev, HCI_SETUP))
426 skb = create_monitor_event(hdev, HCI_DEV_SETUP);
431 if (sock_queue_rcv_skb(sk, skb))
436 read_unlock(&hci_dev_list_lock);
439 /* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data and deliver it to RAW sockets via hci_send_to_sock(). Used for
 * device add/remove notifications (see hci_sock_dev_event).
 */
440 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
442 struct hci_event_hdr *hdr;
443 struct hci_ev_stack_internal *ev;
446 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
450 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
451 hdr->evt = HCI_EV_STACK_INTERNAL;
452 hdr->plen = sizeof(*ev) + dlen;
454 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
456 memcpy(ev->data, data, dlen);
/* Mark as incoming so delivery rules treat it like received traffic. */
458 bt_cb(skb)->incoming = 1;
459 __net_timestamp(skb);
461 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
462 hci_send_to_sock(hdev, skb);
/* Broadcast a device lifecycle @event: to monitor sockets (always, when
 * any are open), to RAW sockets as a stack-internal event (for
 * REG/UNREG/UP/DOWN), and on UNREG wake up sockets bound to the dead
 * device so they notice it is gone.
 */
466 void hci_sock_dev_event(struct hci_dev *hdev, int event)
468 BT_DBG("hdev %s event %d", hdev->name, event);
470 if (atomic_read(&monitor_promisc)) {
473 /* Send event to monitor */
474 skb = create_monitor_event(hdev, event);
476 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
477 HCI_SOCK_TRUSTED, NULL);
/* Events up to HCI_DEV_DOWN map onto legacy stack-internal events. */
482 if (event <= HCI_DEV_DOWN) {
483 struct hci_ev_si_device ev;
485 /* Send event to sockets */
487 ev.dev_id = hdev->id;
488 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
491 if (event == HCI_DEV_UNREG) {
494 /* Wake up sockets using this dead device */
495 read_lock(&hci_sk_list.lock);
496 sk_for_each(sk, &hci_sk_list.head) {
497 if (hci_pi(sk)->hdev == hdev) {
/* Notify blocked readers/pollers of the state change. */
499 sk->sk_state_change(sk);
502 read_unlock(&hci_sk_list.lock);
/* Find a registered management channel by number. Caller must hold
 * mgmt_chan_list_lock. Returns NULL when not found (elided in excerpt).
 */
506 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
508 struct hci_mgmt_chan *c;
510 list_for_each_entry(c, &mgmt_chan_list, list) {
511 if (c->channel == channel)
/* Locked wrapper around __hci_mgmt_chan_find(). */
518 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
520 struct hci_mgmt_chan *c;
522 mutex_lock(&mgmt_chan_list_lock);
523 c = __hci_mgmt_chan_find(channel);
524 mutex_unlock(&mgmt_chan_list_lock);
/* Register a management channel. Channel numbers below
 * HCI_CHANNEL_CONTROL are reserved; duplicates are rejected
 * (error returns elided in this excerpt).
 */
529 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
531 if (c->channel < HCI_CHANNEL_CONTROL)
534 mutex_lock(&mgmt_chan_list_lock);
535 if (__hci_mgmt_chan_find(c->channel)) {
536 mutex_unlock(&mgmt_chan_list_lock);
540 list_add_tail(&c->list, &mgmt_chan_list);
542 mutex_unlock(&mgmt_chan_list_lock);
546 EXPORT_SYMBOL(hci_mgmt_chan_register);
/* Unregister a management channel (list_del elided in this excerpt). */
548 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
550 mutex_lock(&mgmt_chan_list_lock);
552 mutex_unlock(&mgmt_chan_list_lock);
554 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
/* Release an HCI socket: drop monitor promiscuity if applicable, unlink
 * from the global socket list, tear down USER-channel exclusive access,
 * and purge queued skbs.
 */
556 static int hci_sock_release(struct socket *sock)
558 struct sock *sk = sock->sk;
559 struct hci_dev *hdev;
561 BT_DBG("sock %p sk %p", sock, sk);
566 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
567 atomic_dec(&monitor_promisc);
569 bt_sock_unlink(&hci_sk_list, sk);
571 hdev = hci_pi(sk)->hdev;
573 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
574 /* When releasing an user channel exclusive access,
575 * call hci_dev_do_close directly instead of calling
576 * hci_dev_close to ensure the exclusive access will
577 * be released and the controller brought back down.
579 * The checking of HCI_AUTO_OFF is not needed in this
580 * case since it will have been cleared already when
581 * opening the user channel.
583 hci_dev_do_close(hdev);
584 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
/* Device is visible to the management interface again. */
585 mgmt_index_added(hdev);
/* Balance the promisc count taken at bind time. */
588 atomic_dec(&hdev->promisc);
594 skb_queue_purge(&sk->sk_receive_queue);
595 skb_queue_purge(&sk->sk_write_queue);
/* HCIBLOCKADDR ioctl helper: copy a bdaddr from userspace and add it to
 * the device's BR/EDR reject list. Returns -EFAULT on bad user pointer
 * (elided) or the hci_bdaddr_list_add() result.
 */
601 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
606 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
611 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
613 hci_dev_unlock(hdev);
/* HCIUNBLOCKADDR ioctl helper: remove a user-supplied bdaddr from the
 * device's BR/EDR reject list.
 */
618 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
623 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
628 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
630 hci_dev_unlock(hdev);
635 /* Ioctls that require bound socket */
/* Dispatch ioctls that need a device bound to the socket. Rejected when
 * the device is in USER-channel or unconfigured state; several commands
 * additionally require CAP_NET_ADMIN.
 * NOTE(review): the switch statement and case labels are elided here.
 */
636 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
639 struct hci_dev *hdev = hci_hdev_from_sock(sk);
642 return PTR_ERR(hdev);
644 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
647 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
/* Legacy ioctls apply to BR/EDR controllers only. */
650 if (hdev->dev_type != HCI_BREDR)
655 if (!capable(CAP_NET_ADMIN))
660 return hci_get_conn_info(hdev, (void __user *) arg);
663 return hci_get_auth_info(hdev, (void __user *) arg);
666 if (!capable(CAP_NET_ADMIN))
668 return hci_sock_blacklist_add(hdev, (void __user *) arg);
671 if (!capable(CAP_NET_ADMIN))
673 return hci_sock_blacklist_del(hdev, (void __user *) arg);
/* Top-level ioctl handler for HCI sockets. Only RAW-channel sockets may
 * issue ioctls; device-management commands require CAP_NET_ADMIN, and
 * anything unhandled falls through to hci_sock_bound_ioctl().
 * NOTE(review): switch/case labels are elided in this excerpt.
 */
679 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
682 void __user *argp = (void __user *) arg;
683 struct sock *sk = sock->sk;
686 BT_DBG("cmd %x arg %lx", cmd, arg);
690 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
699 return hci_get_dev_list(argp);
702 return hci_get_dev_info(argp);
705 return hci_get_conn_list(argp);
708 if (!capable(CAP_NET_ADMIN))
710 return hci_dev_open(arg);
713 if (!capable(CAP_NET_ADMIN))
715 return hci_dev_close(arg);
718 if (!capable(CAP_NET_ADMIN))
720 return hci_dev_reset(arg);
723 if (!capable(CAP_NET_ADMIN))
725 return hci_dev_reset_stat(arg);
735 if (!capable(CAP_NET_ADMIN))
737 return hci_dev_cmd(cmd, argp);
740 return hci_inquiry(argp);
/* Anything else needs a bound device. */
745 err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a channel (RAW, USER, MONITOR, or a registered
 * management channel) and optionally a device. Enforces per-channel
 * capability checks and the USER channel's exclusive device access.
 * NOTE(review): error paths, locking and several case labels are elided
 * in this excerpt.
 */
752 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
755 struct sockaddr_hci haddr;
756 struct sock *sk = sock->sk;
757 struct hci_dev *hdev = NULL;
760 BT_DBG("sock %p sk %p", sock, sk);
/* Copy at most sizeof(haddr); zero-fill guards short user addresses. */
765 memset(&haddr, 0, sizeof(haddr));
766 len = min_t(unsigned int, sizeof(haddr), addr_len);
767 memcpy(&haddr, addr, len);
769 if (haddr.hci_family != AF_BLUETOOTH)
774 /* Allow detaching from dead device and attaching to alive device, if
775 * the caller wants to re-bind (instead of close) this socket in
776 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
778 hdev = hci_pi(sk)->hdev;
779 if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
780 hci_pi(sk)->hdev = NULL;
781 sk->sk_state = BT_OPEN;
/* Re-binding an already-bound socket is otherwise refused. */
786 if (sk->sk_state == BT_BOUND) {
791 switch (haddr.hci_channel) {
792 case HCI_CHANNEL_RAW:
793 if (hci_pi(sk)->hdev) {
798 if (haddr.hci_dev != HCI_DEV_NONE) {
799 hdev = hci_dev_get(haddr.hci_dev);
805 atomic_inc(&hdev->promisc);
808 hci_pi(sk)->hdev = hdev;
811 case HCI_CHANNEL_USER:
812 if (hci_pi(sk)->hdev) {
/* USER channel requires a concrete device index... */
817 if (haddr.hci_dev == HCI_DEV_NONE) {
/* ...and CAP_NET_ADMIN. */
822 if (!capable(CAP_NET_ADMIN)) {
827 hdev = hci_dev_get(haddr.hci_dev);
/* Refuse exclusive access while the device is busy or already up. */
833 if (test_bit(HCI_INIT, &hdev->flags) ||
834 hci_dev_test_flag(hdev, HCI_SETUP) ||
835 hci_dev_test_flag(hdev, HCI_CONFIG) ||
836 (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
837 test_bit(HCI_UP, &hdev->flags))) {
/* Atomically claim exclusive USER-channel access. */
843 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
/* Hide the device from the management interface. */
849 mgmt_index_removed(hdev);
851 err = hci_dev_open(hdev->id);
853 if (err == -EALREADY) {
854 /* In case the transport is already up and
855 * running, clear the error here.
857 * This can happen when opening an user
858 * channel and HCI_AUTO_OFF grace period
/* On genuine open failure, roll back the exclusive claim. */
863 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
864 mgmt_index_added(hdev);
870 atomic_inc(&hdev->promisc);
872 hci_pi(sk)->hdev = hdev;
875 case HCI_CHANNEL_MONITOR:
/* Monitor binds to no particular device. */
876 if (haddr.hci_dev != HCI_DEV_NONE) {
881 if (!capable(CAP_NET_RAW)) {
886 /* The monitor interface is restricted to CAP_NET_RAW
887 * capabilities and with that implicitly trusted.
889 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
/* Replay existing controller state to the new monitor. */
891 send_monitor_replay(sk);
893 atomic_inc(&monitor_promisc);
/* Default: must be a registered management channel. */
897 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
902 if (haddr.hci_dev != HCI_DEV_NONE) {
907 /* Users with CAP_NET_ADMIN capabilities are allowed
908 * access to all management commands and events. For
909 * untrusted users the interface is restricted and
910 * also only untrusted events are sent.
912 if (capable(CAP_NET_ADMIN))
913 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
915 /* At the moment the index and unconfigured index events
916 * are enabled unconditionally. Setting them on each
917 * socket when binding keeps this functionality. They
918 * however might be cleared later and then sending of these
919 * events will be disabled, but that is then intentional.
921 * This also enables generic events that are safe to be
922 * received by untrusted users. Example for such events
923 * are changes to settings, class of device, name etc.
925 if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
926 hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
927 hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
928 hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
/* Commit: record channel and mark the socket bound. */
934 hci_pi(sk)->channel = haddr.hci_channel;
935 sk->sk_state = BT_BOUND;
/* Report the socket's bound address: family, device index and channel.
 * Fails via hci_hdev_from_sock() when no live device is bound
 * (error path elided in this excerpt).
 */
942 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
943 int *addr_len, int peer)
945 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
946 struct sock *sk = sock->sk;
947 struct hci_dev *hdev;
950 BT_DBG("sock %p sk %p", sock, sk);
957 hdev = hci_hdev_from_sock(sk);
963 *addr_len = sizeof(*haddr);
964 haddr->hci_family = AF_BLUETOOTH;
965 haddr->hci_dev = hdev->id;
966 haddr->hci_channel= hci_pi(sk)->channel;
/* Attach ancillary data to a received message as requested by the
 * socket's cmsg mask: packet direction (HCI_CMSG_DIR) and/or receive
 * timestamp (HCI_CMSG_TSTAMP), with compat 32-bit timeval handling.
 */
973 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
976 __u32 mask = hci_pi(sk)->cmsg_mask;
978 if (mask & HCI_CMSG_DIR) {
979 int incoming = bt_cb(skb)->incoming;
980 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
984 if (mask & HCI_CMSG_TSTAMP) {
986 struct compat_timeval ctv;
992 skb_get_timestamp(skb, &tv);
/* 32-bit compat callers get a compat_timeval instead of timeval. */
997 if (!COMPAT_USE_64BIT_TIME &&
998 (msg->msg_flags & MSG_CMSG_COMPAT)) {
999 ctv.tv_sec = tv.tv_sec;
1000 ctv.tv_usec = tv.tv_usec;
1006 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* Receive one queued skb from the socket, truncating to @len (MSG_TRUNC
 * set when shortened), then attach per-channel ancillary data:
 * filter cmsgs for RAW, plain timestamps for USER/MONITOR/mgmt.
 */
1010 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1013 int noblock = flags & MSG_DONTWAIT;
1014 struct sock *sk = sock->sk;
1015 struct sk_buff *skb;
1018 BT_DBG("sock %p, sk %p", sock, sk);
1020 if (flags & MSG_OOB)
1023 if (sk->sk_state == BT_CLOSED)
1026 skb = skb_recv_datagram(sk, flags, noblock, &err);
/* Caller's buffer is smaller than the datagram (length check elided). */
1032 msg->msg_flags |= MSG_TRUNC;
1036 skb_reset_transport_header(skb);
1037 err = skb_copy_datagram_msg(skb, 0, msg, copied);
1039 switch (hci_pi(sk)->channel) {
1040 case HCI_CHANNEL_RAW:
1041 hci_sock_cmsg(sk, msg, skb);
1043 case HCI_CHANNEL_USER:
1044 case HCI_CHANNEL_MONITOR:
1045 sock_recv_timestamp(msg, sk, skb);
/* Management channels also get plain receive timestamps. */
1048 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1049 sock_recv_timestamp(msg, sk, skb);
1053 skb_free_datagram(sk, skb);
/* Return bytes copied unless an error occurred. */
1055 return err ? : copied;
/* Parse and dispatch one management command from @msg on channel @chan:
 * validate the mgmt_hdr, look up the handler by opcode, enforce trust /
 * index / length constraints, then invoke the handler. Failures are
 * reported back to the socket via mgmt_cmd_status().
 * NOTE(review): goto cleanup labels and kfree/hci_dev_put are elided.
 */
1058 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1059 struct msghdr *msg, size_t msglen)
1063 struct mgmt_hdr *hdr;
1064 u16 opcode, index, len;
1065 struct hci_dev *hdev = NULL;
1066 const struct hci_mgmt_handler *handler;
1067 bool var_len, no_hdev;
1070 BT_DBG("got %zu bytes", msglen);
1072 if (msglen < sizeof(*hdr))
1075 buf = kmalloc(msglen, GFP_KERNEL);
1079 if (memcpy_from_msg(buf, msg, msglen)) {
/* Header fields are little-endian on the wire. */
1085 opcode = __le16_to_cpu(hdr->opcode);
1086 index = __le16_to_cpu(hdr->index);
1087 len = __le16_to_cpu(hdr->len);
/* Declared parameter length must match what was actually sent. */
1089 if (len != msglen - sizeof(*hdr)) {
1094 if (opcode >= chan->handler_count ||
1095 chan->handlers[opcode].func == NULL) {
1096 BT_DBG("Unknown op %u", opcode);
1097 err = mgmt_cmd_status(sk, index, opcode,
1098 MGMT_STATUS_UNKNOWN_COMMAND);
1102 handler = &chan->handlers[opcode];
/* Untrusted sockets may only run handlers marked HCI_MGMT_UNTRUSTED. */
1104 if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1105 !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1106 err = mgmt_cmd_status(sk, index, opcode,
1107 MGMT_STATUS_PERMISSION_DENIED);
1111 if (index != MGMT_INDEX_NONE) {
1112 hdev = hci_dev_get(index);
1114 err = mgmt_cmd_status(sk, index, opcode,
1115 MGMT_STATUS_INVALID_INDEX);
/* Devices in setup/config or claimed by a USER channel are off-limits. */
1119 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1120 hci_dev_test_flag(hdev, HCI_CONFIG) ||
1121 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1122 err = mgmt_cmd_status(sk, index, opcode,
1123 MGMT_STATUS_INVALID_INDEX);
1127 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1128 !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1129 err = mgmt_cmd_status(sk, index, opcode,
1130 MGMT_STATUS_INVALID_INDEX);
/* Handler's hdev expectation must match whether an index was given. */
1135 no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1136 if (no_hdev != !hdev) {
1137 err = mgmt_cmd_status(sk, index, opcode,
1138 MGMT_STATUS_INVALID_INDEX);
/* Fixed-length handlers need an exact match; variable ones a minimum. */
1142 var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1143 if ((var_len && len < handler->data_len) ||
1144 (!var_len && len != handler->data_len)) {
1145 err = mgmt_cmd_status(sk, index, opcode,
1146 MGMT_STATUS_INVALID_PARAMS);
/* Give the channel a chance to init per-device state once. */
1150 if (hdev && chan->hdev_init)
1151 chan->hdev_init(sk, hdev);
1153 cp = buf + sizeof(*hdr);
1155 err = handler->func(sk, hdev, cp, len);
/* Send a frame from userspace. Management channels are dispatched to
 * hci_mgmt_cmd(); RAW/USER frames are copied into an skb whose first
 * byte is the packet type, then routed to the command queue or raw
 * queue with the appropriate capability/security-filter checks.
 * NOTE(review): error/cleanup paths are elided in this excerpt.
 */
1169 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1172 struct sock *sk = sock->sk;
1173 struct hci_mgmt_chan *chan;
1174 struct hci_dev *hdev;
1175 struct sk_buff *skb;
1178 BT_DBG("sock %p sk %p", sock, sk);
1180 if (msg->msg_flags & MSG_OOB)
1183 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
/* Minimum 4 bytes: type byte plus the smallest HCI header. */
1187 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1192 switch (hci_pi(sk)->channel) {
1193 case HCI_CHANNEL_RAW:
1194 case HCI_CHANNEL_USER:
/* Monitor channel is receive-only. */
1196 case HCI_CHANNEL_MONITOR:
1200 mutex_lock(&mgmt_chan_list_lock);
1201 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1203 err = hci_mgmt_cmd(chan, sk, msg, len);
1207 mutex_unlock(&mgmt_chan_list_lock);
1211 hdev = hci_hdev_from_sock(sk);
1213 err = PTR_ERR(hdev);
1217 if (!test_bit(HCI_UP, &hdev->flags)) {
1222 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1226 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
/* First byte supplied by userspace selects the packet type. */
1231 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
1234 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1235 /* No permission check is needed for user channel
1236 * since that gets enforced when binding the socket.
1238 * However check that the packet type is valid.
1240 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
1241 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
1242 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
1247 skb_queue_tail(&hdev->raw_q, skb);
1248 queue_work(hdev->workqueue, &hdev->tx_work);
1249 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
1250 u16 opcode = get_unaligned_le16(skb->data);
1251 u16 ogf = hci_opcode_ogf(opcode);
1252 u16 ocf = hci_opcode_ocf(opcode);
/* Unprivileged RAW sockets may only send filter-approved commands. */
1254 if (((ogf > HCI_SFLT_MAX_OGF) ||
1255 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1256 &hci_sec_filter.ocf_mask[ogf])) &&
1257 !capable(CAP_NET_RAW)) {
/* Vendor OGF 0x3f bypasses the command queue (check elided). */
1263 skb_queue_tail(&hdev->raw_q, skb);
1264 queue_work(hdev->workqueue, &hdev->tx_work);
1266 /* Stand-alone HCI commands must be flagged as
1267 * single-command requests.
1269 bt_cb(skb)->hci.req_start = true;
1271 skb_queue_tail(&hdev->cmd_q, skb);
1272 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Non-command RAW traffic (ACL/SCO) requires CAP_NET_RAW. */
1275 if (!capable(CAP_NET_RAW)) {
1280 if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
1281 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
1286 skb_queue_tail(&hdev->raw_q, skb);
1287 queue_work(hdev->workqueue, &hdev->tx_work);
/* Set SOL_HCI options on a RAW socket: direction/timestamp cmsg flags
 * and the packet filter. Without CAP_NET_RAW the requested filter is
 * clamped to the security filter's type/event masks.
 * NOTE(review): switch/case labels and locking are elided here.
 */
1301 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1302 char __user *optval, unsigned int len)
1304 struct hci_ufilter uf = { .opcode = 0 };
1305 struct sock *sk = sock->sk;
1306 int err = 0, opt = 0;
1308 BT_DBG("sk %p, opt %d", sk, optname);
/* Options are only meaningful on the RAW channel. */
1312 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1319 if (get_user(opt, (int __user *)optval)) {
1325 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1327 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1330 case HCI_TIME_STAMP:
1331 if (get_user(opt, (int __user *)optval)) {
1337 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1339 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
/* HCI_FILTER: start from the current filter so short writes keep
 * existing settings.
 */
1344 struct hci_filter *f = &hci_pi(sk)->filter;
1346 uf.type_mask = f->type_mask;
1347 uf.opcode = f->opcode;
1348 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1349 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1352 len = min_t(unsigned int, len, sizeof(uf));
1353 if (copy_from_user(&uf, optval, len)) {
/* Unprivileged sockets cannot widen beyond the security filter. */
1358 if (!capable(CAP_NET_RAW)) {
1359 uf.type_mask &= hci_sec_filter.type_mask;
1360 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1361 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
/* Commit the (possibly clamped) filter back to the socket. */
1365 struct hci_filter *f = &hci_pi(sk)->filter;
1367 f->type_mask = uf.type_mask;
1368 f->opcode = uf.opcode;
1369 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1370 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* Read SOL_HCI options from a RAW socket: the direction/timestamp cmsg
 * flags as booleans, or the current hci_ufilter (truncated to the
 * caller's optlen). Non-RAW channels are rejected.
 * NOTE(review): switch/case labels are elided in this excerpt.
 */
1384 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1385 char __user *optval, int __user *optlen)
1387 struct hci_ufilter uf;
1388 struct sock *sk = sock->sk;
1389 int len, opt, err = 0;
1391 BT_DBG("sk %p, opt %d", sk, optname);
1393 if (get_user(len, optlen))
1398 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1405 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1410 if (put_user(opt, optval))
1414 case HCI_TIME_STAMP:
1415 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1420 if (put_user(opt, optval))
/* HCI_FILTER: snapshot the kernel filter into the user structure. */
1426 struct hci_filter *f = &hci_pi(sk)->filter;
1428 memset(&uf, 0, sizeof(uf));
1429 uf.type_mask = f->type_mask;
1430 uf.opcode = f->opcode;
1431 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1432 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1435 len = min_t(unsigned int, len, sizeof(uf));
1436 if (copy_to_user(optval, &uf, len))
/* proto_ops for HCI sockets: datagram-style, no listen/connect/accept. */
1450 static const struct proto_ops hci_sock_ops = {
1451 .family = PF_BLUETOOTH,
1452 .owner = THIS_MODULE,
1453 .release = hci_sock_release,
1454 .bind = hci_sock_bind,
1455 .getname = hci_sock_getname,
1456 .sendmsg = hci_sock_sendmsg,
1457 .recvmsg = hci_sock_recvmsg,
1458 .ioctl = hci_sock_ioctl,
1459 .poll = datagram_poll,
1460 .listen = sock_no_listen,
1461 .shutdown = sock_no_shutdown,
1462 .setsockopt = hci_sock_setsockopt,
1463 .getsockopt = hci_sock_getsockopt,
1464 .connect = sock_no_connect,
1465 .socketpair = sock_no_socketpair,
1466 .accept = sock_no_accept,
1467 .mmap = sock_no_mmap
/* Protocol descriptor; obj_size makes sk_alloc() reserve hci_pinfo. */
1470 static struct proto hci_sk_proto = {
1472 .owner = THIS_MODULE,
1473 .obj_size = sizeof(struct hci_pinfo)
/* Create a new HCI socket (SOCK_RAW only): allocate the sock, wire up
 * hci_sock_ops, initialize state to BT_OPEN and link it into
 * hci_sk_list.
 */
1476 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1481 BT_DBG("sock %p", sock);
1483 if (sock->type != SOCK_RAW)
1484 return -ESOCKTNOSUPPORT;
1486 sock->ops = &hci_sock_ops;
1488 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1492 sock_init_data(sock, sk);
1494 sock_reset_flag(sk, SOCK_ZAPPED);
1496 sk->sk_protocol = protocol;
1498 sock->state = SS_UNCONNECTED;
1499 sk->sk_state = BT_OPEN;
1501 bt_sock_link(&hci_sk_list, sk);
/* PF_BLUETOOTH family hook for BTPROTO_HCI socket creation. */
1505 static const struct net_proto_family hci_sock_family_ops = {
1506 .family = PF_BLUETOOTH,
1507 .owner = THIS_MODULE,
1508 .create = hci_sock_create,
/* Register the HCI socket layer: proto, BTPROTO_HCI family handler and
 * the /proc entry, unwinding in reverse order on failure.
 */
1511 int __init hci_sock_init(void)
/* sockaddr_hci must fit in the generic sockaddr passed to bind(). */
1515 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1517 err = proto_register(&hci_sk_proto, 0);
1521 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1523 BT_ERR("HCI socket registration failed");
1527 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1529 BT_ERR("Failed to create HCI proc file");
1530 bt_sock_unregister(BTPROTO_HCI);
1534 BT_INFO("HCI socket layer initialized");
/* Error path: undo the proto registration. */
1539 proto_unregister(&hci_sk_proto);
/* Tear down everything hci_sock_init() registered, in reverse order. */
1543 void hci_sock_cleanup(void)
1545 bt_procfs_cleanup(&init_net, "hci");
1546 bt_sock_unregister(BTPROTO_HCI);
1547 proto_unregister(&hci_sk_proto);