/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
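/* Worked example (illustrative only, not used by the code): the
 * sendmsg() path below checks
 * hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])
 * for unprivileged senders. For HCI_Inquiry (OGF 0x01, OCF 0x0001)
 * the lookup lands in ocf_mask[1][0] = 0xbe000006, whose bit 1 is
 * set, so the command passes without CAP_NET_RAW. A command whose
 * bit is clear, or whose OGF exceeds HCI_SFLT_MAX_OGF, requires
 * CAP_NET_RAW.
 */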
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
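/* For reference: every frame queued to HCI_CHANNEL_MONITOR starts with
 * struct hci_mon_hdr from <net/bluetooth/hci_mon.h>, i.e. three
 * little-endian 16-bit fields:
 *
 *	opcode (HCI_MON_*), index (controller id), len (payload length)
 *
 * followed by the payload built above. This is the format btmon reads.
 */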
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
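/* Payload layout of the HCI_MON_CTRL_OPEN message built above, for
 * reference (14 + TASK_COMM_LEN bytes total):
 *
 *	__le32 cookie; __le16 format; __u8 version[3];
 *	__le32 flags;  __u8 comm_len; char comm[TASK_COMM_LEN];
 */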
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
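/* A minimal registration sketch (illustrative; mgmt.c is the real
 * caller for HCI_CHANNEL_CONTROL, and "example_handlers" here is a
 * hypothetical handler table):
 *
 *	static struct hci_mgmt_chan example_chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(example_handlers),
 *		.handlers	= example_handlers,
 *	};
 *
 *	err = hci_mgmt_chan_register(&example_chan);
 *
 * Registration fails with -EALREADY when the channel number is taken
 * and -EINVAL for channels below HCI_CHANNEL_CONTROL.
 */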
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);
	sock_put(sk);
	return 0;
}
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
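/* Userspace sketch (illustrative): the unbound-raw-socket ioctls above
 * are what tools like hciconfig issue, e.g.:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(fd, HCIDEVUP, 0);		// bring up hci0; CAP_NET_ADMIN
 *
 * The HCIGET* calls work without elevated capabilities.
 */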
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
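/* Userspace sketch (illustrative) of binding to the raw channel of
 * hci0; error handling omitted:
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * HCI_CHANNEL_USER works the same way but requires CAP_NET_ADMIN and
 * exclusive access to the controller, as enforced above.
 */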
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
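/* For reference, the management command wire format parsed above is
 * struct mgmt_hdr -- three little-endian 16-bit fields (opcode, index,
 * len) -- immediately followed by len bytes of command parameters.
 */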
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
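/* Shape of a valid logging frame, as enforced above (sketch):
 *
 *	struct hci_mon_hdr hdr;	// opcode 0x0000, index, len
 *	__u8 priority;		// 0-7
 *	__u8 ident_len;		// counts the ident's NUL terminator
 *	char ident[ident_len];	// NUL terminated, absent when len is 0
 *	char message[];		// NUL terminated
 */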
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
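/* Userspace sketch (illustrative): sending HCI_Reset (OGF 0x03,
 * OCF 0x0003) on a bound raw socket. The leading byte is the packet
 * type consumed above, and unprivileged callers remain subject to
 * hci_sec_filter:
 *
 *	unsigned char cmd[] = { HCI_COMMAND_PKT, 0x03, 0x0c, 0x00 };
 *	send(fd, cmd, sizeof(cmd), 0);
 */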
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
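/* Userspace sketch (illustrative): restricting a raw socket to HCI
 * events only; the userspace struct mirrors hci_ufilter field for
 * field:
 *
 *	struct hci_filter flt = {
 *		.type_mask  = 1 << HCI_EVENT_PKT,
 *		.event_mask = { ~0U, ~0U },
 *	};
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * Without CAP_NET_RAW the requested masks are silently clamped to
 * hci_sec_filter, as done above.
 */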
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}