2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
37 #include "mgmt_util.h"
/* File-scope state for the HCI socket layer:
 * - mgmt_chan_list / mgmt_chan_list_lock: registered mgmt channels and their guard
 * - sock_cookie_ida: allocator for per-socket monitor cookies
 * - monitor_promisc: count of open monitor sockets (gates monitor traffic)
 */
39 static LIST_HEAD(mgmt_chan_list);
40 static DEFINE_MUTEX(mgmt_chan_list_lock);
42 static DEFINE_IDA(sock_cookie_ida);
44 static atomic_t monitor_promisc = ATOMIC_INIT(0);
46 /* ----- HCI socket interface ----- */
/* Cast a struct sock to the HCI-specific per-socket info.
 * NOTE(review): the "struct hci_pinfo {" opener and several members are
 * missing from this excerpt; only a few fields are visible below.
 */
49 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* Per-socket receive filter (raw channel only) */
54 struct hci_filter filter;
/* Bound HCI channel (raw/user/monitor/control/...) */
56 unsigned short channel;
/* Task name captured when the cookie is generated */
59 char comm[TASK_COMM_LEN];
/* Resolve the hci_dev a socket is bound to.
 * Returns ERR_PTR(-EBADFD) when no usable device is bound (presumably when
 * !hdev — the guard line is not visible in this excerpt) and
 * ERR_PTR(-EPIPE) when the bound device is being unregistered.
 */
62 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
64 struct hci_dev *hdev = hci_pi(sk)->hdev;
67 return ERR_PTR(-EBADFD);
68 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
69 return ERR_PTR(-EPIPE);
/* Atomically set per-socket flag bit @nr. */
73 void hci_sock_set_flag(struct sock *sk, int nr)
75 set_bit(nr, &hci_pi(sk)->flags);
/* Atomically clear per-socket flag bit @nr. */
78 void hci_sock_clear_flag(struct sock *sk, int nr)
80 clear_bit(nr, &hci_pi(sk)->flags);
/* Test per-socket flag bit @nr; non-zero when set. */
83 int hci_sock_test_flag(struct sock *sk, int nr)
85 return test_bit(nr, &hci_pi(sk)->flags);
/* Return the HCI channel this socket is bound to. */
88 unsigned short hci_sock_get_channel(struct sock *sk)
90 return hci_pi(sk)->channel;
/* Return the socket's monitor cookie (0 when none was generated yet). */
93 u32 hci_sock_get_cookie(struct sock *sk)
95 return hci_pi(sk)->cookie;
/* Lazily allocate a monitor cookie for @sk and capture the caller's task
 * name. Returns true when a new cookie was generated, false when one
 * already existed (return statements are not visible in this excerpt —
 * inferred from callers that branch on the result).
 */
98 static bool hci_sock_gen_cookie(struct sock *sk)
100 int id = hci_pi(sk)->cookie;
/* Cookies start at 1; 0 means "not assigned" */
103 id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
107 hci_pi(sk)->cookie = id;
108 get_task_comm(hci_pi(sk)->comm, current);
/* Release the socket's monitor cookie back to the IDA. The cookie field
 * is poisoned with 0xffffffff before the id is returned to the allocator.
 */
115 static void hci_sock_free_cookie(struct sock *sk)
117 int id = hci_pi(sk)->cookie;
120 hci_pi(sk)->cookie = 0xffffffff;
121 ida_simple_remove(&sock_cookie_ida, id);
/* Non-atomic bit test over a __u32 array (used for 64-bit event masks and
 * the OCF security filter); nr indexes bits across consecutive words.
 */
125 static inline int hci_test_bit(int nr, const void *addr)
127 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
130 /* Security filter */
/* Highest opcode group (OGF) covered by the security filter */
131 #define HCI_SFLT_MAX_OGF 5
/* Per-OGF bitmaps of HCI command OCFs allowed without CAP_NET_RAW.
 * NOTE(review): other members of this struct are missing from this excerpt.
 */
133 struct hci_sec_filter {
136 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
/* Default security filter: which packet types/events/commands unprivileged
 * raw sockets may send or receive. The OCF masks are indexed by OGF; the
 * magic constants are per-command permission bitmaps.
 */
139 static const struct hci_sec_filter hci_sec_filter = {
143 { 0x1000d9fe, 0x0000b00c },
/* OGF_LINK_CTL */
148 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
149 /* OGF_LINK_POLICY */
150 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
/* OGF_HOST_CTL */
152 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
/* OGF_INFO_PARAM */
154 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
155 /* OGF_STATUS_PARAM */
156 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all open HCI sockets, protected by its rwlock. */
160 static struct bt_sock_list hci_sk_list = {
161 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Apply the socket's receive filter to @skb. Checks the packet-type mask,
 * then for HCI events the event mask, and for cmd-complete/cmd-status the
 * opcode filter. NOTE(review): the true/false return statements between
 * the checks are missing from this excerpt.
 */
164 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
166 struct hci_filter *flt;
167 int flt_type, flt_event;
170 flt = &hci_pi(sk)->filter;
172 flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
174 if (!test_bit(flt_type, &flt->type_mask))
177 /* Extra filter for event packets only */
178 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
/* First byte of an event packet is the event code */
181 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
183 if (!hci_test_bit(flt_event, &flt->event_mask))
186 /* Check filter only when opcode is set */
190 if (flt_event == HCI_EV_CMD_COMPLETE &&
191 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
194 if (flt_event == HCI_EV_CMD_STATUS &&
195 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
201 /* Send frame to RAW socket */
/* Deliver @skb to every bound raw/user-channel socket attached to @hdev.
 * A private copy with one byte of headroom is created lazily (on the first
 * interested socket) so the packet-type byte can be prepended; each
 * recipient then gets a clone of that copy.
 */
202 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
205 struct sk_buff *skb_copy = NULL;
207 BT_DBG("hdev %p len %d", hdev, skb->len);
209 read_lock(&hci_sk_list.lock);
211 sk_for_each(sk, &hci_sk_list.head) {
212 struct sk_buff *nskb;
214 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
217 /* Don't send frame to the socket it came from */
/* Raw channel: only the four core packet types, then per-socket filter */
221 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
222 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
223 hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
224 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
225 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
227 if (is_filtered_packet(sk, skb))
/* User channel: incoming traffic only, no command packets */
229 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
230 if (!bt_cb(skb)->incoming)
232 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
233 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
234 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
237 /* Don't send frame to other channel types */
242 /* Create a private copy with headroom */
243 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
247 /* Put type byte before the data */
248 memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
251 nskb = skb_clone(skb_copy, GFP_ATOMIC);
/* On queue failure the clone is dropped (kfree_skb line not visible) */
255 if (sock_queue_rcv_skb(sk, nskb))
259 read_unlock(&hci_sk_list.lock);
264 /* Send frame to sockets with specific channel */
/* Clone @skb to every bound socket on @channel that has @flag set,
 * skipping @skip_sk (the originating socket).
 */
265 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
266 int flag, struct sock *skip_sk)
270 BT_DBG("channel %u len %d", channel, skb->len);
272 read_lock(&hci_sk_list.lock);
274 sk_for_each(sk, &hci_sk_list.head) {
275 struct sk_buff *nskb;
277 /* Ignore socket without the flag set */
278 if (!hci_sock_test_flag(sk, flag))
281 /* Skip the original socket */
285 if (sk->sk_state != BT_BOUND)
288 if (hci_pi(sk)->channel != channel)
291 nskb = skb_clone(skb, GFP_ATOMIC)
295 if (sock_queue_rcv_skb(sk, nskb))
299 read_unlock(&hci_sk_list.lock);
302 /* Send frame to monitor socket */
/* Mirror @skb to the monitor channel, translating the HCI packet type and
 * direction into the corresponding HCI_MON_* opcode and prepending a
 * monitor header. Cheap early-out when no monitor socket is open.
 */
303 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
305 struct sk_buff *skb_copy = NULL;
306 struct hci_mon_hdr *hdr;
309 if (!atomic_read(&monitor_promisc))
312 BT_DBG("hdev %p len %d", hdev, skb->len);
314 switch (hci_skb_pkt_type(skb)) {
315 case HCI_COMMAND_PKT:
316 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
/* HCI_EVENT_PKT case (label line not visible in this excerpt) */
319 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
321 case HCI_ACLDATA_PKT:
322 if (bt_cb(skb)->incoming)
323 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
325 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
327 case HCI_SCODATA_PKT:
328 if (bt_cb(skb)->incoming)
329 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
331 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
/* Anything else is treated as vendor diagnostic data */
334 opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
340 /* Create a private copy with headroom */
341 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
345 /* Put header before the data */
346 hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
347 hdr->opcode = opcode;
348 hdr->index = cpu_to_le16(hdev->id);
349 hdr->len = cpu_to_le16(skb->len);
351 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
352 HCI_SOCK_TRUSTED, NULL);
/* Broadcast a management (control-channel) event to the monitor channel.
 * For every bound control socket with @flag set (except @skip_sk) a
 * HCI_MON_CTRL_EVENT frame is built carrying that socket's cookie, the
 * event code and @data, stamped with @tstamp.
 */
356 void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
357 void *data, u16 data_len, ktime_t tstamp,
358 int flag, struct sock *skip_sk)
/* Index is the device id, or MGMT_INDEX_NONE when hdev is NULL */
364 index = cpu_to_le16(hdev->id);
366 index = cpu_to_le16(MGMT_INDEX_NONE);
368 read_lock(&hci_sk_list.lock);
370 sk_for_each(sk, &hci_sk_list.head) {
371 struct hci_mon_hdr *hdr;
374 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
377 /* Ignore socket without the flag set */
378 if (!hci_sock_test_flag(sk, flag))
381 /* Skip the original socket */
/* 4 bytes cookie + 2 bytes event + payload */
385 skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
389 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
390 put_unaligned_le16(event, skb_put(skb, 2));
393 skb_put_data(skb, data, data_len);
395 skb->tstamp = tstamp;
397 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
398 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
400 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
402 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
403 HCI_SOCK_TRUSTED, NULL);
407 read_unlock(&hci_sk_list.lock);
/* Build a monitor frame describing a device lifecycle @event
 * (register/unregister/setup/open/close). Returns a freshly allocated,
 * timestamped skb with the monitor header prepended.
 * NOTE(review): the switch/case labels and NULL-check lines are missing
 * from this excerpt.
 */
410 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
412 struct hci_mon_hdr *hdr;
413 struct hci_mon_new_index *ni;
414 struct hci_mon_index_info *ii;
/* HCI_DEV_REG: announce the new index with type/bus/addr/name */
420 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
424 ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
425 ni->type = hdev->dev_type;
427 bacpy(&ni->bdaddr, &hdev->bdaddr);
428 memcpy(ni->name, hdev->name, 8);
430 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
/* HCI_DEV_UNREG: empty payload */
434 skb = bt_skb_alloc(0, GFP_ATOMIC);
438 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
/* HCI_DEV_SETUP: skipped while the manufacturer is still unknown */
442 if (hdev->manufacturer == 0xffff)
448 skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
452 ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
453 bacpy(&ii->bdaddr, &hdev->bdaddr);
454 ii->manufacturer = cpu_to_le16(hdev->manufacturer);
456 opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
/* HCI_DEV_OPEN / HCI_DEV_CLOSE: empty payload */
460 skb = bt_skb_alloc(0, GFP_ATOMIC);
464 opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
468 skb = bt_skb_alloc(0, GFP_ATOMIC);
472 opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
479 __net_timestamp(skb);
481 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
482 hdr->opcode = opcode;
483 hdr->index = cpu_to_le16(hdev->id);
484 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Build a HCI_MON_CTRL_OPEN monitor frame announcing @sk: cookie, channel
 * format, version info, trust flag and the owning task name. Returns NULL
 * when the socket has no cookie or an unsupported channel.
 */
489 static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
491 struct hci_mon_hdr *hdr;
497 /* No message needed when cookie is not present */
498 if (!hci_pi(sk)->cookie)
501 switch (hci_pi(sk)->channel) {
502 case HCI_CHANNEL_RAW:
504 ver[0] = BT_SUBSYS_VERSION;
505 put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
507 case HCI_CHANNEL_USER:
509 ver[0] = BT_SUBSYS_VERSION;
510 put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
512 case HCI_CHANNEL_CONTROL:
514 mgmt_fill_version_info(ver);
517 /* No message for unsupported format */
/* cookie(4) + format(2) + ver(3) + flags(4) + len(1) = 14 bytes + comm */
521 skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
525 flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
527 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
528 put_unaligned_le16(format, skb_put(skb, 2));
529 skb_put_data(skb, ver, sizeof(ver));
530 put_unaligned_le32(flags, skb_put(skb, 4));
531 skb_put_u8(skb, TASK_COMM_LEN);
532 skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
534 __net_timestamp(skb);
536 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
537 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
538 if (hci_pi(sk)->hdev)
539 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
541 hdr->index = cpu_to_le16(HCI_DEV_NONE);
542 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Build a HCI_MON_CTRL_CLOSE monitor frame carrying the socket's cookie.
 * Returns NULL when no cookie exists or the channel is unsupported.
 */
547 static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
549 struct hci_mon_hdr *hdr;
552 /* No message needed when cookie is not present */
553 if (!hci_pi(sk)->cookie)
556 switch (hci_pi(sk)->channel) {
557 case HCI_CHANNEL_RAW:
558 case HCI_CHANNEL_USER:
559 case HCI_CHANNEL_CONTROL:
562 /* No message for unsupported format */
566 skb = bt_skb_alloc(4, GFP_ATOMIC);
570 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
572 __net_timestamp(skb);
574 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
575 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
576 if (hci_pi(sk)->hdev)
577 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
579 hdr->index = cpu_to_le16(HCI_DEV_NONE);
580 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Build a HCI_MON_CTRL_COMMAND monitor frame mirroring a mgmt command:
 * socket cookie, mgmt opcode and raw parameter bytes.
 */
585 static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
589 struct hci_mon_hdr *hdr;
/* cookie(4) + opcode(2) + parameters */
592 skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
596 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
597 put_unaligned_le16(opcode, skb_put(skb, 2));
600 skb_put_data(skb, buf, len);
602 __net_timestamp(skb);
604 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
605 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
606 hdr->index = cpu_to_le16(index);
607 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Queue a printf-formatted HCI_MON_SYSTEM_NOTE to @sk. Length is measured
 * with a NULL vsnprintf first, then the string is rendered NUL-terminated
 * into the skb.
 */
612 static void __printf(2, 3)
613 send_monitor_note(struct sock *sk, const char *fmt, ...)
616 struct hci_mon_hdr *hdr;
621 len = vsnprintf(NULL, 0, fmt, args);
624 skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
/* NOTE(review): a va_start/va_end pair around the second pass is not
 * visible in this excerpt.
 */
629 vsprintf(skb_put(skb, len), fmt, args);
630 *(u8 *)skb_put(skb, 1) = 0;
633 __net_timestamp(skb);
635 hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
636 hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
637 hdr->index = cpu_to_le16(HCI_DEV_NONE);
638 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
640 if (sock_queue_rcv_skb(sk, skb))
/* Replay current device state to a newly bound monitor socket: for each
 * registered device emit REG, then OPEN if running, then UP or SETUP as
 * applicable, so the monitor starts with a consistent picture.
 */
644 static void send_monitor_replay(struct sock *sk)
646 struct hci_dev *hdev;
648 read_lock(&hci_dev_list_lock);
650 list_for_each_entry(hdev, &hci_dev_list, list) {
653 skb = create_monitor_event(hdev, HCI_DEV_REG);
657 if (sock_queue_rcv_skb(sk, skb))
660 if (!test_bit(HCI_RUNNING, &hdev->flags))
663 skb = create_monitor_event(hdev, HCI_DEV_OPEN);
667 if (sock_queue_rcv_skb(sk, skb))
670 if (test_bit(HCI_UP, &hdev->flags))
671 skb = create_monitor_event(hdev, HCI_DEV_UP);
672 else if (hci_dev_test_flag(hdev, HCI_SETUP))
673 skb = create_monitor_event(hdev, HCI_DEV_SETUP);
678 if (sock_queue_rcv_skb(sk, skb))
683 read_unlock(&hci_dev_list_lock);
/* Replay a CTRL_OPEN frame for every existing HCI socket to a newly bound
 * monitor socket @mon_sk.
 */
686 static void send_monitor_control_replay(struct sock *mon_sk)
690 read_lock(&hci_sk_list.lock);
692 sk_for_each(sk, &hci_sk_list.head) {
695 skb = create_monitor_ctrl_open(sk);
699 if (sock_queue_rcv_skb(mon_sk, skb))
703 read_unlock(&hci_sk_list.lock);
706 /* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event skb (marked incoming and
 * timestamped) and deliver it to raw sockets via hci_send_to_sock().
 */
707 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
709 struct hci_event_hdr *hdr;
710 struct hci_ev_stack_internal *ev;
713 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
717 hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
718 hdr->evt = HCI_EV_STACK_INTERNAL;
719 hdr->plen = sizeof(*ev) + dlen;
721 ev = skb_put(skb, sizeof(*ev) + dlen);
723 memcpy(ev->data, data, dlen);
725 bt_cb(skb)->incoming = 1;
726 __net_timestamp(skb);
728 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
729 hci_send_to_sock(hdev, skb);
/* Handle a device lifecycle @event: mirror it to the monitor channel,
 * generate an internal stack event for add/up/down, and on unregister
 * wake up all sockets still bound to the dead device so they can see
 * the error state.
 */
733 void hci_sock_dev_event(struct hci_dev *hdev, int event)
735 BT_DBG("hdev %s event %d", hdev->name, event);
737 if (atomic_read(&monitor_promisc)) {
740 /* Send event to monitor */
741 skb = create_monitor_event(hdev, event);
743 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
744 HCI_SOCK_TRUSTED, NULL);
749 if (event <= HCI_DEV_DOWN) {
750 struct hci_ev_si_device ev;
752 /* Send event to sockets */
754 ev.dev_id = hdev->id;
755 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
758 if (event == HCI_DEV_UNREG) {
761 /* Wake up sockets using this dead device */
762 read_lock(&hci_sk_list.lock);
763 sk_for_each(sk, &hci_sk_list.head) {
764 if (hci_pi(sk)->hdev == hdev) {
/* NOTE(review): an sk_err assignment preceding this wakeup is not
 * visible in this excerpt.
 */
766 sk->sk_state_change(sk);
769 read_unlock(&hci_sk_list.lock);
/* Look up a registered mgmt channel by number.
 * Caller must hold mgmt_chan_list_lock.
 */
773 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
775 struct hci_mgmt_chan *c;
777 list_for_each_entry(c, &mgmt_chan_list, list) {
778 if (c->channel == channel)
/* Locked wrapper around __hci_mgmt_chan_find(). */
785 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
787 struct hci_mgmt_chan *c;
789 mutex_lock(&mgmt_chan_list_lock);
790 c = __hci_mgmt_chan_find(channel);
791 mutex_unlock(&mgmt_chan_list_lock);
/* Register a management channel. Channels below HCI_CHANNEL_CONTROL are
 * rejected, as are duplicate channel numbers (error returns not visible
 * in this excerpt).
 */
796 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
798 if (c->channel < HCI_CHANNEL_CONTROL)
801 mutex_lock(&mgmt_chan_list_lock);
802 if (__hci_mgmt_chan_find(c->channel)) {
803 mutex_unlock(&mgmt_chan_list_lock);
807 list_add_tail(&c->list, &mgmt_chan_list);
809 mutex_unlock(&mgmt_chan_list_lock);
813 EXPORT_SYMBOL(hci_mgmt_chan_register);
/* Unregister a management channel (list_del line not visible here). */
815 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
817 mutex_lock(&mgmt_chan_list_lock);
819 mutex_unlock(&mgmt_chan_list_lock);
821 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
/* Release an HCI socket: undo monitor promiscuity, announce CTRL_CLOSE to
 * the monitor, free the cookie, unlink from the global socket list, and
 * tear down user-channel exclusive access or raw-channel promisc on the
 * bound device.
 */
823 static int hci_sock_release(struct socket *sock)
825 struct sock *sk = sock->sk;
826 struct hci_dev *hdev;
829 BT_DBG("sock %p sk %p", sock, sk);
836 switch (hci_pi(sk)->channel) {
837 case HCI_CHANNEL_MONITOR:
838 atomic_dec(&monitor_promisc);
840 case HCI_CHANNEL_RAW:
841 case HCI_CHANNEL_USER:
842 case HCI_CHANNEL_CONTROL:
843 /* Send event to monitor */
844 skb = create_monitor_ctrl_close(sk);
846 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
847 HCI_SOCK_TRUSTED, NULL);
851 hci_sock_free_cookie(sk);
855 bt_sock_unlink(&hci_sk_list, sk);
857 hdev = hci_pi(sk)->hdev;
859 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
860 /* When releasing a user channel exclusive access,
861 * call hci_dev_do_close directly instead of calling
862 * hci_dev_close to ensure the exclusive access will
863 * be released and the controller brought back down.
865 * The checking of HCI_AUTO_OFF is not needed in this
866 * case since it will have been cleared already when
867 * opening the user channel.
869 hci_dev_do_close(hdev);
870 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
871 mgmt_index_added(hdev);
/* Non-user channels only drop the promisc reference */
874 atomic_dec(&hdev->promisc);
/* HCIBLOCKADDR ioctl helper: copy a bdaddr from userspace and add it to
 * the device blacklist as a BR/EDR address.
 */
884 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
889 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
894 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
896 hci_dev_unlock(hdev);
/* HCIUNBLOCKADDR ioctl helper: copy a bdaddr from userspace and remove it
 * from the device blacklist.
 */
901 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
906 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
911 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
913 hci_dev_unlock(hdev);
918 /* Ioctls that require bound socket */
/* Dispatch ioctls that need a valid bound device. Rejects devices in
 * user-channel or unconfigured state and non-primary controllers; the
 * conn-info/auth-info/blacklist commands require CAP_NET_ADMIN where
 * noted. Case labels are missing from this excerpt.
 */
919 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
922 struct hci_dev *hdev = hci_hdev_from_sock(sk);
925 return PTR_ERR(hdev);
927 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
930 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
933 if (hdev->dev_type != HCI_PRIMARY)
938 if (!capable(CAP_NET_ADMIN))
943 return hci_get_conn_info(hdev, (void __user *)arg);
946 return hci_get_auth_info(hdev, (void __user *)arg);
949 if (!capable(CAP_NET_ADMIN))
951 return hci_sock_blacklist_add(hdev, (void __user *)arg);
954 if (!capable(CAP_NET_ADMIN))
956 return hci_sock_blacklist_del(hdev, (void __user *)arg);
/* Top-level HCI ioctl handler (raw channel only). Generates the socket
 * cookie lazily so the monitor sees exactly one CTRL_OPEN per socket,
 * handles device-independent commands inline, and forwards the rest to
 * hci_sock_bound_ioctl(). Most case labels are missing from this excerpt.
 */
962 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
965 void __user *argp = (void __user *)arg;
966 struct sock *sk = sock->sk;
969 BT_DBG("cmd %x arg %lx", cmd, arg);
973 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
978 /* When calling an ioctl on an unbound raw socket, then ensure
979 * that the monitor gets informed. Ensure that the resulting event
980 * is only send once by checking if the cookie exists or not. The
981 * socket cookie will be only ever generated once for the lifetime
984 if (hci_sock_gen_cookie(sk)) {
987 if (capable(CAP_NET_ADMIN))
988 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
990 /* Send event to monitor */
991 skb = create_monitor_ctrl_open(sk);
993 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
994 HCI_SOCK_TRUSTED, NULL);
1003 return hci_get_dev_list(argp);
1006 return hci_get_dev_info(argp);
1008 case HCIGETCONNLIST:
1009 return hci_get_conn_list(argp);
/* Device up/down/reset/reset-stat all require CAP_NET_ADMIN */
1012 if (!capable(CAP_NET_ADMIN))
1014 return hci_dev_open(arg);
1017 if (!capable(CAP_NET_ADMIN))
1019 return hci_dev_close(arg);
1022 if (!capable(CAP_NET_ADMIN))
1024 return hci_dev_reset(arg);
1027 if (!capable(CAP_NET_ADMIN))
1029 return hci_dev_reset_stat(arg);
1036 case HCISETLINKMODE:
1039 if (!capable(CAP_NET_ADMIN))
1041 return hci_dev_cmd(cmd, argp);
1044 return hci_inquiry(argp);
/* Everything else needs a bound device */
1049 err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a channel (and optionally a device):
 * - RAW: optional device, promisc on device, trusted if CAP_NET_ADMIN
 * - USER: exclusive device access, CAP_NET_ADMIN, brings the device up
 * - MONITOR: CAP_NET_RAW, replays current device/socket state
 * - LOGGING: CAP_NET_ADMIN, no device
 * - registered mgmt channels (e.g. CONTROL): trusted if CAP_NET_ADMIN
 * Each path sends CTRL_CLOSE/CTRL_OPEN monitor notifications so the
 * cookie lifecycle stays consistent. Error-return lines and some braces
 * are missing from this excerpt.
 */
1056 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1059 struct sockaddr_hci haddr;
1060 struct sock *sk = sock->sk;
1061 struct hci_dev *hdev = NULL;
1062 struct sk_buff *skb;
1065 BT_DBG("sock %p sk %p", sock, sk);
/* Copy at most sizeof(haddr) bytes; shorter addresses are zero-padded */
1070 memset(&haddr, 0, sizeof(haddr));
1071 len = min_t(unsigned int, sizeof(haddr), addr_len);
1072 memcpy(&haddr, addr, len);
1074 if (haddr.hci_family != AF_BLUETOOTH)
1079 /* Allow detaching from dead device and attaching to alive device, if
1080 * the caller wants to re-bind (instead of close) this socket in
1081 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
1083 hdev = hci_pi(sk)->hdev;
1084 if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1085 hci_pi(sk)->hdev = NULL;
1086 sk->sk_state = BT_OPEN;
/* Re-binding an already-bound socket is not allowed */
1091 if (sk->sk_state == BT_BOUND) {
1096 switch (haddr.hci_channel) {
1097 case HCI_CHANNEL_RAW:
1098 if (hci_pi(sk)->hdev) {
1103 if (haddr.hci_dev != HCI_DEV_NONE) {
1104 hdev = hci_dev_get(haddr.hci_dev);
1110 atomic_inc(&hdev->promisc);
1113 hci_pi(sk)->channel = haddr.hci_channel;
1115 if (!hci_sock_gen_cookie(sk)) {
1116 /* In the case when a cookie has already been assigned,
1117 * then there has been already an ioctl issued against
1118 * an unbound socket and with that triggerd an open
1119 * notification. Send a close notification first to
1120 * allow the state transition to bounded.
1122 skb = create_monitor_ctrl_close(sk);
1124 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1125 HCI_SOCK_TRUSTED, NULL);
1130 if (capable(CAP_NET_ADMIN))
1131 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1133 hci_pi(sk)->hdev = hdev;
1135 /* Send event to monitor */
1136 skb = create_monitor_ctrl_open(sk);
1138 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1139 HCI_SOCK_TRUSTED, NULL);
1144 case HCI_CHANNEL_USER:
1145 if (hci_pi(sk)->hdev) {
1150 if (haddr.hci_dev == HCI_DEV_NONE) {
1155 if (!capable(CAP_NET_ADMIN)) {
1160 hdev = hci_dev_get(haddr.hci_dev);
/* Refuse exclusive access while the device is initializing, in
 * setup/config, or already up without the auto-off grace period
 */
1166 if (test_bit(HCI_INIT, &hdev->flags) ||
1167 hci_dev_test_flag(hdev, HCI_SETUP) ||
1168 hci_dev_test_flag(hdev, HCI_CONFIG) ||
1169 (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1170 test_bit(HCI_UP, &hdev->flags))) {
1176 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1182 mgmt_index_removed(hdev);
1184 err = hci_dev_open(hdev->id);
1186 if (err == -EALREADY) {
1187 /* In case the transport is already up and
1188 * running, clear the error here.
1190 * This can happen when opening a user
1191 * channel and HCI_AUTO_OFF grace period
1196 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1197 mgmt_index_added(hdev);
1203 hci_pi(sk)->channel = haddr.hci_channel;
1205 if (!hci_sock_gen_cookie(sk)) {
1206 /* In the case when a cookie has already been assigned,
1207 * this socket will transition from a raw socket into
1208 * a user channel socket. For a clean transition, send
1209 * the close notification first.
1211 skb = create_monitor_ctrl_close(sk);
1213 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1214 HCI_SOCK_TRUSTED, NULL);
1219 /* The user channel is restricted to CAP_NET_ADMIN
1220 * capabilities and with that implicitly trusted.
1222 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1224 hci_pi(sk)->hdev = hdev;
1226 /* Send event to monitor */
1227 skb = create_monitor_ctrl_open(sk);
1229 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1230 HCI_SOCK_TRUSTED, NULL);
1234 atomic_inc(&hdev->promisc);
1237 case HCI_CHANNEL_MONITOR:
1238 if (haddr.hci_dev != HCI_DEV_NONE) {
1243 if (!capable(CAP_NET_RAW)) {
1248 hci_pi(sk)->channel = haddr.hci_channel;
1250 /* The monitor interface is restricted to CAP_NET_RAW
1251 * capabilities and with that implicitly trusted.
1253 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1255 send_monitor_note(sk, "Linux version %s (%s)",
1256 init_utsname()->release,
1257 init_utsname()->machine);
1258 send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1259 BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1260 send_monitor_replay(sk);
1261 send_monitor_control_replay(sk);
1263 atomic_inc(&monitor_promisc);
1266 case HCI_CHANNEL_LOGGING:
1267 if (haddr.hci_dev != HCI_DEV_NONE) {
1272 if (!capable(CAP_NET_ADMIN)) {
1277 hci_pi(sk)->channel = haddr.hci_channel;
/* default: any other channel must be a registered mgmt channel */
1281 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1286 if (haddr.hci_dev != HCI_DEV_NONE) {
1291 /* Users with CAP_NET_ADMIN capabilities are allowed
1292 * access to all management commands and events. For
1293 * untrusted users the interface is restricted and
1294 * also only untrusted events are sent.
1296 if (capable(CAP_NET_ADMIN))
1297 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1299 hci_pi(sk)->channel = haddr.hci_channel;
1301 /* At the moment the index and unconfigured index events
1302 * are enabled unconditionally. Setting them on each
1303 * socket when binding keeps this functionality. They
1304 * however might be cleared later and then sending of these
1305 * events will be disabled, but that is then intentional.
1307 * This also enables generic events that are safe to be
1308 * received by untrusted users. Example for such events
1309 * are changes to settings, class of device, name etc.
1311 if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1312 if (!hci_sock_gen_cookie(sk)) {
1313 /* In the case when a cookie has already been
1314 * assigned, this socket will transtion from
1315 * a raw socket into a control socket. To
1316 * allow for a clean transtion, send the
1317 * close notification first.
1319 skb = create_monitor_ctrl_close(sk);
1321 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1322 HCI_SOCK_TRUSTED, NULL);
1327 /* Send event to monitor */
1328 skb = create_monitor_ctrl_open(sk);
1330 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1331 HCI_SOCK_TRUSTED, NULL);
1335 hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1336 hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1337 hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1338 hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1339 hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1340 hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1345 sk->sk_state = BT_BOUND;
/* getname(): report the bound device id and channel as a sockaddr_hci.
 * Fails via hci_hdev_from_sock() when no live device is bound.
 */
1352 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1353 int *addr_len, int peer)
1355 struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1356 struct sock *sk = sock->sk;
1357 struct hci_dev *hdev;
1360 BT_DBG("sock %p sk %p", sock, sk);
1367 hdev = hci_hdev_from_sock(sk);
1369 err = PTR_ERR(hdev);
1373 *addr_len = sizeof(*haddr);
1374 haddr->hci_family = AF_BLUETOOTH;
1375 haddr->hci_dev = hdev->id;
1376 haddr->hci_channel= hci_pi(sk)->channel;
/* Attach requested ancillary data (direction and/or timestamp) to a
 * received message, honoring 32-bit compat timeval layout when needed.
 */
1383 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1384 struct sk_buff *skb)
1386 __u32 mask = hci_pi(sk)->cmsg_mask;
1388 if (mask & HCI_CMSG_DIR) {
1389 int incoming = bt_cb(skb)->incoming;
1390 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1394 if (mask & HCI_CMSG_TSTAMP) {
1395 #ifdef CONFIG_COMPAT
1396 struct compat_timeval ctv;
1402 skb_get_timestamp(skb, &tv);
/* data/len default to the native timeval (assignment lines not
 * visible in this excerpt)
 */
1406 #ifdef CONFIG_COMPAT
1407 if (!COMPAT_USE_64BIT_TIME &&
1408 (msg->msg_flags & MSG_CMSG_COMPAT)) {
1409 ctv.tv_sec = tv.tv_sec;
1410 ctv.tv_usec = tv.tv_usec;
1416 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* recvmsg(): dequeue one datagram, copy up to @len bytes (setting
 * MSG_TRUNC when the frame is larger), and add per-channel ancillary
 * data/timestamps. The logging channel is write-only.
 */
1420 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1421 size_t len, int flags)
1423 int noblock = flags & MSG_DONTWAIT;
1424 struct sock *sk = sock->sk;
1425 struct sk_buff *skb;
1427 unsigned int skblen;
1429 BT_DBG("sock %p, sk %p", sock, sk);
1431 if (flags & MSG_OOB)
1434 if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1437 if (sk->sk_state == BT_CLOSED)
1440 skb = skb_recv_datagram(sk, flags, noblock, &err);
1447 msg->msg_flags |= MSG_TRUNC;
1451 skb_reset_transport_header(skb);
1452 err = skb_copy_datagram_msg(skb, 0, msg, copied);
1454 switch (hci_pi(sk)->channel) {
1455 case HCI_CHANNEL_RAW:
1456 hci_sock_cmsg(sk, msg, skb);
1458 case HCI_CHANNEL_USER:
1459 case HCI_CHANNEL_MONITOR:
1460 sock_recv_timestamp(msg, sk, skb);
1463 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1464 sock_recv_timestamp(msg, sk, skb);
1468 skb_free_datagram(sk, skb);
/* With MSG_TRUNC the full frame length is returned, not the copied len */
1470 if (flags & MSG_TRUNC)
1473 return err ? : copied;
/* Parse and dispatch one management command from @msg on @chan:
 * validates the header and length, mirrors control-channel commands to
 * the monitor, enforces trust/handler flags, resolves the device index,
 * and finally calls the registered handler. Error-cleanup (goto done)
 * lines are missing from this excerpt.
 */
1476 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1477 struct msghdr *msg, size_t msglen)
1481 struct mgmt_hdr *hdr;
1482 u16 opcode, index, len;
1483 struct hci_dev *hdev = NULL;
1484 const struct hci_mgmt_handler *handler;
1485 bool var_len, no_hdev;
1488 BT_DBG("got %zu bytes", msglen);
1490 if (msglen < sizeof(*hdr))
1493 buf = kmalloc(msglen, GFP_KERNEL);
1497 if (memcpy_from_msg(buf, msg, msglen)) {
1503 opcode = __le16_to_cpu(hdr->opcode);
1504 index = __le16_to_cpu(hdr->index);
1505 len = __le16_to_cpu(hdr->len);
/* Header-declared length must match what was actually received */
1507 if (len != msglen - sizeof(*hdr)) {
1512 if (chan->channel == HCI_CHANNEL_CONTROL) {
1513 struct sk_buff *skb;
1515 /* Send event to monitor */
1516 skb = create_monitor_ctrl_command(sk, index, opcode, len,
1517 buf + sizeof(*hdr));
1519 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1520 HCI_SOCK_TRUSTED, NULL);
1525 if (opcode >= chan->handler_count ||
1526 chan->handlers[opcode].func == NULL) {
1527 BT_DBG("Unknown op %u", opcode);
1528 err = mgmt_cmd_status(sk, index, opcode,
1529 MGMT_STATUS_UNKNOWN_COMMAND);
1533 handler = &chan->handlers[opcode];
1535 if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1536 !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1537 err = mgmt_cmd_status(sk, index, opcode,
1538 MGMT_STATUS_PERMISSION_DENIED);
1542 if (index != MGMT_INDEX_NONE) {
1543 hdev = hci_dev_get(index);
1545 err = mgmt_cmd_status(sk, index, opcode,
1546 MGMT_STATUS_INVALID_INDEX);
1550 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1551 hci_dev_test_flag(hdev, HCI_CONFIG) ||
1552 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1553 err = mgmt_cmd_status(sk, index, opcode,
1554 MGMT_STATUS_INVALID_INDEX);
1558 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1559 !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1560 err = mgmt_cmd_status(sk, index, opcode,
1561 MGMT_STATUS_INVALID_INDEX);
/* Handler's device expectation must match whether we have one */
1566 no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1567 if (no_hdev != !hdev) {
1568 err = mgmt_cmd_status(sk, index, opcode,
1569 MGMT_STATUS_INVALID_INDEX);
1573 var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1574 if ((var_len && len < handler->data_len) ||
1575 (!var_len && len != handler->data_len)) {
1576 err = mgmt_cmd_status(sk, index, opcode,
1577 MGMT_STATUS_INVALID_PARAMS);
1581 if (hdev && chan->hdev_init)
1582 chan->hdev_init(sk, hdev);
1584 cp = buf + sizeof(*hdr);
1586 err = handler->func(sk, hdev, cp, len);
/* Accept a user-supplied logging frame on HCI_CHANNEL_LOGGING, validate
 * its header/priority/ident framing, rewrite the opcode to
 * HCI_MON_USER_LOGGING and forward it to the monitor channel.
 */
1600 static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1602 struct hci_mon_hdr *hdr;
1603 struct sk_buff *skb;
1604 struct hci_dev *hdev;
1608 /* The logging frame consists at minimum of the standard header,
1609 * the priority byte, the ident length byte and at least one string
1610 * terminator NUL byte. Anything shorter are invalid packets.
1612 if (len < sizeof(*hdr) + 3)
1615 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1619 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1624 hdr = (void *)skb->data;
1626 if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
/* Opcode 0x0000 carries a priority byte plus ident string */
1631 if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1632 __u8 priority = skb->data[sizeof(*hdr)];
1633 __u8 ident_len = skb->data[sizeof(*hdr) + 1];
1635 /* Only the priorities 0-7 are valid and with that any other
1636 * value results in an invalid packet.
1638 * The priority byte is followed by an ident length byte and
1639 * the NUL terminated ident string. Check that the ident
1640 * length is not overflowing the packet and also that the
1641 * ident string itself is NUL terminated. In case the ident
1642 * length is zero, the length value actually doubles as NUL
1643 * terminator identifier.
1645 * The message follows the ident string (if present) and
1646 * must be NUL terminated. Otherwise it is not a valid packet.
1648 if (priority > 7 || skb->data[len - 1] != 0x00 ||
1649 ident_len > len - sizeof(*hdr) - 3 ||
1650 skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1659 index = __le16_to_cpu(hdr->index);
/* A concrete index must refer to an existing device */
1661 if (index != MGMT_INDEX_NONE) {
1662 hdev = hci_dev_get(index);
1671 hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1673 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
/* Send one frame on an HCI socket.
 *
 * Dispatches on the channel the socket was bound to: LOGGING frames go
 * to hci_logging_frame(), management channels to hci_mgmt_cmd() under
 * mgmt_chan_list_lock, and RAW/USER channel frames are copied into an
 * skb and queued to the controller's work queues.
 *
 * NOTE(review): several lines (returns, braces, goto labels such as the
 * drop/done paths) are elided in this listing; comments below describe
 * only the visible logic.
 *
 * Returns the number of bytes consumed on success or a negative errno
 * (per the usual sendmsg contract -- confirm against the full source).
 */
1684 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1687 	struct sock *sk = sock->sk;
1688 	struct hci_mgmt_chan *chan;
1689 	struct hci_dev *hdev;
1690 	struct sk_buff *skb;
1693 	BT_DBG("sock %p sk %p", sock, sk);
/* Out-of-band data is not supported on HCI sockets. */
1695 	if (msg->msg_flags & MSG_OOB)
/* Reject any flag outside the supported set. */
1698 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
/* Frames must carry at least a packet-type byte plus a minimal header
 * and may not exceed the HCI MTU.
 */
1702 	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
/* Channel dispatch: only RAW and USER fall through to the raw
 * transmit path below.
 */
1707 	switch (hci_pi(sk)->channel) {
1708 	case HCI_CHANNEL_RAW:
1709 	case HCI_CHANNEL_USER:
/* MONITOR is read-only; sending is rejected (handling elided here). */
1711 	case HCI_CHANNEL_MONITOR:
1714 	case HCI_CHANNEL_LOGGING:
1715 		err = hci_logging_frame(sk, msg, len);
/* Default case: look up a registered management channel and hand the
 * command over while holding the channel-list lock.
 */
1718 		mutex_lock(&mgmt_chan_list_lock);
1719 		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1721 			err = hci_mgmt_cmd(chan, sk, msg, len);
1725 		mutex_unlock(&mgmt_chan_list_lock);
/* RAW/USER path: resolve the bound controller (errors out if the
 * socket is unbound or the device is unregistering).
 */
1729 	hdev = hci_hdev_from_sock(sk);
1731 		err = PTR_ERR(hdev);
/* Transmitting requires the controller to be powered up. */
1735 	if (!test_bit(HCI_UP, &hdev->flags)) {
1740 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1744 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
/* First byte of a raw frame is the H:4 packet-type indicator. */
1749 	hci_skb_pkt_type(skb) = skb->data[0];
1752 	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1753 		/* No permission check is needed for user channel
1754 		 * since that gets enforced when binding the socket.
1756 		 * However check that the packet type is valid.
1758 		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1759 		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1760 		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
/* USER channel frames bypass filtering and go straight to the
 * raw queue / TX worker.
 */
1765 		skb_queue_tail(&hdev->raw_q, skb);
1766 		queue_work(hdev->workqueue, &hdev->tx_work);
1767 	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1768 		u16 opcode = get_unaligned_le16(skb->data);
1769 		u16 ogf = hci_opcode_ogf(opcode);
1770 		u16 ocf = hci_opcode_ocf(opcode);
/* RAW channel commands are checked against the security filter;
 * commands outside the whitelist require CAP_NET_RAW.
 */
1772 		if (((ogf > HCI_SFLT_MAX_OGF) ||
1773 		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1774 				   &hci_sec_filter.ocf_mask[ogf])) &&
1775 		    !capable(CAP_NET_RAW)) {
1780 		/* Since the opcode has already been extracted here, store
1781 		 * a copy of the value for later use by the drivers.
1783 		hci_skb_opcode(skb) = opcode;
/* Vendor-specific commands (handling of the OGF 0x3f split is
 * partially elided here) go to the raw queue...
 */
1786 			skb_queue_tail(&hdev->raw_q, skb);
1787 			queue_work(hdev->workqueue, &hdev->tx_work);
1789 			/* Stand-alone HCI commands must be flagged as
1790 			 * single-command requests.
/* ...while ordinary commands are marked as request starts and
 * serialized through the command queue / command worker.
 */
1792 			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1794 			skb_queue_tail(&hdev->cmd_q, skb);
1795 			queue_work(hdev->workqueue, &hdev->cmd_work);
/* Non-command raw traffic: data packets need CAP_NET_RAW and must
 * be ACL or SCO data.
 */
1798 		if (!capable(CAP_NET_RAW)) {
1803 		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1804 		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1809 		skb_queue_tail(&hdev->raw_q, skb);
1810 		queue_work(hdev->workqueue, &hdev->tx_work);
/* Set a SOL_HCI socket option (HCI_DATA_DIR / HCI_TIME_STAMP /
 * HCI_FILTER).  Only valid on HCI_CHANNEL_RAW sockets.
 *
 * NOTE(review): the switch/case labels and some lock/unlock and error
 * lines are elided in this listing; comments describe visible logic.
 *
 * Returns 0 on success or a negative errno.
 */
1824 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1825 			       char __user *optval, unsigned int len)
/* uf is partially copied from userspace below, so pre-zero it via a
 * designated initializer to avoid leaking stack garbage into unset
 * fields.
 */
1827 	struct hci_ufilter uf = { .opcode = 0 };
1828 	struct sock *sk = sock->sk;
1829 	int err = 0, opt = 0;
1831 	BT_DBG("sk %p, opt %d", sk, optname);
1833 	if (level != SOL_HCI)
1834 		return -ENOPROTOOPT;
/* Filter/cmsg options only make sense on the raw channel. */
1838 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
/* HCI_DATA_DIR (case label elided): toggle direction info in cmsgs. */
1845 		if (get_user(opt, (int __user *)optval)) {
1851 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1853 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1856 	case HCI_TIME_STAMP:
1857 		if (get_user(opt, (int __user *)optval)) {
/* Toggle timestamp delivery in ancillary data. */
1863 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1865 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
/* HCI_FILTER (case label elided): seed uf with the current filter so a
 * short user copy keeps existing values for the remaining fields.
 */
1870 			struct hci_filter *f = &hci_pi(sk)->filter;
1872 			uf.type_mask = f->type_mask;
1873 			uf.opcode = f->opcode;
1874 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1875 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
/* Copy at most sizeof(uf) bytes from userspace. */
1878 		len = min_t(unsigned int, len, sizeof(uf));
1879 		if (copy_from_user(&uf, optval, len)) {
/* Unprivileged sockets may only narrow, never widen, the kernel's
 * security filter.
 */
1884 		if (!capable(CAP_NET_RAW)) {
1885 			uf.type_mask &= hci_sec_filter.type_mask;
1886 			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1887 			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
/* Commit the (possibly clamped) filter to the socket. */
1891 			struct hci_filter *f = &hci_pi(sk)->filter;
1893 			f->type_mask = uf.type_mask;
1894 			f->opcode = uf.opcode;
1895 			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1896 			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* Read back a SOL_HCI socket option (HCI_DATA_DIR / HCI_TIME_STAMP /
 * HCI_FILTER).  Only valid on HCI_CHANNEL_RAW sockets.
 *
 * NOTE(review): some case labels and error/return lines are elided in
 * this listing.
 *
 * Returns 0 on success or a negative errno.
 */
1910 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1911 			       char __user *optval, int __user *optlen)
1913 	struct hci_ufilter uf;
1914 	struct sock *sk = sock->sk;
1915 	int len, opt, err = 0;
1917 	BT_DBG("sk %p, opt %d", sk, optname);
1919 	if (level != SOL_HCI)
1920 		return -ENOPROTOOPT;
/* Caller supplies the output buffer length via *optlen. */
1922 	if (get_user(len, optlen))
1927 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
/* HCI_DATA_DIR (case label elided): report whether direction info is
 * enabled as an int flag.
 */
1934 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1939 		if (put_user(opt, optval))
1943 	case HCI_TIME_STAMP:
1944 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1949 		if (put_user(opt, optval))
/* HCI_FILTER (case label elided): snapshot the current filter into a
 * zeroed hci_ufilter before copying out, so no kernel stack bytes
 * leak to userspace.
 */
1955 			struct hci_filter *f = &hci_pi(sk)->filter;
1957 			memset(&uf, 0, sizeof(uf));
1958 			uf.type_mask = f->type_mask;
1959 			uf.opcode = f->opcode;
1960 			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1961 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
/* Copy out at most sizeof(uf) bytes, truncated to the caller's len. */
1964 		len = min_t(unsigned int, len, sizeof(uf));
1965 		if (copy_to_user(optval, &uf, len))
/* Socket destructor: drop any skbs still sitting on the receive and
 * write queues when the last reference to the socket goes away.
 */
1979 static void hci_sock_destruct(struct sock *sk)
1981 	skb_queue_purge(&sk->sk_receive_queue);
1982 	skb_queue_purge(&sk->sk_write_queue);
/* proto_ops vtable for PF_BLUETOOTH/BTPROTO_HCI sockets.  HCI sockets
 * are datagram-style and connectionless, so the connection-oriented
 * entry points are stubbed with the sock_no_* helpers.
 */
1985 static const struct proto_ops hci_sock_ops = {
1986 	.family		= PF_BLUETOOTH,
1987 	.owner		= THIS_MODULE,
1988 	.release	= hci_sock_release,
1989 	.bind		= hci_sock_bind,
1990 	.getname	= hci_sock_getname,
1991 	.sendmsg	= hci_sock_sendmsg,
1992 	.recvmsg	= hci_sock_recvmsg,
1993 	.ioctl		= hci_sock_ioctl,
	/* Generic datagram poll is sufficient; no protocol-specific events. */
1994 	.poll		= datagram_poll,
1995 	.listen		= sock_no_listen,
1996 	.shutdown	= sock_no_shutdown,
1997 	.setsockopt	= hci_sock_setsockopt,
1998 	.getsockopt	= hci_sock_getsockopt,
1999 	.connect	= sock_no_connect,
2000 	.socketpair	= sock_no_socketpair,
2001 	.accept		= sock_no_accept,
2002 	.mmap		= sock_no_mmap
/* Protocol descriptor for HCI sockets; obj_size makes sk_alloc()
 * reserve room for the hci_pinfo that wraps struct sock.
 * (The .name member line is elided in this listing.)
 */
2005 static struct proto hci_sk_proto = {
2007 	.owner		= THIS_MODULE,
2008 	.obj_size	= sizeof(struct hci_pinfo)
/* Create a new HCI socket (PF_BLUETOOTH family hook).
 *
 * Only SOCK_RAW is supported.  Allocates the sock, initializes generic
 * socket state, and links it into hci_sk_list so broadcast helpers can
 * find it.  Returns 0 on success or a negative errno.
 * (Allocation-failure check lines are elided in this listing.)
 */
2011 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2016 	BT_DBG("sock %p", sock);
2018 	if (sock->type != SOCK_RAW)
2019 		return -ESOCKTNOSUPPORT;
2021 	sock->ops = &hci_sock_ops;
2023 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2027 	sock_init_data(sock, sk);
2029 	sock_reset_flag(sk, SOCK_ZAPPED);
2031 	sk->sk_protocol = protocol;
2033 	sock->state = SS_UNCONNECTED;
/* New sockets start unbound; bind() later fixes the channel/device. */
2034 	sk->sk_state = BT_OPEN;
2035 	sk->sk_destruct = hci_sock_destruct;
/* Track the socket globally for hci_send_to_sock()-style broadcasts. */
2037 	bt_sock_link(&hci_sk_list, sk);
/* Family registration record: routes PF_BLUETOOTH/BTPROTO_HCI socket
 * creation to hci_sock_create().
 */
2041 static const struct net_proto_family hci_sock_family_ops = {
2042 	.family	= PF_BLUETOOTH,
2043 	.owner	= THIS_MODULE,
2044 	.create	= hci_sock_create,
/* Register the HCI socket layer at boot/module init.
 *
 * Order: register the proto, then the BTPROTO_HCI socket family, then
 * the /proc entry; later failures unwind the earlier registrations
 * (the error-label lines are elided in this listing).
 * Returns 0 on success or a negative errno.
 */
2047 int __init hci_sock_init(void)
/* sockaddr_hci must fit in the generic sockaddr passed by userspace. */
2051 	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2053 	err = proto_register(&hci_sk_proto, 0);
2057 	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2059 		BT_ERR("HCI socket registration failed");
2063 	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2065 		BT_ERR("Failed to create HCI proc file");
/* procfs failed: roll back the socket-family registration. */
2066 		bt_sock_unregister(BTPROTO_HCI);
2070 	BT_INFO("HCI socket layer initialized");
/* Error path (label elided): undo the proto registration. */
2075 	proto_unregister(&hci_sk_proto);
/* Tear down the HCI socket layer in the reverse order of
 * hci_sock_init(): procfs entry, socket family, then proto.
 */
2079 void hci_sock_cleanup(void)
2081 	bt_procfs_cleanup(&init_net, "hci");
2082 	bt_sock_unregister(BTPROTO_HCI);
2083 	proto_unregister(&hci_sk_proto);