/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */
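/*
 * Illustrative sketch, not part of the kernel build: a minimal userspace
 * client of the interface implemented below, assuming the usual bluez
 * socket headers. It opens a raw HCI socket and binds it to hci0, which
 * is the path handled by hci_sock_create() and hci_sock_bind() in this
 * file.
 *
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int open_raw_hci0(void)
 *	{
 *		struct sockaddr_hci addr = {
 *			.hci_family  = AF_BLUETOOTH,
 *			.hci_dev     = 0,		// hci0
 *			.hci_channel = HCI_CHANNEL_RAW,
 *		};
 *		int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */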
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}
static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
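/*
 * Illustrative note, a sketch of the lookup rather than extra kernel code:
 * hci_sock_sendmsg() below splits a command opcode with the standard
 * hci_opcode_ogf()/hci_opcode_ocf() macros and tests the matching bit in
 * the row for that OGF. Worked example for Inquiry (opcode 0x0401):
 *
 *	u16 opcode = 0x0401;
 *	u16 ogf = hci_opcode_ogf(opcode);	// 0x0401 >> 10  = 1
 *	u16 ocf = hci_opcode_ocf(opcode);	// 0x0401 & 0x3ff = 1
 *
 *	// hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])
 *	// checks word ocf >> 5 = 0, bit ocf & 31 = 1 of row 1: 0xbe000006
 *	// has bit 1 set, so unprivileged sockets may send Inquiry.
 */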
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			memcpy(skb_put(skb, data_len), data, data_len);

		skb->tstamp = tstamp;

		hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	*skb_put(skb, 1) = TASK_COMM_LEN;
	memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		memcpy(skb_put(skb, len), buf, len);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *)skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
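/*
 * Illustrative sketch, not from this file: how a hypothetical kernel-side
 * user could register an additional management channel with the API above.
 * The field names follow struct hci_mgmt_chan as this file uses it
 * (channel, handler_count, handlers, hdev_init); the handler table, sizes
 * and channel number are made up for the example.
 *
 *	static int example_cmd(struct sock *sk, struct hci_dev *hdev,
 *			       void *data, u16 data_len)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct hci_mgmt_handler example_handlers[] = {
 *		[0x0001] = { .func = example_cmd,
 *			     .data_len = 0,
 *			     .flags = HCI_MGMT_NO_HDEV },
 *	};
 *
 *	static struct hci_mgmt_chan example_chan = {
 *		.channel	= HCI_CHANNEL_CONTROL + 1,
 *		.handler_count	= ARRAY_SIZE(example_handlers),
 *		.handlers	= example_handlers,
 *	};
 *
 *	// err = hci_mgmt_chan_register(&example_chan);
 */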
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel's exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, ensure that the
	 * monitor gets informed. Ensure that the resulting event is only
	 * sent once by checking if the cookie exists or not. The socket
	 * cookie will only ever be generated once for the lifetime of a
	 * given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * an ioctl has already been issued against an unbound
			 * socket and with that triggered an open notification.
			 * Send a close notification first to allow the state
			 * transition to the bound state.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and the HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
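/*
 * Illustrative sketch, not part of the kernel build: a btmon-style
 * userspace reader for the HCI_CHANNEL_MONITOR path set up above.
 * Requires CAP_NET_RAW; the header layout matches struct hci_mon_hdr
 * (le16 opcode, le16 index, le16 len).
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	static void monitor_loop(void)
 *	{
 *		struct sockaddr_hci addr = {
 *			.hci_family  = AF_BLUETOOTH,
 *			.hci_dev     = HCI_DEV_NONE,
 *			.hci_channel = HCI_CHANNEL_MONITOR,
 *		};
 *		unsigned char buf[2048];
 *		int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *		if (fd < 0 || bind(fd, (struct sockaddr *)&addr,
 *				   sizeof(addr)) < 0)
 *			return;
 *
 *		for (;;) {
 *			ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *
 *			if (n < 6)
 *				break;
 *			// le16 opcode / index / payload length
 *			printf("opcode %u index %u len %u\n",
 *			       buf[0] | (buf[1] << 8),
 *			       buf[2] | (buf[3] << 8),
 *			       buf[4] | (buf[5] << 8));
 *		}
 *	}
 */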
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length byte itself doubles as the
		 * NUL terminator.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
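/*
 * Illustrative sketch, not part of the kernel build: a userspace sender
 * for the logging frame validated above, assuming a socket already bound
 * to HCI_CHANNEL_LOGGING. Opcode 0x0000 and the priority/ident layout
 * follow the checks in hci_logging_frame(); the ident string "example"
 * is made up, and the ident length byte counts the ident's NUL.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	// Wire layout: le16 opcode, le16 index, le16 payload len, then
 *	// priority byte, ident length byte, ident + NUL, message + NUL.
 *	static int send_log(int fd, const char *msg_str)
 *	{
 *		unsigned char buf[512];
 *		const char ident[] = "example";
 *		size_t ident_len = sizeof(ident);	// includes NUL
 *		size_t msg_len = strlen(msg_str) + 1;	// includes NUL
 *		size_t payload = 2 + ident_len + msg_len;
 *
 *		buf[0] = 0x00; buf[1] = 0x00;		// opcode 0x0000
 *		buf[2] = 0xff; buf[3] = 0xff;		// MGMT_INDEX_NONE
 *		buf[4] = payload & 0xff;
 *		buf[5] = payload >> 8;
 *		buf[6] = 6;				// priority: info
 *		buf[7] = ident_len;
 *		memcpy(buf + 8, ident, ident_len);
 *		memcpy(buf + 8 + ident_len, msg_str, msg_len);
 *		return send(fd, buf, 6 + payload, 0);
 *	}
 */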
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
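/*
 * Illustrative sketch, not part of the kernel build: userspace setting the
 * raw-socket filter handled above, assuming the hci_filter_* helpers from
 * bluez's <bluetooth/hci_lib.h>. fd is a raw HCI socket as bound in the
 * earlier example.
 *
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *	#include <bluetooth/hci_lib.h>
 *
 *	static int watch_events(int fd)
 *	{
 *		struct hci_filter flt;
 *
 *		hci_filter_clear(&flt);
 *		hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *		hci_filter_all_events(&flt);
 *
 *		// Without CAP_NET_RAW the kernel intersects this with
 *		// hci_sec_filter before applying it.
 *		return setsockopt(fd, SOL_HCI, HCI_FILTER,
 *				  &flt, sizeof(flt));
 *	}
 */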
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}