GNU Linux-libre 4.4.299-gnu1
[releases.git] / net / bluetooth / hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "mgmt_util.h"
36
/* Registered management channel handlers (HCI_CHANNEL_CONTROL and up),
 * protected by mgmt_chan_list_lock.
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Number of sockets currently bound to HCI_CHANNEL_MONITOR; monitor
 * traffic is only generated while this count is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;		/* must stay first: hci_pi() casts from sock */
	struct hci_dev    *hdev;	/* bound controller, NULL until bound */
	struct hci_filter filter;	/* per-socket filter for raw channel packets */
	__u32             cmsg_mask;	/* HCI_CMSG_* ancillary data selection */
	unsigned short    channel;	/* HCI_CHANNEL_* this socket is bound to */
	unsigned long     flags;	/* HCI_SOCK_* / HCI_MGMT_* flag bits */
};
55
56 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
57 {
58         struct hci_dev *hdev = hci_pi(sk)->hdev;
59
60         if (!hdev)
61                 return ERR_PTR(-EBADFD);
62         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
63                 return ERR_PTR(-EPIPE);
64         return hdev;
65 }
66
/* Atomically set HCI_SOCK_*/HCI_MGMT_* flag bit @nr on @sk. */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
71
/* Atomically clear flag bit @nr on @sk. */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
76
/* Return non-zero if flag bit @nr is set on @sk. */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
81
/* Return the HCI_CHANNEL_* value @sk was bound to. */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
86
87 static inline int hci_test_bit(int nr, const void *addr)
88 {
89         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
90 }
91
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Bitmaps describing which raw-channel traffic is permitted: allowed
 * packet types, allowed HCI event codes and, per command group (OGF),
 * a 128-bit mask of allowed command opcodes (OCF).
 * NOTE(review): enforcement is outside this chunk - presumably applied
 * to unprivileged raw sockets; confirm against hci_sock_sendmsg.
 */
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
100
/* Default security filter: the hex values are bitmaps over packet
 * type numbers, event codes and per-OGF command OCFs (see struct
 * hci_sec_filter above).
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
121
/* All open HCI sockets; walks and updates are guarded by .lock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
125
126 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
127 {
128         struct hci_filter *flt;
129         int flt_type, flt_event;
130
131         /* Apply filter */
132         flt = &hci_pi(sk)->filter;
133
134         flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
135
136         if (!test_bit(flt_type, &flt->type_mask))
137                 return true;
138
139         /* Extra filter for event packets only */
140         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
141                 return false;
142
143         flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
144
145         if (!hci_test_bit(flt_event, &flt->event_mask))
146                 return true;
147
148         /* Check filter only when opcode is set */
149         if (!flt->opcode)
150                 return false;
151
152         if (flt_event == HCI_EV_CMD_COMPLETE &&
153             flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
154                 return true;
155
156         if (flt_event == HCI_EV_CMD_STATUS &&
157             flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
158                 return true;
159
160         return false;
161 }
162
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only bound sockets attached to this controller qualify
		 * (for hdev == NULL that means sockets with no device).
		 */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* Raw sockets see only the four core packet
			 * types, further restricted per socket by
			 * is_filtered_packet().
			 */
			if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
			    bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User-channel sockets see incoming traffic only */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom; built
			 * lazily so no copy is made when nobody listens.
			 */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		/* Each receiver gets its own clone of the shared copy */
		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
225
226 /* Send frame to sockets with specific channel */
227 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
228                          int flag, struct sock *skip_sk)
229 {
230         struct sock *sk;
231
232         BT_DBG("channel %u len %d", channel, skb->len);
233
234         read_lock(&hci_sk_list.lock);
235
236         sk_for_each(sk, &hci_sk_list.head) {
237                 struct sk_buff *nskb;
238
239                 /* Ignore socket without the flag set */
240                 if (!hci_sock_test_flag(sk, flag))
241                         continue;
242
243                 /* Skip the original socket */
244                 if (sk == skip_sk)
245                         continue;
246
247                 if (sk->sk_state != BT_BOUND)
248                         continue;
249
250                 if (hci_pi(sk)->channel != channel)
251                         continue;
252
253                 nskb = skb_clone(skb, GFP_ATOMIC);
254                 if (!nskb)
255                         continue;
256
257                 if (sock_queue_rcv_skb(sk, nskb))
258                         kfree_skb(nskb);
259         }
260
261         read_unlock(&hci_sk_list.lock);
262 }
263
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Fast path: nothing to do while no monitor socket is bound */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and, for data packets, direction) onto the
	 * monitor opcode; unknown packet types are not forwarded.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
317
/* Build a monitor-channel message (struct hci_mon_hdr + payload)
 * describing @event on @hdev.  Returns NULL for events that are not
 * reported on the monitor channel or on allocation failure.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* 0xffff presumably marks a manufacturer id that was
		 * never read from the controller - TODO confirm
		 */
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header in front of the payload */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
396
/* Replay the current controller state to a freshly bound monitor
 * socket: one NEW_INDEX per registered device, plus OPEN_INDEX and
 * INDEX_INFO/state events matching each device's present condition.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Devices that are not running produce no further events */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Report UP or SETUP state, whichever applies */
		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
438
/* Generate internal stack event */
/* Build a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes
 * of @data and deliver it to sockets via hci_send_to_sock().
 * @hdev may be NULL (the visible caller passes NULL for device
 * add/remove notifications).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming: hci_send_to_sock checks this for delivery */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
465
/* Fan out a device lifecycle @event: to the monitor channel, as a
 * stack-internal event for low-numbered events (up to HCI_DEV_DOWN)
 * and, on unregister, wake up sockets still bound to the dead device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device; they see EPIPE
		 * and may re-bind (see hci_sock_bind) or close.
		 */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
505
506 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
507 {
508         struct hci_mgmt_chan *c;
509
510         list_for_each_entry(c, &mgmt_chan_list, list) {
511                 if (c->channel == channel)
512                         return c;
513         }
514
515         return NULL;
516 }
517
518 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
519 {
520         struct hci_mgmt_chan *c;
521
522         mutex_lock(&mgmt_chan_list_lock);
523         c = __hci_mgmt_chan_find(channel);
524         mutex_unlock(&mgmt_chan_list_lock);
525
526         return c;
527 }
528
/* Register a management channel handler.
 *
 * Channel numbers below HCI_CHANNEL_CONTROL are rejected with -EINVAL
 * and an already-registered channel number with -EALREADY.
 */
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
547
/* Remove a previously registered management channel handler. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
555
/* Release an HCI socket: drop monitor accounting, detach from the
 * bound controller (shutting it down for user-channel sockets) and
 * purge queued packets.  Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		/* Matches the atomic_inc done in hci_sock_bind */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
600
601 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
602 {
603         bdaddr_t bdaddr;
604         int err;
605
606         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
607                 return -EFAULT;
608
609         hci_dev_lock(hdev);
610
611         err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
612
613         hci_dev_unlock(hdev);
614
615         return err;
616 }
617
618 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
619 {
620         bdaddr_t bdaddr;
621         int err;
622
623         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
624                 return -EFAULT;
625
626         hci_dev_lock(hdev);
627
628         err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
629
630         hci_dev_unlock(hdev);
631
632         return err;
633 }
634
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	/* Devices held exclusively by a user channel socket, still
	 * unconfigured, or not BR/EDR do not accept these ioctls.
	 */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Always rejected, but only after the capability check
		 * so unprivileged callers see -EPERM.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
678
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* Only raw-channel sockets support this legacy ioctl interface */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* Drop the socket lock for the commands below: they operate on
	 * global device state, not on this socket.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Everything else needs the socket's bound device */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
751
/* Bind a socket to a channel (and, for the RAW/USER channels, to a
 * device):
 *   RAW     - optional device, no capability required here
 *   USER    - exclusive device access, needs CAP_NET_ADMIN, opens the
 *             device
 *   MONITOR - no device, needs CAP_NET_RAW, replays current state
 *   other   - must be a registered management channel
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs; bytes beyond @addr_len read as zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE leaves hdev NULL; such sockets only match
		 * frames sent with a NULL hdev (see hci_send_to_sock).
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Exclusive access is refused while the device is
		 * initializing/being configured, or already up (unless
		 * the auto-off grace period is still active).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel socket per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Roll back the exclusive claim */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
941
942 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
943                             int *addr_len, int peer)
944 {
945         struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
946         struct sock *sk = sock->sk;
947         struct hci_dev *hdev;
948         int err = 0;
949
950         BT_DBG("sock %p sk %p", sock, sk);
951
952         if (peer)
953                 return -EOPNOTSUPP;
954
955         lock_sock(sk);
956
957         hdev = hci_hdev_from_sock(sk);
958         if (IS_ERR(hdev)) {
959                 err = PTR_ERR(hdev);
960                 goto done;
961         }
962
963         *addr_len = sizeof(*haddr);
964         haddr->hci_family = AF_BLUETOOTH;
965         haddr->hci_dev    = hdev->id;
966         haddr->hci_channel= hci_pi(sk)->channel;
967
968 done:
969         release_sock(sk);
970         return err;
971 }
972
/* Attach ancillary data (control messages) to a message being
 * delivered on a raw HCI socket, according to the per-socket
 * cmsg_mask configured via HCI_DATA_DIR / HCI_TIME_STAMP
 * setsockopt calls.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	/* Packet direction: non-zero when the frame came from the
	 * controller (incoming), zero for host-originated frames.
	 */
	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	/* Receive timestamp of the frame, converted to the compat
	 * timeval layout when the recipient is a 32-bit process.
	 */
	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit userspace expects the narrower compat_timeval
		 * in the cmsg payload, so copy the fields over.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1009
1010 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1011                             int flags)
1012 {
1013         int noblock = flags & MSG_DONTWAIT;
1014         struct sock *sk = sock->sk;
1015         struct sk_buff *skb;
1016         int copied, err;
1017
1018         BT_DBG("sock %p, sk %p", sock, sk);
1019
1020         if (flags & MSG_OOB)
1021                 return -EOPNOTSUPP;
1022
1023         if (sk->sk_state == BT_CLOSED)
1024                 return 0;
1025
1026         skb = skb_recv_datagram(sk, flags, noblock, &err);
1027         if (!skb)
1028                 return err;
1029
1030         copied = skb->len;
1031         if (len < copied) {
1032                 msg->msg_flags |= MSG_TRUNC;
1033                 copied = len;
1034         }
1035
1036         skb_reset_transport_header(skb);
1037         err = skb_copy_datagram_msg(skb, 0, msg, copied);
1038
1039         switch (hci_pi(sk)->channel) {
1040         case HCI_CHANNEL_RAW:
1041                 hci_sock_cmsg(sk, msg, skb);
1042                 break;
1043         case HCI_CHANNEL_USER:
1044         case HCI_CHANNEL_MONITOR:
1045                 sock_recv_timestamp(msg, sk, skb);
1046                 break;
1047         default:
1048                 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1049                         sock_recv_timestamp(msg, sk, skb);
1050                 break;
1051         }
1052
1053         skb_free_datagram(sk, skb);
1054
1055         return err ? : copied;
1056 }
1057
/* Parse and dispatch one management command sent on a management
 * channel socket.
 *
 * The caller (hci_sock_sendmsg) holds mgmt_chan_list_lock, which
 * keeps @chan and its handler table alive for the duration of the
 * call.
 *
 * Returns @msglen on success or a negative errno. Protocol-level
 * failures (unknown opcode, permission denied, invalid index or
 * parameter length) are reported back to the socket with
 * mgmt_cmd_status() and the status-send result becomes the return
 * value.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
                        struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	/* Every command starts with a fixed mgmt_hdr. */
	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	/* Header fields are little-endian on the wire. */
	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared payload length must match what was received. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only invoke handlers explicitly
	 * marked HCI_MGMT_UNTRUSTED.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	/* Resolve the controller index. hci_dev_get() takes a
	 * reference that is dropped at the done label.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a
		 * user channel socket, are not addressable via mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept handlers that
		 * opted in with HCI_MGMT_UNCONFIGURED.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* A handler either requires a controller or forbids one;
	 * reject any mismatch with the index the caller supplied.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands give a minimum size, fixed-length
	 * commands an exact one.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	/* Per-channel hook, run once a valid controller is known. */
	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1168
/* Transmit path for HCI sockets.
 *
 * Management channels hand the message to hci_mgmt_cmd(). Monitor
 * sockets cannot send. Raw and user channel sockets build an skb
 * whose first byte is the packet type indicator and route it to the
 * controller's raw or command queue.
 *
 * Returns the number of bytes accepted or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                            size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum of 4 bytes: packet type indicator plus the smallest
	 * possible HCI header that follows it.
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are read-only. */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Anything else must be a registered mgmt channel;
		 * the list lock keeps the channel alive while its
		 * handler runs.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	/* On failure err is set by bt_skb_send_alloc(). */
	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* The leading byte is the H:4 packet type indicator; strip it
	 * off and stash it in the control buffer.
	 */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Raw sockets may only send commands allowed by the
		 * security filter unless they have CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) bypass the
		 * command queue and go straight to the raw queue.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets require CAP_NET_RAW and a valid
		 * ACL or SCO packet type.
		 */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1300
1301 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1302                                char __user *optval, unsigned int len)
1303 {
1304         struct hci_ufilter uf = { .opcode = 0 };
1305         struct sock *sk = sock->sk;
1306         int err = 0, opt = 0;
1307
1308         BT_DBG("sk %p, opt %d", sk, optname);
1309
1310         lock_sock(sk);
1311
1312         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1313                 err = -EBADFD;
1314                 goto done;
1315         }
1316
1317         switch (optname) {
1318         case HCI_DATA_DIR:
1319                 if (get_user(opt, (int __user *)optval)) {
1320                         err = -EFAULT;
1321                         break;
1322                 }
1323
1324                 if (opt)
1325                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1326                 else
1327                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1328                 break;
1329
1330         case HCI_TIME_STAMP:
1331                 if (get_user(opt, (int __user *)optval)) {
1332                         err = -EFAULT;
1333                         break;
1334                 }
1335
1336                 if (opt)
1337                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1338                 else
1339                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1340                 break;
1341
1342         case HCI_FILTER:
1343                 {
1344                         struct hci_filter *f = &hci_pi(sk)->filter;
1345
1346                         uf.type_mask = f->type_mask;
1347                         uf.opcode    = f->opcode;
1348                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1349                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1350                 }
1351
1352                 len = min_t(unsigned int, len, sizeof(uf));
1353                 if (copy_from_user(&uf, optval, len)) {
1354                         err = -EFAULT;
1355                         break;
1356                 }
1357
1358                 if (!capable(CAP_NET_RAW)) {
1359                         uf.type_mask &= hci_sec_filter.type_mask;
1360                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1361                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1362                 }
1363
1364                 {
1365                         struct hci_filter *f = &hci_pi(sk)->filter;
1366
1367                         f->type_mask = uf.type_mask;
1368                         f->opcode    = uf.opcode;
1369                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1370                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1371                 }
1372                 break;
1373
1374         default:
1375                 err = -ENOPROTOOPT;
1376                 break;
1377         }
1378
1379 done:
1380         release_sock(sk);
1381         return err;
1382 }
1383
1384 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1385                                char __user *optval, int __user *optlen)
1386 {
1387         struct hci_ufilter uf;
1388         struct sock *sk = sock->sk;
1389         int len, opt, err = 0;
1390
1391         BT_DBG("sk %p, opt %d", sk, optname);
1392
1393         if (get_user(len, optlen))
1394                 return -EFAULT;
1395
1396         lock_sock(sk);
1397
1398         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1399                 err = -EBADFD;
1400                 goto done;
1401         }
1402
1403         switch (optname) {
1404         case HCI_DATA_DIR:
1405                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1406                         opt = 1;
1407                 else
1408                         opt = 0;
1409
1410                 if (put_user(opt, optval))
1411                         err = -EFAULT;
1412                 break;
1413
1414         case HCI_TIME_STAMP:
1415                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1416                         opt = 1;
1417                 else
1418                         opt = 0;
1419
1420                 if (put_user(opt, optval))
1421                         err = -EFAULT;
1422                 break;
1423
1424         case HCI_FILTER:
1425                 {
1426                         struct hci_filter *f = &hci_pi(sk)->filter;
1427
1428                         memset(&uf, 0, sizeof(uf));
1429                         uf.type_mask = f->type_mask;
1430                         uf.opcode    = f->opcode;
1431                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1432                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1433                 }
1434
1435                 len = min_t(unsigned int, len, sizeof(uf));
1436                 if (copy_to_user(optval, &uf, len))
1437                         err = -EFAULT;
1438                 break;
1439
1440         default:
1441                 err = -ENOPROTOOPT;
1442                 break;
1443         }
1444
1445 done:
1446         release_sock(sk);
1447         return err;
1448 }
1449
/* Socket operations for HCI sockets. Connection-oriented entry
 * points (listen, connect, accept, ...) are stubbed out with the
 * sock_no_* helpers since HCI sockets are connectionless.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1469
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for
 * struct hci_pinfo, which embeds struct bt_sock as its first member
 * (see the hci_pi() cast).
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1475
1476 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1477                            int kern)
1478 {
1479         struct sock *sk;
1480
1481         BT_DBG("sock %p", sock);
1482
1483         if (sock->type != SOCK_RAW)
1484                 return -ESOCKTNOSUPPORT;
1485
1486         sock->ops = &hci_sock_ops;
1487
1488         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1489         if (!sk)
1490                 return -ENOMEM;
1491
1492         sock_init_data(sock, sk);
1493
1494         sock_reset_flag(sk, SOCK_ZAPPED);
1495
1496         sk->sk_protocol = protocol;
1497
1498         sock->state = SS_UNCONNECTED;
1499         sk->sk_state = BT_OPEN;
1500
1501         bt_sock_link(&hci_sk_list, sk);
1502         return 0;
1503 }
1504
/* Family handler registered with bt_sock_register() so that
 * socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI) lands in
 * hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1510
1511 int __init hci_sock_init(void)
1512 {
1513         int err;
1514
1515         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1516
1517         err = proto_register(&hci_sk_proto, 0);
1518         if (err < 0)
1519                 return err;
1520
1521         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1522         if (err < 0) {
1523                 BT_ERR("HCI socket registration failed");
1524                 goto error;
1525         }
1526
1527         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1528         if (err < 0) {
1529                 BT_ERR("Failed to create HCI proc file");
1530                 bt_sock_unregister(BTPROTO_HCI);
1531                 goto error;
1532         }
1533
1534         BT_INFO("HCI socket layer initialized");
1535
1536         return 0;
1537
1538 error:
1539         proto_unregister(&hci_sk_proto);
1540         return err;
1541 }
1542
/* Tear down everything hci_sock_init() registered, in reverse
 * order: proc entry, socket family, then the protocol itself.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}