GNU Linux-libre 4.14.313-gnu1
[releases.git] / net / bluetooth / hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
36
37 #include "mgmt_util.h"
38
/* Registered HCI management channels and the mutex guarding the list */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Allocator for per-socket monitor tracing cookies */
static DEFINE_IDA(sock_cookie_ida);

/* Count of promiscuous monitor listeners; hci_send_to_monitor() only
 * mirrors traffic while this is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
45
46 /* ----- HCI socket interface ----- */
47
48 /* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket protocol state for HCI sockets. */
struct hci_pinfo {
	struct bt_sock    bt;	/* must stay first: hci_pi() casts sock* */
	struct hci_dev    *hdev;	/* bound controller, NULL if unbound */
	struct hci_filter filter;	/* RAW channel filter (is_filtered_packet) */
	__u32             cmsg_mask;	/* requested ancillary data - TODO confirm bits */
	unsigned short    channel;	/* HCI_CHANNEL_* the socket is bound to */
	unsigned long     flags;	/* HCI_SOCK_* bits (set/clear/test helpers) */
	__u32             cookie;	/* monitor tracing id (hci_sock_gen_cookie) */
	char              comm[TASK_COMM_LEN];	/* task name at cookie creation */
};
61
62 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
63 {
64         struct hci_dev *hdev = hci_pi(sk)->hdev;
65
66         if (!hdev)
67                 return ERR_PTR(-EBADFD);
68         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
69                 return ERR_PTR(-EPIPE);
70         return hdev;
71 }
72
73 void hci_sock_set_flag(struct sock *sk, int nr)
74 {
75         set_bit(nr, &hci_pi(sk)->flags);
76 }
77
78 void hci_sock_clear_flag(struct sock *sk, int nr)
79 {
80         clear_bit(nr, &hci_pi(sk)->flags);
81 }
82
83 int hci_sock_test_flag(struct sock *sk, int nr)
84 {
85         return test_bit(nr, &hci_pi(sk)->flags);
86 }
87
88 unsigned short hci_sock_get_channel(struct sock *sk)
89 {
90         return hci_pi(sk)->channel;
91 }
92
93 u32 hci_sock_get_cookie(struct sock *sk)
94 {
95         return hci_pi(sk)->cookie;
96 }
97
98 static bool hci_sock_gen_cookie(struct sock *sk)
99 {
100         int id = hci_pi(sk)->cookie;
101
102         if (!id) {
103                 id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
104                 if (id < 0)
105                         id = 0xffffffff;
106
107                 hci_pi(sk)->cookie = id;
108                 get_task_comm(hci_pi(sk)->comm, current);
109                 return true;
110         }
111
112         return false;
113 }
114
115 static void hci_sock_free_cookie(struct sock *sk)
116 {
117         int id = hci_pi(sk)->cookie;
118
119         if (id) {
120                 hci_pi(sk)->cookie = 0xffffffff;
121                 ida_simple_remove(&sock_cookie_ida, id);
122         }
123 }
124
125 static inline int hci_test_bit(int nr, const void *addr)
126 {
127         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
128 }
129
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Bitmap description of what an unprivileged socket may send/receive:
 * one bit per packet type, per event, and per command OCF within each
 * OGF row.
 */
struct hci_sec_filter {
	__u32 type_mask;	/* permitted HCI packet types */
	__u32 event_mask[2];	/* permitted HCI events (64-bit bitmap) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* permitted OCFs, indexed by OGF */
};
138
/* Static default security filter table; each value is a bitmap where a
 * set bit permits the corresponding packet type, event or command.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
159
/* Global list of open HCI sockets, guarded by its embedded rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
163
164 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
165 {
166         struct hci_filter *flt;
167         int flt_type, flt_event;
168
169         /* Apply filter */
170         flt = &hci_pi(sk)->filter;
171
172         flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
173
174         if (!test_bit(flt_type, &flt->type_mask))
175                 return true;
176
177         /* Extra filter for event packets only */
178         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
179                 return false;
180
181         flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
182
183         if (!hci_test_bit(flt_event, &flt->event_mask))
184                 return true;
185
186         /* Check filter only when opcode is set */
187         if (!flt->opcode)
188                 return false;
189
190         if (flt_event == HCI_EV_CMD_COMPLETE &&
191             flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
192                 return true;
193
194         if (flt_event == HCI_EV_CMD_STATUS &&
195             flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
196                 return true;
197
198         return false;
199 }
200
/* Send frame to RAW socket */
/* Deliver @skb (originating on @hdev) to all bound RAW and USER
 * channel sockets, excluding the socket the frame came from.  A single
 * private copy with the packet-type byte pushed in front is created
 * lazily and shared between receivers via skb_clone().
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only bound sockets attached to this controller */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* RAW sees the four core packet types, subject
			 * to the per-socket filter.
			 */
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel only receives incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Drop our reference; clones queued above keep the data alive */
	kfree_skb(skb_copy);
}
263
264 /* Send frame to sockets with specific channel */
265 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
266                          int flag, struct sock *skip_sk)
267 {
268         struct sock *sk;
269
270         BT_DBG("channel %u len %d", channel, skb->len);
271
272         read_lock(&hci_sk_list.lock);
273
274         sk_for_each(sk, &hci_sk_list.head) {
275                 struct sk_buff *nskb;
276
277                 /* Ignore socket without the flag set */
278                 if (!hci_sock_test_flag(sk, flag))
279                         continue;
280
281                 /* Skip the original socket */
282                 if (sk == skip_sk)
283                         continue;
284
285                 if (sk->sk_state != BT_BOUND)
286                         continue;
287
288                 if (hci_pi(sk)->channel != channel)
289                         continue;
290
291                 nskb = skb_clone(skb, GFP_ATOMIC);
292                 if (!nskb)
293                         continue;
294
295                 if (sock_queue_rcv_skb(sk, nskb))
296                         kfree_skb(nskb);
297         }
298
299         read_unlock(&hci_sk_list.lock);
300 }
301
/* Send frame to monitor socket */
/* Mirror @skb to the monitor channel with an hci_mon_hdr prepended.
 * Returns early (cheaply) when no monitor is in promiscuous mode, and
 * silently drops packet types the monitor protocol has no opcode for.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction for data packets) to the
	 * monitor protocol opcode.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
355
/* Broadcast a management control event (HCI_MON_CTRL_EVENT) to the
 * monitor channel, once per matching control socket so each copy can
 * be tagged with that socket's cookie.  Only control sockets carrying
 * @flag are considered, @skip_sk is excluded, and a NULL @hdev maps
 * the index to MGMT_INDEX_NONE.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* 4 bytes cookie + 2 bytes event code + payload */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		/* Preserve the caller-supplied event timestamp */
		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
409
/* Build a monitor message describing a device lifecycle @event for
 * @hdev.  Returns a freshly allocated, timestamped skb with the
 * monitor header pushed, or NULL on unknown events, allocation
 * failure, or a SETUP event while the manufacturer is still unknown.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Name field is fixed at 8 bytes in the wire format */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* Header-only message */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* No index info until the manufacturer is known */
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
488
489 static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
490 {
491         struct hci_mon_hdr *hdr;
492         struct sk_buff *skb;
493         u16 format;
494         u8 ver[3];
495         u32 flags;
496
497         /* No message needed when cookie is not present */
498         if (!hci_pi(sk)->cookie)
499                 return NULL;
500
501         switch (hci_pi(sk)->channel) {
502         case HCI_CHANNEL_RAW:
503                 format = 0x0000;
504                 ver[0] = BT_SUBSYS_VERSION;
505                 put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
506                 break;
507         case HCI_CHANNEL_USER:
508                 format = 0x0001;
509                 ver[0] = BT_SUBSYS_VERSION;
510                 put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
511                 break;
512         case HCI_CHANNEL_CONTROL:
513                 format = 0x0002;
514                 mgmt_fill_version_info(ver);
515                 break;
516         default:
517                 /* No message for unsupported format */
518                 return NULL;
519         }
520
521         skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
522         if (!skb)
523                 return NULL;
524
525         flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
526
527         put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
528         put_unaligned_le16(format, skb_put(skb, 2));
529         skb_put_data(skb, ver, sizeof(ver));
530         put_unaligned_le32(flags, skb_put(skb, 4));
531         skb_put_u8(skb, TASK_COMM_LEN);
532         skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
533
534         __net_timestamp(skb);
535
536         hdr = skb_push(skb, HCI_MON_HDR_SIZE);
537         hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
538         if (hci_pi(sk)->hdev)
539                 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
540         else
541                 hdr->index = cpu_to_le16(HCI_DEV_NONE);
542         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
543
544         return skb;
545 }
546
547 static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
548 {
549         struct hci_mon_hdr *hdr;
550         struct sk_buff *skb;
551
552         /* No message needed when cookie is not present */
553         if (!hci_pi(sk)->cookie)
554                 return NULL;
555
556         switch (hci_pi(sk)->channel) {
557         case HCI_CHANNEL_RAW:
558         case HCI_CHANNEL_USER:
559         case HCI_CHANNEL_CONTROL:
560                 break;
561         default:
562                 /* No message for unsupported format */
563                 return NULL;
564         }
565
566         skb = bt_skb_alloc(4, GFP_ATOMIC);
567         if (!skb)
568                 return NULL;
569
570         put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
571
572         __net_timestamp(skb);
573
574         hdr = skb_push(skb, HCI_MON_HDR_SIZE);
575         hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
576         if (hci_pi(sk)->hdev)
577                 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
578         else
579                 hdr->index = cpu_to_le16(HCI_DEV_NONE);
580         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
581
582         return skb;
583 }
584
585 static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
586                                                    u16 opcode, u16 len,
587                                                    const void *buf)
588 {
589         struct hci_mon_hdr *hdr;
590         struct sk_buff *skb;
591
592         skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
593         if (!skb)
594                 return NULL;
595
596         put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
597         put_unaligned_le16(opcode, skb_put(skb, 2));
598
599         if (buf)
600                 skb_put_data(skb, buf, len);
601
602         __net_timestamp(skb);
603
604         hdr = skb_push(skb, HCI_MON_HDR_SIZE);
605         hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
606         hdr->index = cpu_to_le16(index);
607         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
608
609         return skb;
610 }
611
612 static void __printf(2, 3)
613 send_monitor_note(struct sock *sk, const char *fmt, ...)
614 {
615         size_t len;
616         struct hci_mon_hdr *hdr;
617         struct sk_buff *skb;
618         va_list args;
619
620         va_start(args, fmt);
621         len = vsnprintf(NULL, 0, fmt, args);
622         va_end(args);
623
624         skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
625         if (!skb)
626                 return;
627
628         va_start(args, fmt);
629         vsprintf(skb_put(skb, len), fmt, args);
630         *(u8 *)skb_put(skb, 1) = 0;
631         va_end(args);
632
633         __net_timestamp(skb);
634
635         hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
636         hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
637         hdr->index = cpu_to_le16(HCI_DEV_NONE);
638         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
639
640         if (sock_queue_rcv_skb(sk, skb))
641                 kfree_skb(skb);
642 }
643
/* Replay the current controller state to a newly attached monitor
 * socket @sk: a NEW_INDEX message per registered device, plus
 * OPEN_INDEX when the device is running and INDEX_INFO for devices
 * that are up or still in setup.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Devices that are not running have nothing more to say */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* UP devices get index info; devices still in setup get
		 * the setup variant (which may itself return NULL).
		 */
		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
685
686 static void send_monitor_control_replay(struct sock *mon_sk)
687 {
688         struct sock *sk;
689
690         read_lock(&hci_sk_list.lock);
691
692         sk_for_each(sk, &hci_sk_list.head) {
693                 struct sk_buff *skb;
694
695                 skb = create_monitor_ctrl_open(sk);
696                 if (!skb)
697                         continue;
698
699                 if (sock_queue_rcv_skb(mon_sk, skb))
700                         kfree_skb(skb);
701         }
702
703         read_unlock(&hci_sk_list.lock);
704 }
705
/* Generate internal stack event */
/* Synthesize an HCI event frame carrying a stack-internal event
 * (@type plus @dlen bytes of @data) and deliver it through
 * hci_send_to_sock().  @hdev may be NULL for device-independent
 * events (see hci_sock_dev_event()).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark incoming so channel delivery rules treat it as received */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
732
/* Propagate a device lifecycle @event for @hdev: mirror it to the
 * monitor channel, emit a stack-internal event for events up to
 * HCI_DEV_DOWN, and on HCI_DEV_UNREG wake every socket bound to the
 * dying device with EPIPE (subsequent operations then fail via
 * hci_hdev_from_sock()).
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
772
773 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
774 {
775         struct hci_mgmt_chan *c;
776
777         list_for_each_entry(c, &mgmt_chan_list, list) {
778                 if (c->channel == channel)
779                         return c;
780         }
781
782         return NULL;
783 }
784
785 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
786 {
787         struct hci_mgmt_chan *c;
788
789         mutex_lock(&mgmt_chan_list_lock);
790         c = __hci_mgmt_chan_find(channel);
791         mutex_unlock(&mgmt_chan_list_lock);
792
793         return c;
794 }
795
796 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
797 {
798         if (c->channel < HCI_CHANNEL_CONTROL)
799                 return -EINVAL;
800
801         mutex_lock(&mgmt_chan_list_lock);
802         if (__hci_mgmt_chan_find(c->channel)) {
803                 mutex_unlock(&mgmt_chan_list_lock);
804                 return -EALREADY;
805         }
806
807         list_add_tail(&c->list, &mgmt_chan_list);
808
809         mutex_unlock(&mgmt_chan_list_lock);
810
811         return 0;
812 }
813 EXPORT_SYMBOL(hci_mgmt_chan_register);
814
/* Remove a previously registered management channel handler. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
822
/* Release an HCI socket: announce the close to monitors, free the
 * tracing cookie, unlink from the global socket list and drop the
 * device reference.  USER channel sockets additionally shut the
 * controller down and hand it back to the management interface.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		/* One less promiscuous monitor listener */
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
883
884 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
885 {
886         bdaddr_t bdaddr;
887         int err;
888
889         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
890                 return -EFAULT;
891
892         hci_dev_lock(hdev);
893
894         err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
895
896         hci_dev_unlock(hdev);
897
898         return err;
899 }
900
901 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
902 {
903         bdaddr_t bdaddr;
904         int err;
905
906         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
907                 return -EFAULT;
908
909         hci_dev_lock(hdev);
910
911         err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
912
913         hci_dev_unlock(hdev);
914
915         return err;
916 }
917
/* Ioctls that require bound socket */
/* Dispatch ioctls that need a bound, configured primary controller.
 * Fails with -EBUSY while the device is held by a user channel, and
 * -EOPNOTSUPP for unconfigured or non-primary devices.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Privilege check first so callers learn EPERM even
		 * though the operation itself is no longer supported.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
961
/* Top-level ioctl entry point for HCI sockets. Commands that do not
 * need a bound hdev are dispatched with the socket lock dropped;
 * everything else is forwarded to hci_sock_bound_ioctl() with the
 * lock re-acquired.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* Only raw channel sockets support the legacy ioctl interface */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only send once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* The commands below don't use the socket's hdev; drop the lock
	 * before dispatching them.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need the socket's bound hdev; re-take the
	 * lock for hci_sock_bound_ioctl().
	 */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1055
/* Bind an HCI socket to a channel (and, for raw/user channels, to a
 * specific controller). Performs per-channel capability checks, takes
 * a device reference where needed, and emits open/close notifications
 * to the monitor channel. On success the socket moves to BT_BOUND.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr) bytes; short sockaddrs leave the
	 * remainder zeroed by the memset above it.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE means an unbound raw socket that sees all
		 * devices; otherwise take a reference on the requested one.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggerd an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* The user channel requires a specific device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse to take over a device that is initializing, being
		 * set up/configured, or already up (unless it is merely in
		 * the HCI_AUTO_OFF grace period).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Atomically claim exclusive user channel ownership */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the device from the mgmt interface while it is
		 * owned by the user channel.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Roll back ownership and mgmt visibility */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is never bound to a single device */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay current state to the new monitor before it starts
		 * receiving live traffic.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		/* The logging channel is never bound to a single device */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		/* Anything else must be a registered mgmt channel */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transtion from
				 * a raw socket into a control socket. To
				 * allow for a clean transtion, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1351
1352 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1353                             int *addr_len, int peer)
1354 {
1355         struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1356         struct sock *sk = sock->sk;
1357         struct hci_dev *hdev;
1358         int err = 0;
1359
1360         BT_DBG("sock %p sk %p", sock, sk);
1361
1362         if (peer)
1363                 return -EOPNOTSUPP;
1364
1365         lock_sock(sk);
1366
1367         hdev = hci_hdev_from_sock(sk);
1368         if (IS_ERR(hdev)) {
1369                 err = PTR_ERR(hdev);
1370                 goto done;
1371         }
1372
1373         *addr_len = sizeof(*haddr);
1374         haddr->hci_family = AF_BLUETOOTH;
1375         haddr->hci_dev    = hdev->id;
1376         haddr->hci_channel= hci_pi(sk)->channel;
1377
1378 done:
1379         release_sock(sk);
1380         return err;
1381 }
1382
/* Attach the ancillary data (packet direction and/or timestamp) that
 * the socket requested via its cmsg_mask to a received message.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		/* Report whether the packet came from the controller */
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks on a 64-bit kernel expect a compat-sized
		 * timeval unless they use 64-bit time.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1419
/* Receive one queued packet from an HCI socket. Truncates to the
 * caller's buffer (setting MSG_TRUNC), and attaches per-channel
 * ancillary data: cmsg direction/timestamp for raw sockets, plain
 * socket timestamps for user/monitor/mgmt channels.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* The logging channel is write-only */
	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Remember the full packet length so MSG_TRUNC can report it */
	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	/* With MSG_TRUNC, return the original packet length */
	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
1475
/* Parse and dispatch one management command from a mgmt-channel
 * socket. Validates the header length, opcode range, caller trust,
 * device index and payload length before invoking the handler. On
 * success returns msglen; validation failures are reported back to
 * the sender via mgmt_cmd_status() and still return its result.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared payload length must match what was received */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Bounds-check the opcode against the channel's handler table */
	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue handlers explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still in setup/config, or owned by a user
		 * channel, are not addressable via mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured devices only accept handlers marked for
		 * unconfigured use.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* The handler's no-hdev flag must agree with whether an index
	 * (and thus an hdev) was supplied.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length handlers take at least data_len bytes;
	 * fixed-length handlers take exactly data_len.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1599
/* Handle a frame written to the logging channel: validate the packet
 * layout (priority byte, ident length/string, NUL-terminated message),
 * rewrite the opcode to HCI_MON_USER_LOGGING and forward it to the
 * monitor channel. Returns len on success or a negative error.
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	/* Declared payload length must match the received data */
	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	/* Only opcode 0x0000 is accepted on this channel */
	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* An explicit index must refer to an existing controller;
	 * MGMT_INDEX_NONE logs without a device association.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	/* hci_send_to_channel() distributes clones; our skb is still
	 * freed below via the drop label on success as well.
	 */
	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
1683
/* Send path for HCI sockets. Routes the message by channel: raw/user
 * channel frames go to the controller queues, logging frames to the
 * monitor, and mgmt-channel messages to hci_mgmt_cmd(). Raw-channel
 * HCI commands are checked against the security filter unless the
 * caller has CAP_NET_RAW.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum of packet type byte plus a 3-byte header; maximum of
	 * one HCI frame.
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is read-only */
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		/* Anything else must be a registered mgmt channel */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	/* bt_skb_send_alloc() sets err on failure */
	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte is the packet type indicator; strip it off */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Without CAP_NET_RAW, only commands permitted by the
		 * security filter's per-OGF OCF bitmask may be sent.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		/* Vendor-specific commands (OGF 0x3f) bypass the command
		 * queue and go straight to the raw queue.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets require CAP_NET_RAW and must be ACL
		 * or SCO data.
		 */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1823
1824 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1825                                char __user *optval, unsigned int len)
1826 {
1827         struct hci_ufilter uf = { .opcode = 0 };
1828         struct sock *sk = sock->sk;
1829         int err = 0, opt = 0;
1830
1831         BT_DBG("sk %p, opt %d", sk, optname);
1832
1833         if (level != SOL_HCI)
1834                 return -ENOPROTOOPT;
1835
1836         lock_sock(sk);
1837
1838         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1839                 err = -EBADFD;
1840                 goto done;
1841         }
1842
1843         switch (optname) {
1844         case HCI_DATA_DIR:
1845                 if (get_user(opt, (int __user *)optval)) {
1846                         err = -EFAULT;
1847                         break;
1848                 }
1849
1850                 if (opt)
1851                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1852                 else
1853                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1854                 break;
1855
1856         case HCI_TIME_STAMP:
1857                 if (get_user(opt, (int __user *)optval)) {
1858                         err = -EFAULT;
1859                         break;
1860                 }
1861
1862                 if (opt)
1863                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1864                 else
1865                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1866                 break;
1867
1868         case HCI_FILTER:
1869                 {
1870                         struct hci_filter *f = &hci_pi(sk)->filter;
1871
1872                         uf.type_mask = f->type_mask;
1873                         uf.opcode    = f->opcode;
1874                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1875                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1876                 }
1877
1878                 len = min_t(unsigned int, len, sizeof(uf));
1879                 if (copy_from_user(&uf, optval, len)) {
1880                         err = -EFAULT;
1881                         break;
1882                 }
1883
1884                 if (!capable(CAP_NET_RAW)) {
1885                         uf.type_mask &= hci_sec_filter.type_mask;
1886                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1887                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1888                 }
1889
1890                 {
1891                         struct hci_filter *f = &hci_pi(sk)->filter;
1892
1893                         f->type_mask = uf.type_mask;
1894                         f->opcode    = uf.opcode;
1895                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1896                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1897                 }
1898                 break;
1899
1900         default:
1901                 err = -ENOPROTOOPT;
1902                 break;
1903         }
1904
1905 done:
1906         release_sock(sk);
1907         return err;
1908 }
1909
1910 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1911                                char __user *optval, int __user *optlen)
1912 {
1913         struct hci_ufilter uf;
1914         struct sock *sk = sock->sk;
1915         int len, opt, err = 0;
1916
1917         BT_DBG("sk %p, opt %d", sk, optname);
1918
1919         if (level != SOL_HCI)
1920                 return -ENOPROTOOPT;
1921
1922         if (get_user(len, optlen))
1923                 return -EFAULT;
1924
1925         lock_sock(sk);
1926
1927         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1928                 err = -EBADFD;
1929                 goto done;
1930         }
1931
1932         switch (optname) {
1933         case HCI_DATA_DIR:
1934                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1935                         opt = 1;
1936                 else
1937                         opt = 0;
1938
1939                 if (put_user(opt, optval))
1940                         err = -EFAULT;
1941                 break;
1942
1943         case HCI_TIME_STAMP:
1944                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1945                         opt = 1;
1946                 else
1947                         opt = 0;
1948
1949                 if (put_user(opt, optval))
1950                         err = -EFAULT;
1951                 break;
1952
1953         case HCI_FILTER:
1954                 {
1955                         struct hci_filter *f = &hci_pi(sk)->filter;
1956
1957                         memset(&uf, 0, sizeof(uf));
1958                         uf.type_mask = f->type_mask;
1959                         uf.opcode    = f->opcode;
1960                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1961                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1962                 }
1963
1964                 len = min_t(unsigned int, len, sizeof(uf));
1965                 if (copy_to_user(optval, &uf, len))
1966                         err = -EFAULT;
1967                 break;
1968
1969         default:
1970                 err = -ENOPROTOOPT;
1971                 break;
1972         }
1973
1974 done:
1975         release_sock(sk);
1976         return err;
1977 }
1978
/* Socket destructor, installed as sk->sk_destruct by hci_sock_create().
 * Frees any skbs still queued on the receive and write queues when the
 * last reference to the socket is dropped.
 */
static void hci_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
1984
/* proto_ops for HCI sockets.  HCI sockets are raw/datagram-style and
 * connectionless, so all connection-oriented operations are stubbed
 * out with the generic sock_no_* helpers.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
2004
/* Protocol descriptor for HCI sockets; obj_size makes sk_alloc()
 * allocate the full struct hci_pinfo (which embeds struct bt_sock).
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
2010
/* Create a new PF_BLUETOOTH/BTPROTO_HCI socket.  Only SOCK_RAW is
 * supported.  The new socket starts in BT_OPEN state (unbound) and is
 * linked into the global hci_sk_list.
 *
 * Returns 0 on success, -ESOCKTNOSUPPORT for a non-raw type, or
 * -ENOMEM if the sock allocation fails.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;
	/* Purge queued skbs when the last socket reference goes away. */
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
2040
/* Family descriptor registered with bt_sock_register() for BTPROTO_HCI;
 * routes socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI) to hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner  = THIS_MODULE,
	.create = hci_sock_create,
};
2046
/* Initialize the HCI socket layer: register the protocol, hook it into
 * the Bluetooth socket family, and create the /proc entry.  On any
 * failure every step already taken is unwound in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
int __init hci_sock_init(void)
{
	int err;

	/* sockaddr_hci must fit inside the generic sockaddr passed
	 * through the socket syscalls.
	 */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* Undo the family registration before the common unwind. */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
2078
/* Tear down the HCI socket layer, undoing hci_sock_init() in reverse
 * order: proc entry first, then the socket family, then the protocol.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}