GNU Linux-libre 6.1.90-gnu
[releases.git] / net / bluetooth / hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26 #include <linux/compat.h>
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
36
37 #include "mgmt_util.h"
38
/* Registry of mgmt channels (HCI_CHANNEL_CONTROL and above),
 * protected by mgmt_chan_list_lock.
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Allocator for per-socket monitor cookies; IDs start at 1 so that
 * a cookie of 0 always means "none assigned".
 */
static DEFINE_IDA(sock_cookie_ida);

/* Count of monitor sockets in promiscuous mode; monitor trace work
 * is skipped entirely while this is zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
45
46 /* ----- HCI socket interface ----- */
47
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket state for HCI sockets. The embedded bt_sock must stay the
 * first member so the hci_pi() cast from struct sock is valid.
 */
struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;	/* bound controller, NULL if unbound */
	struct hci_filter filter;	/* RAW-channel packet/event filter */
	__u8              cmsg_mask;	/* requested control messages (HCI_CMSG_*) */
	unsigned short    channel;	/* HCI_CHANNEL_* this socket is bound to */
	unsigned long     flags;	/* HCI_SOCK_* flag bits (atomic bitops) */
	__u32             cookie;	/* monitor tracing ID, 0 = unassigned */
	char              comm[TASK_COMM_LEN];	/* task name captured with cookie */
	__u16             mtu;		/* per-socket MTU; enforcement happens in
					 * send path outside this chunk — confirm */
};
62
63 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
64 {
65         struct hci_dev *hdev = hci_pi(sk)->hdev;
66
67         if (!hdev)
68                 return ERR_PTR(-EBADFD);
69         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
70                 return ERR_PTR(-EPIPE);
71         return hdev;
72 }
73
/* Atomically set HCI_SOCK_* flag bit @nr on @sk. */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
78
/* Atomically clear HCI_SOCK_* flag bit @nr on @sk. */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
83
/* Return non-zero if HCI_SOCK_* flag bit @nr is set on @sk. */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
88
/* Return the HCI_CHANNEL_* the socket is bound to. */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
93
/* Return the socket's monitor cookie (0 when none was generated). */
u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
98
99 static bool hci_sock_gen_cookie(struct sock *sk)
100 {
101         int id = hci_pi(sk)->cookie;
102
103         if (!id) {
104                 id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
105                 if (id < 0)
106                         id = 0xffffffff;
107
108                 hci_pi(sk)->cookie = id;
109                 get_task_comm(hci_pi(sk)->comm, current);
110                 return true;
111         }
112
113         return false;
114 }
115
116 static void hci_sock_free_cookie(struct sock *sk)
117 {
118         int id = hci_pi(sk)->cookie;
119
120         if (id) {
121                 hci_pi(sk)->cookie = 0xffffffff;
122                 ida_simple_remove(&sock_cookie_ida, id);
123         }
124 }
125
126 static inline int hci_test_bit(int nr, const void *addr)
127 {
128         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
129 }
130
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Bitmap allow-lists describing which packet types, events and
 * commands are considered safe. Presumably enforced for unprivileged
 * raw sockets — the consumer is outside this chunk; confirm there.
 */
struct hci_sec_filter {
	__u32 type_mask;	/* allowed packet types (one bit per type) */
	__u32 event_mask[2];	/* allowed event codes (64-bit bitmap) */
	/* allowed OCFs, one 128-bit bitmap per OGF (index 0 unused) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
139
/* Default security allow-list. Each hex word is a bitmap; a set bit
 * permits the corresponding packet type, event code or command OCF.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
160
/* Global list of all open HCI sockets, guarded by its embedded rwlock;
 * every delivery path below walks it under read_lock().
 */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
164
/* Return true when @skb must be dropped for @sk according to the
 * socket's hci_filter (type mask, event mask, optional opcode match).
 * Returning false means "deliver".
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	/* Packet type not enabled in the type mask -> filter it out */
	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	/* First payload byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Opcode of the completed command lives at payload offset 3 for
	 * Command Complete and offset 4 for Command Status; drop events
	 * that report a different opcode than the one filtered for.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
201
/* Send frame to RAW socket.
 *
 * Deliver @skb to every bound RAW/USER socket attached to @hdev,
 * except the socket the frame originated from. A private copy with
 * one byte of headroom (for the packet-type byte) is created lazily,
 * only once a qualifying socket is found; each receiver then gets its
 * own clone of that copy.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only bound sockets attached to this controller */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* RAW sees both directions, restricted to the
			 * well-known packet types and the socket filter.
			 */
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel only sees incoming traffic, and
			 * never command packets.
			 */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() takes its own reference on
		 * success; free our clone only when queueing failed.
		 */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
266
267 /* Send frame to sockets with specific channel */
268 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
269                                   int flag, struct sock *skip_sk)
270 {
271         struct sock *sk;
272
273         BT_DBG("channel %u len %d", channel, skb->len);
274
275         sk_for_each(sk, &hci_sk_list.head) {
276                 struct sk_buff *nskb;
277
278                 /* Ignore socket without the flag set */
279                 if (!hci_sock_test_flag(sk, flag))
280                         continue;
281
282                 /* Skip the original socket */
283                 if (sk == skip_sk)
284                         continue;
285
286                 if (sk->sk_state != BT_BOUND)
287                         continue;
288
289                 if (hci_pi(sk)->channel != channel)
290                         continue;
291
292                 nskb = skb_clone(skb, GFP_ATOMIC);
293                 if (!nskb)
294                         continue;
295
296                 if (sock_queue_rcv_skb(sk, nskb))
297                         kfree_skb(nskb);
298         }
299
300 }
301
/* Locked wrapper around __hci_send_to_channel(): deliver @skb to all
 * bound sockets on @channel carrying @flag, except @skip_sk.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
309
/* Send frame to monitor socket.
 *
 * Wrap @skb in an hci_mon_hdr (opcode chosen from packet type and
 * direction) and broadcast it to trusted HCI_CHANNEL_MONITOR sockets.
 * Cheap no-op while no monitor is in promiscuous mode.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and RX/TX direction for data packets) to the
	 * monitor protocol opcode; unknown types are not traced.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
369
/* Broadcast a HCI_MON_CTRL_EVENT to the monitor channel for every
 * HCI_CHANNEL_CONTROL socket that carries @flag (except @skip_sk).
 *
 * One monitor frame is built per matching control socket so that each
 * frame carries that socket's cookie; @tstamp stamps the frames and
 * @hdev (may be NULL) selects the reported controller index.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* 6 bytes = 4-byte cookie + 2-byte event code */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		/* Lockless variant: we already hold hci_sk_list.lock */
		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
423
/* Build the monitor-channel frame describing a controller life-cycle
 * @event (HCI_DEV_REG/UNREG/SETUP/UP/OPEN/CLOSE) for @hdev.
 *
 * Returns a freshly allocated, timestamped skb with the hci_mon_hdr
 * already pushed, or NULL on unknown event / suppressed event /
 * allocation failure. Caller owns (and must free) the skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Bounded copy, NUL-padded to the full field width */
		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
			       strnlen(hdev->name, sizeof(ni->name)), '\0');

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* No index info until the manufacturer is known */
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
502
/* Build a HCI_MON_CTRL_OPEN frame announcing @sk to monitors.
 *
 * The payload is: cookie (4), channel format (2), version (3),
 * flags (4), comm length (1), comm (TASK_COMM_LEN). Returns NULL when
 * the socket has no cookie, the channel is not traced, or allocation
 * fails. Caller owns the returned skb.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	/* 14 = cookie + format + ver + flags + comm-length byte */
	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
560
/* Build a HCI_MON_CTRL_CLOSE frame for @sk (payload is just the
 * 4-byte cookie). Returns NULL when the socket has no cookie, its
 * channel is not traced, or allocation fails. Caller owns the skb.
 */
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
598
/* Build a HCI_MON_CTRL_COMMAND frame mirroring a mgmt command from
 * @sk: cookie (4) + opcode (2) + optional @len bytes of @buf.
 * Returns NULL on allocation failure; caller owns the skb.
 */
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
625
/* Queue a HCI_MON_SYSTEM_NOTE carrying the printf-formatted,
 * NUL-terminated string directly onto monitor socket @sk.
 */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	/* First pass only measures the formatted length */
	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	/* vsprintf() writes len chars plus a NUL; the NUL lands in the
	 * tailroom guaranteed by the len + 1 allocation and is then
	 * claimed (and rewritten) by the explicit skb_put() below.
	 */
	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
657
/* Replay the current state of every registered controller to a newly
 * attached monitor socket: NEW_INDEX, then OPEN_INDEX if running, then
 * INDEX_INFO for up / in-setup devices. Queued directly onto @sk.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Closed controllers report nothing beyond registration */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
699
700 static void send_monitor_control_replay(struct sock *mon_sk)
701 {
702         struct sock *sk;
703
704         read_lock(&hci_sk_list.lock);
705
706         sk_for_each(sk, &hci_sk_list.head) {
707                 struct sk_buff *skb;
708
709                 skb = create_monitor_ctrl_open(sk);
710                 if (!skb)
711                         continue;
712
713                 if (sock_queue_rcv_skb(mon_sk, skb))
714                         kfree_skb(skb);
715         }
716
717         read_unlock(&hci_sk_list.lock);
718 }
719
/* Generate internal stack event.
 *
 * Fabricate a HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data, mark it incoming, and fan it out to RAW sockets via
 * hci_send_to_sock(). @hdev may be NULL (e.g. device add/remove
 * notifications are not tied to one bound controller).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Present the synthetic event as incoming controller traffic */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
746
/* Notify userspace about a controller event: trace it to monitors,
 * emit a stack-internal event for the basic up/down transitions, and
 * on unregistration wake every socket still bound to the dead device
 * with EPIPE.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Only events up to HCI_DEV_DOWN are mirrored as HCI_EV_SI_DEVICE */
	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
786
787 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
788 {
789         struct hci_mgmt_chan *c;
790
791         list_for_each_entry(c, &mgmt_chan_list, list) {
792                 if (c->channel == channel)
793                         return c;
794         }
795
796         return NULL;
797 }
798
799 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
800 {
801         struct hci_mgmt_chan *c;
802
803         mutex_lock(&mgmt_chan_list_lock);
804         c = __hci_mgmt_chan_find(channel);
805         mutex_unlock(&mgmt_chan_list_lock);
806
807         return c;
808 }
809
810 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
811 {
812         if (c->channel < HCI_CHANNEL_CONTROL)
813                 return -EINVAL;
814
815         mutex_lock(&mgmt_chan_list_lock);
816         if (__hci_mgmt_chan_find(c->channel)) {
817                 mutex_unlock(&mgmt_chan_list_lock);
818                 return -EALREADY;
819         }
820
821         list_add_tail(&c->list, &mgmt_chan_list);
822
823         mutex_unlock(&mgmt_chan_list_lock);
824
825         return 0;
826 }
827 EXPORT_SYMBOL(hci_mgmt_chan_register);
828
/* Remove a previously registered mgmt channel handler. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
836
/* Release an HCI socket: announce CTRL_CLOSE to monitors, free the
 * cookie, unlink from the global list, and for the exclusive USER
 * channel bring the controller back down and hand it to mgmt again.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		/* Balance the increment done when the monitor attached */
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered since all the cleanup will have already
			 * been complete and hdev will get released when we put
			 * below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		/* Drop the reference taken when the socket bound */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
903
/* HCIBLOCKADDR helper: copy a bdaddr from userspace and add it to the
 * controller's reject list (BR/EDR type). Returns 0 or a negative
 * errno (-EFAULT on bad user pointer, or hci_bdaddr_list_add()'s
 * error).
 */
static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
920
/* HCIUNBLOCKADDR helper: copy a bdaddr from userspace and remove it
 * from the controller's reject list (BR/EDR type). Returns 0 or a
 * negative errno.
 */
static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
937
/* Ioctls that require bound socket */
/* Dispatch ioctls that need a valid bound hdev. Rejected outright for
 * user-channel-claimed, unconfigured or non-primary controllers.
 * Returns -ENOIOCTLCMD for commands not handled here.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Legacy raw mode: capability still checked first so the
		 * caller gets -EPERM rather than -EOPNOTSUPP when
		 * unprivileged.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
981
/* Ioctl entry point for HCI sockets. Validates the command number first,
 * then requires a raw-channel socket. Device-global commands (device
 * lists, up/down/reset) are dispatched WITHOUT the socket lock held;
 * everything else falls through to hci_sock_bound_ioctl() with the lock
 * retaken. The "done" label is only reached with the socket lock held.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	/* Ioctls are only valid on raw channel sockets */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only send once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Device-global commands below run without the socket lock and
	 * return directly instead of jumping to "done".
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands operate on a bound socket; retake the lock
	 * so the "done" unlock below pairs correctly.
	 */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1110
#ifdef CONFIG_COMPAT
/* Compat ioctl: the device index commands take a plain integer argument
 * and must be passed through untouched; everything else carries a user
 * pointer that needs compat_ptr() conversion.
 */
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	unsigned long cooked = (unsigned long)compat_ptr(arg);

	if (cmd == HCIDEVUP || cmd == HCIDEVDOWN ||
	    cmd == HCIDEVRESET || cmd == HCIDEVRESTAT)
		cooked = arg;

	return hci_sock_ioctl(sock, cmd, cooked);
}
#endif
1126
/* Bind an HCI socket to a channel (raw, user, monitor, logging, or a
 * management channel) and optionally to a device index. Performs the
 * per-channel capability checks, takes the needed device references,
 * and emits the open/close notifications towards the monitor channel.
 * Returns 0 on success or a negative errno.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter addresses are zero-padded
	 * by the memset above the copy.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* Raw sockets may bind to a specific device or to none
		 * (HCI_DEV_NONE) to see traffic for all devices.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires an explicit device index */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse to take over a device that is being set up,
		 * configured, or already up (unless it is only up due to
		 * the HCI_AUTO_OFF grace period).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Undo the takeover on failure */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		/* Monitor binds to all devices, never to a specific one */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay current state (version notes, registered devices,
		 * open control sockets) to the new monitor listener.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		/* Anything else must be a registered management channel */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1426
1427 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1428                             int peer)
1429 {
1430         struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1431         struct sock *sk = sock->sk;
1432         struct hci_dev *hdev;
1433         int err = 0;
1434
1435         BT_DBG("sock %p sk %p", sock, sk);
1436
1437         if (peer)
1438                 return -EOPNOTSUPP;
1439
1440         lock_sock(sk);
1441
1442         hdev = hci_hdev_from_sock(sk);
1443         if (IS_ERR(hdev)) {
1444                 err = PTR_ERR(hdev);
1445                 goto done;
1446         }
1447
1448         haddr->hci_family = AF_BLUETOOTH;
1449         haddr->hci_dev    = hdev->id;
1450         haddr->hci_channel= hci_pi(sk)->channel;
1451         err = sizeof(*haddr);
1452
1453 done:
1454         release_sock(sk);
1455         return err;
1456 }
1457
1458 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1459                           struct sk_buff *skb)
1460 {
1461         __u8 mask = hci_pi(sk)->cmsg_mask;
1462
1463         if (mask & HCI_CMSG_DIR) {
1464                 int incoming = bt_cb(skb)->incoming;
1465                 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1466                          &incoming);
1467         }
1468
1469         if (mask & HCI_CMSG_TSTAMP) {
1470 #ifdef CONFIG_COMPAT
1471                 struct old_timeval32 ctv;
1472 #endif
1473                 struct __kernel_old_timeval tv;
1474                 void *data;
1475                 int len;
1476
1477                 skb_get_timestamp(skb, &tv);
1478
1479                 data = &tv;
1480                 len = sizeof(tv);
1481 #ifdef CONFIG_COMPAT
1482                 if (!COMPAT_USE_64BIT_TIME &&
1483                     (msg->msg_flags & MSG_CMSG_COMPAT)) {
1484                         ctv.tv_sec = tv.tv_sec;
1485                         ctv.tv_usec = tv.tv_usec;
1486                         data = &ctv;
1487                         len = sizeof(ctv);
1488                 }
1489 #endif
1490
1491                 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1492         }
1493 }
1494
1495 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1496                             size_t len, int flags)
1497 {
1498         struct sock *sk = sock->sk;
1499         struct sk_buff *skb;
1500         int copied, err;
1501         unsigned int skblen;
1502
1503         BT_DBG("sock %p, sk %p", sock, sk);
1504
1505         if (flags & MSG_OOB)
1506                 return -EOPNOTSUPP;
1507
1508         if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1509                 return -EOPNOTSUPP;
1510
1511         if (sk->sk_state == BT_CLOSED)
1512                 return 0;
1513
1514         skb = skb_recv_datagram(sk, flags, &err);
1515         if (!skb)
1516                 return err;
1517
1518         skblen = skb->len;
1519         copied = skb->len;
1520         if (len < copied) {
1521                 msg->msg_flags |= MSG_TRUNC;
1522                 copied = len;
1523         }
1524
1525         skb_reset_transport_header(skb);
1526         err = skb_copy_datagram_msg(skb, 0, msg, copied);
1527
1528         switch (hci_pi(sk)->channel) {
1529         case HCI_CHANNEL_RAW:
1530                 hci_sock_cmsg(sk, msg, skb);
1531                 break;
1532         case HCI_CHANNEL_USER:
1533         case HCI_CHANNEL_MONITOR:
1534                 sock_recv_timestamp(msg, sk, skb);
1535                 break;
1536         default:
1537                 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1538                         sock_recv_timestamp(msg, sk, skb);
1539                 break;
1540         }
1541
1542         skb_free_datagram(sk, skb);
1543
1544         if (flags & MSG_TRUNC)
1545                 copied = skblen;
1546
1547         return err ? : copied;
1548 }
1549
/* Parse and dispatch one management command frame received on a
 * management channel socket. Validates the header length, mirrors
 * control-channel commands to the monitor, checks handler existence,
 * trust level, device index state, and parameter length, then invokes
 * the handler. Returns skb->len on success, a negative errno on frame
 * errors, or the result of mgmt_cmd_status() for protocol-level errors.
 * Holds a device reference across the handler call; dropped at "done".
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length field must exactly match the payload size */
	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	/* Opcode must have a registered handler on this channel */
	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue commands explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices in setup/config or claimed by a user channel
		 * are not visible to management commands.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured devices only accept handlers that opted in */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Unless optional, the handler's hdev expectation must match
	 * whether an index was supplied.
	 */
	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Variable-length commands need at least data_len bytes;
	 * fixed-length commands need exactly data_len bytes.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}
1664
/* Validate a user-supplied logging frame and forward it to the monitor
 * channel as an HCI_MON_USER_LOGGING event. Returns skb->len on
 * success, -EINVAL for malformed frames, or -ENODEV for an unknown
 * device index. The skb itself is not consumed here; the caller frees
 * it (hci_send_to_channel() delivers clones).
 */
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	/* Opcode 0x0000 is the only accepted logging opcode */
	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	/* A specific index must refer to an existing device; MGMT_INDEX_NONE
	 * is allowed and means "not tied to any device".
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	/* Rewrite the opcode before forwarding to the monitor channel */
	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
1730
/* Transmit path for HCI sockets.  Depending on the bound channel the
 * frame is either queued towards the controller (RAW/USER), handed to
 * the mgmt command dispatcher, forwarded to the logging channel, or
 * rejected (MONITOR is read-only).
 *
 * Returns the number of bytes consumed (len) on success or a negative
 * errno.  On every error path the skb is freed via the "drop" label.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Only this subset of msg_flags is meaningful here; anything
	 * else is a malformed request.
	 */
	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum of 4 bytes: 1 byte packet type indicator plus the
	 * smallest possible HCI packet header.  Upper bound is the
	 * per-socket MTU (settable via BT_SNDMTU/BT_RCVMTU).
	 */
	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		/* Fall through to the device transmit path below. */
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are receive-only. */
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		/* Any other channel number is a registered mgmt channel
		 * (e.g. control); dispatch the command to its handler.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto drop;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	/* First byte of the frame is the H:4 packet type indicator;
	 * strip it off after recording it in the skb control block.
	 */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged raw sockets may only send commands that
		 * pass the static security filter; CAP_NET_RAW bypasses
		 * the filter entirely.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* OGF 0x3f = vendor-specific commands: bypass the
			 * command queue and go straight to the driver.
			 */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets (ACL/SCO/ISO) require CAP_NET_RAW. */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1867
1868 static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
1869                                    sockptr_t optval, unsigned int len)
1870 {
1871         struct hci_ufilter uf = { .opcode = 0 };
1872         struct sock *sk = sock->sk;
1873         int err = 0, opt = 0;
1874
1875         BT_DBG("sk %p, opt %d", sk, optname);
1876
1877         lock_sock(sk);
1878
1879         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1880                 err = -EBADFD;
1881                 goto done;
1882         }
1883
1884         switch (optname) {
1885         case HCI_DATA_DIR:
1886                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1887                         err = -EFAULT;
1888                         break;
1889                 }
1890
1891                 if (opt)
1892                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1893                 else
1894                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1895                 break;
1896
1897         case HCI_TIME_STAMP:
1898                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1899                         err = -EFAULT;
1900                         break;
1901                 }
1902
1903                 if (opt)
1904                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1905                 else
1906                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1907                 break;
1908
1909         case HCI_FILTER:
1910                 {
1911                         struct hci_filter *f = &hci_pi(sk)->filter;
1912
1913                         uf.type_mask = f->type_mask;
1914                         uf.opcode    = f->opcode;
1915                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1916                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1917                 }
1918
1919                 len = min_t(unsigned int, len, sizeof(uf));
1920                 if (copy_from_sockptr(&uf, optval, len)) {
1921                         err = -EFAULT;
1922                         break;
1923                 }
1924
1925                 if (!capable(CAP_NET_RAW)) {
1926                         uf.type_mask &= hci_sec_filter.type_mask;
1927                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1928                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1929                 }
1930
1931                 {
1932                         struct hci_filter *f = &hci_pi(sk)->filter;
1933
1934                         f->type_mask = uf.type_mask;
1935                         f->opcode    = uf.opcode;
1936                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1937                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1938                 }
1939                 break;
1940
1941         default:
1942                 err = -ENOPROTOOPT;
1943                 break;
1944         }
1945
1946 done:
1947         release_sock(sk);
1948         return err;
1949 }
1950
1951 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1952                                sockptr_t optval, unsigned int len)
1953 {
1954         struct sock *sk = sock->sk;
1955         int err = 0;
1956         u16 opt;
1957
1958         BT_DBG("sk %p, opt %d", sk, optname);
1959
1960         if (level == SOL_HCI)
1961                 return hci_sock_setsockopt_old(sock, level, optname, optval,
1962                                                len);
1963
1964         if (level != SOL_BLUETOOTH)
1965                 return -ENOPROTOOPT;
1966
1967         lock_sock(sk);
1968
1969         switch (optname) {
1970         case BT_SNDMTU:
1971         case BT_RCVMTU:
1972                 switch (hci_pi(sk)->channel) {
1973                 /* Don't allow changing MTU for channels that are meant for HCI
1974                  * traffic only.
1975                  */
1976                 case HCI_CHANNEL_RAW:
1977                 case HCI_CHANNEL_USER:
1978                         err = -ENOPROTOOPT;
1979                         goto done;
1980                 }
1981
1982                 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1983                         err = -EFAULT;
1984                         break;
1985                 }
1986
1987                 hci_pi(sk)->mtu = opt;
1988                 break;
1989
1990         default:
1991                 err = -ENOPROTOOPT;
1992                 break;
1993         }
1994
1995 done:
1996         release_sock(sk);
1997         return err;
1998 }
1999
2000 static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
2001                                    char __user *optval, int __user *optlen)
2002 {
2003         struct hci_ufilter uf;
2004         struct sock *sk = sock->sk;
2005         int len, opt, err = 0;
2006
2007         BT_DBG("sk %p, opt %d", sk, optname);
2008
2009         if (get_user(len, optlen))
2010                 return -EFAULT;
2011
2012         lock_sock(sk);
2013
2014         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
2015                 err = -EBADFD;
2016                 goto done;
2017         }
2018
2019         switch (optname) {
2020         case HCI_DATA_DIR:
2021                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
2022                         opt = 1;
2023                 else
2024                         opt = 0;
2025
2026                 if (put_user(opt, optval))
2027                         err = -EFAULT;
2028                 break;
2029
2030         case HCI_TIME_STAMP:
2031                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
2032                         opt = 1;
2033                 else
2034                         opt = 0;
2035
2036                 if (put_user(opt, optval))
2037                         err = -EFAULT;
2038                 break;
2039
2040         case HCI_FILTER:
2041                 {
2042                         struct hci_filter *f = &hci_pi(sk)->filter;
2043
2044                         memset(&uf, 0, sizeof(uf));
2045                         uf.type_mask = f->type_mask;
2046                         uf.opcode    = f->opcode;
2047                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2048                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2049                 }
2050
2051                 len = min_t(unsigned int, len, sizeof(uf));
2052                 if (copy_to_user(optval, &uf, len))
2053                         err = -EFAULT;
2054                 break;
2055
2056         default:
2057                 err = -ENOPROTOOPT;
2058                 break;
2059         }
2060
2061 done:
2062         release_sock(sk);
2063         return err;
2064 }
2065
2066 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
2067                                char __user *optval, int __user *optlen)
2068 {
2069         struct sock *sk = sock->sk;
2070         int err = 0;
2071
2072         BT_DBG("sk %p, opt %d", sk, optname);
2073
2074         if (level == SOL_HCI)
2075                 return hci_sock_getsockopt_old(sock, level, optname, optval,
2076                                                optlen);
2077
2078         if (level != SOL_BLUETOOTH)
2079                 return -ENOPROTOOPT;
2080
2081         lock_sock(sk);
2082
2083         switch (optname) {
2084         case BT_SNDMTU:
2085         case BT_RCVMTU:
2086                 if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
2087                         err = -EFAULT;
2088                 break;
2089
2090         default:
2091                 err = -ENOPROTOOPT;
2092                 break;
2093         }
2094
2095         release_sock(sk);
2096         return err;
2097 }
2098
/* Socket destructor (sk->sk_destruct): tear down per-socket mgmt state
 * first, then free any skbs still sitting on the receive and write
 * queues.  Order matters: mgmt_cleanup() may reference the socket's
 * queued state — NOTE(review): exact mgmt_cleanup() semantics live in
 * mgmt.c, outside this file.
 */
static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
2105
/* proto_ops for HCI sockets.  Raw datagram-style semantics: no
 * listen/connect/accept (all stubbed with sock_no_*), polling via the
 * generic datagram helper.
 */
static const struct proto_ops hci_sock_ops = {
	.family         = PF_BLUETOOTH,
	.owner          = THIS_MODULE,
	.release        = hci_sock_release,
	.bind           = hci_sock_bind,
	.getname        = hci_sock_getname,
	.sendmsg        = hci_sock_sendmsg,
	.recvmsg        = hci_sock_recvmsg,
	.ioctl          = hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = hci_sock_compat_ioctl,
#endif
	.poll           = datagram_poll,
	.listen         = sock_no_listen,
	.shutdown       = sock_no_shutdown,
	.setsockopt     = hci_sock_setsockopt,
	.getsockopt     = hci_sock_getsockopt,
	.connect        = sock_no_connect,
	.socketpair     = sock_no_socketpair,
	.accept         = sock_no_accept,
	.mmap           = sock_no_mmap
};
2128
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * full struct hci_pinfo (bt_sock plus HCI-specific fields).
 */
static struct proto hci_sk_proto = {
	.name           = "HCI",
	.owner          = THIS_MODULE,
	.obj_size       = sizeof(struct hci_pinfo)
};
2134
2135 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2136                            int kern)
2137 {
2138         struct sock *sk;
2139
2140         BT_DBG("sock %p", sock);
2141
2142         if (sock->type != SOCK_RAW)
2143                 return -ESOCKTNOSUPPORT;
2144
2145         sock->ops = &hci_sock_ops;
2146
2147         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2148         if (!sk)
2149                 return -ENOMEM;
2150
2151         sock_init_data(sock, sk);
2152
2153         sock_reset_flag(sk, SOCK_ZAPPED);
2154
2155         sk->sk_protocol = protocol;
2156
2157         sock->state = SS_UNCONNECTED;
2158         sk->sk_state = BT_OPEN;
2159         sk->sk_destruct = hci_sock_destruct;
2160
2161         bt_sock_link(&hci_sk_list, sk);
2162         return 0;
2163 }
2164
/* Family descriptor registered for BTPROTO_HCI; create() is invoked by
 * the core socket layer on socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI).
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner  = THIS_MODULE,
	.create = hci_sock_create,
};
2170
2171 int __init hci_sock_init(void)
2172 {
2173         int err;
2174
2175         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2176
2177         err = proto_register(&hci_sk_proto, 0);
2178         if (err < 0)
2179                 return err;
2180
2181         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2182         if (err < 0) {
2183                 BT_ERR("HCI socket registration failed");
2184                 goto error;
2185         }
2186
2187         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2188         if (err < 0) {
2189                 BT_ERR("Failed to create HCI proc file");
2190                 bt_sock_unregister(BTPROTO_HCI);
2191                 goto error;
2192         }
2193
2194         BT_INFO("HCI socket layer initialized");
2195
2196         return 0;
2197
2198 error:
2199         proto_unregister(&hci_sk_proto);
2200         return err;
2201 }
2202
/* Tear down everything hci_sock_init() set up, in reverse order of
 * registration: proc entry, socket family, then the proto itself.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}