GNU Linux-libre 4.14.324-gnu1
net/vmw_vsock/vmci_transport.c
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#include "vmci_transport_notify.h"

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
static void vmci_transport_peer_detach_cb(u32 sub_id,
                                          const struct vmci_event_data *ed,
                                          void *client_data);
static void vmci_transport_recv_pkt_work(struct work_struct *work);
static void vmci_transport_cleanup(struct work_struct *work);
static int vmci_transport_recv_listen(struct sock *sk,
                                      struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_server(
                                        struct sock *sk,
                                        struct sock *pending,
                                        struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client(
                                        struct sock *sk,
                                        struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_negotiate(
                                        struct sock *sk,
                                        struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_invalid(
                                        struct sock *sk,
                                        struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connected(struct sock *sk,
                                         struct vmci_transport_packet *pkt);
static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
static u16 vmci_transport_new_proto_supported_versions(void);
static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
                                                  bool old_pkt_proto);

struct vmci_transport_recv_pkt_info {
        struct work_struct work;
        struct sock *sk;
        struct vmci_transport_packet pkt;
};

static LIST_HEAD(vmci_transport_cleanup_list);
static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);

static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
                                                           VMCI_INVALID_ID };
static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;

static int PROTOCOL_OVERRIDE = -1;

#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN   128
#define VMCI_TRANSPORT_DEFAULT_QP_SIZE       262144
#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX   262144

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

/* Helper function to convert from a VMCI error code to a VSock error code. */

static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
{
        switch (vmci_error) {
        case VMCI_ERROR_NO_MEM:
                return -ENOMEM;
        case VMCI_ERROR_DUPLICATE_ENTRY:
        case VMCI_ERROR_ALREADY_EXISTS:
                return -EADDRINUSE;
        case VMCI_ERROR_NO_ACCESS:
                return -EPERM;
        case VMCI_ERROR_NO_RESOURCES:
                return -ENOBUFS;
        case VMCI_ERROR_INVALID_RESOURCE:
                return -EHOSTUNREACH;
        case VMCI_ERROR_INVALID_ARGS:
        default:
                break;
        }
        return -EINVAL;
}
117
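/* Control packets addressed to the hypervisor context are sent to a
 * dedicated resource ID; packets to any other peer use the common vsock
 * packet RID.
 */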
static u32 vmci_transport_peer_rid(u32 peer_cid)
{
        if (VMADDR_CID_HYPERVISOR == peer_cid)
                return VMCI_TRANSPORT_HYPERVISOR_PACKET_RID;

        return VMCI_TRANSPORT_PACKET_RID;
}
125
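/* A control packet travels as a single VMCI datagram: the vmci_datagram
 * header (dg) comes first and the rest of the transport header follows it,
 * which is why the datagram payload size is sizeof(*pkt) - sizeof(pkt->dg).
 * Which union field is meaningful depends on pkt->type: u.size for the
 * REQUEST(2)/NEGOTIATE(2) families, u.handle for OFFER/ATTACH, u.mode for
 * SHUTDOWN, and u.wait for the WAITING_READ/WAITING_WRITE packets.
 */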
static inline void
vmci_transport_packet_init(struct vmci_transport_packet *pkt,
                           struct sockaddr_vm *src,
                           struct sockaddr_vm *dst,
                           u8 type,
                           u64 size,
                           u64 mode,
                           struct vmci_transport_waiting_info *wait,
                           u16 proto,
                           struct vmci_handle handle)
{
        /* We register the stream control handler on an "any CID" handle, so
         * we must always send from a source address of VMADDR_CID_ANY.
         */
        pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
                                       VMCI_TRANSPORT_PACKET_RID);
        pkt->dg.dst = vmci_make_handle(dst->svm_cid,
                                       vmci_transport_peer_rid(dst->svm_cid));
        pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
        pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
        pkt->type = type;
        pkt->src_port = src->svm_port;
        pkt->dst_port = dst->svm_port;
        memset(&pkt->proto, 0, sizeof(pkt->proto));
        memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));

        switch (pkt->type) {
        case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
                pkt->u.size = 0;
                break;

        case VMCI_TRANSPORT_PACKET_TYPE_REQUEST:
        case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
                pkt->u.size = size;
                break;

        case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
        case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
                pkt->u.handle = handle;
                break;

        case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
        case VMCI_TRANSPORT_PACKET_TYPE_READ:
        case VMCI_TRANSPORT_PACKET_TYPE_RST:
                pkt->u.size = 0;
                break;

        case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
                pkt->u.mode = mode;
                break;

        case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
        case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
                memcpy(&pkt->u.wait, wait, sizeof(pkt->u.wait));
                break;

        case VMCI_TRANSPORT_PACKET_TYPE_REQUEST2:
        case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
                pkt->u.size = size;
                pkt->proto = proto;
                break;
        }
}
189
static inline void
vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt,
                                    struct sockaddr_vm *local,
                                    struct sockaddr_vm *remote)
{
        vsock_addr_init(local, pkt->dg.dst.context, pkt->dst_port);
        vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port);
}

static int
__vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt,
                                  struct sockaddr_vm *src,
                                  struct sockaddr_vm *dst,
                                  enum vmci_transport_packet_type type,
                                  u64 size,
                                  u64 mode,
                                  struct vmci_transport_waiting_info *wait,
                                  u16 proto,
                                  struct vmci_handle handle,
                                  bool convert_error)
{
        int err;

        vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait,
                                   proto, handle);
        err = vmci_datagram_send(&pkt->dg);
        if (convert_error && (err < 0))
                return vmci_transport_error_to_vsock_error(err);

        return err;
}

static int
vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt,
                                      enum vmci_transport_packet_type type,
                                      u64 size,
                                      u64 mode,
                                      struct vmci_transport_waiting_info *wait,
                                      struct vmci_handle handle)
{
        struct vmci_transport_packet reply;
        struct sockaddr_vm src, dst;

        if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) {
                return 0;
        } else {
                vmci_transport_packet_get_addresses(pkt, &src, &dst);
                return __vmci_transport_send_control_pkt(&reply, &src, &dst,
                                                         type,
                                                         size, mode, wait,
                                                         VSOCK_PROTO_INVALID,
                                                         handle, true);
        }
}

static int
vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
                                   struct sockaddr_vm *dst,
                                   enum vmci_transport_packet_type type,
                                   u64 size,
                                   u64 mode,
                                   struct vmci_transport_waiting_info *wait,
                                   struct vmci_handle handle)
{
        /* Note that it is safe to use a single packet across all CPUs since
         * two tasklets of the same type are guaranteed to not ever run
         * simultaneously. If that ever changes, or VMCI stops using tasklets,
         * we can use per-cpu packets.
         */
        static struct vmci_transport_packet pkt;

        return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
                                                 size, mode, wait,
                                                 VSOCK_PROTO_INVALID, handle,
                                                 false);
}
266
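/* Unlike vmci_transport_send_control_pkt_bh() above, this variant allocates
 * the packet with GFP_KERNEL and may therefore sleep, so it is only suitable
 * for process context.
 */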
static int
vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
                                      struct sockaddr_vm *dst,
                                      enum vmci_transport_packet_type type,
                                      u64 size,
                                      u64 mode,
                                      struct vmci_transport_waiting_info *wait,
                                      u16 proto,
                                      struct vmci_handle handle)
{
        struct vmci_transport_packet *pkt;
        int err;

        pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
                return -ENOMEM;

        err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
                                                mode, wait, proto, handle,
                                                true);
        kfree(pkt);

        return err;
}

static int
vmci_transport_send_control_pkt(struct sock *sk,
                                enum vmci_transport_packet_type type,
                                u64 size,
                                u64 mode,
                                struct vmci_transport_waiting_info *wait,
                                u16 proto,
                                struct vmci_handle handle)
{
        struct vsock_sock *vsk;

        vsk = vsock_sk(sk);

        if (!vsock_addr_bound(&vsk->local_addr))
                return -EINVAL;

        if (!vsock_addr_bound(&vsk->remote_addr))
                return -EINVAL;

        return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
                                                     &vsk->remote_addr,
                                                     type, size, mode,
                                                     wait, proto, handle);
}

static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
                                        struct sockaddr_vm *src,
                                        struct vmci_transport_packet *pkt)
{
        if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
                return 0;
        return vmci_transport_send_control_pkt_bh(
                                        dst, src,
                                        VMCI_TRANSPORT_PACKET_TYPE_RST, 0,
                                        0, NULL, VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_reset(struct sock *sk,
                                     struct vmci_transport_packet *pkt)
{
        struct sockaddr_vm *dst_ptr;
        struct sockaddr_vm dst;
        struct vsock_sock *vsk;

        if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
                return 0;

        vsk = vsock_sk(sk);

        if (!vsock_addr_bound(&vsk->local_addr))
                return -EINVAL;

        if (vsock_addr_bound(&vsk->remote_addr)) {
                dst_ptr = &vsk->remote_addr;
        } else {
                vsock_addr_init(&dst, pkt->dg.src.context,
                                pkt->src_port);
                dst_ptr = &dst;
        }
        return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
                                             VMCI_TRANSPORT_PACKET_TYPE_RST,
                                             0, 0, NULL, VSOCK_PROTO_INVALID,
                                             VMCI_INVALID_HANDLE);
}
356
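/* What follows is a family of thin wrappers, one per control packet type,
 * each filling in the combination of size, mode, wait info, protocol
 * version and queue pair handle that its packet type carries.
 */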
static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
{
        return vmci_transport_send_control_pkt(
                                        sk,
                                        VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
                                        size, 0, NULL,
                                        VSOCK_PROTO_INVALID,
                                        VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
                                          u16 version)
{
        return vmci_transport_send_control_pkt(
                                        sk,
                                        VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
                                        size, 0, NULL, version,
                                        VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_qp_offer(struct sock *sk,
                                        struct vmci_handle handle)
{
        return vmci_transport_send_control_pkt(
                                        sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
                                        0, NULL,
                                        VSOCK_PROTO_INVALID, handle);
}

static int vmci_transport_send_attach(struct sock *sk,
                                      struct vmci_handle handle)
{
        return vmci_transport_send_control_pkt(
                                        sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
                                        0, 0, NULL, VSOCK_PROTO_INVALID,
                                        handle);
}

static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
{
        return vmci_transport_reply_control_pkt_fast(
                                                pkt,
                                                VMCI_TRANSPORT_PACKET_TYPE_RST,
                                                0, 0, NULL,
                                                VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_invalid_bh(struct sockaddr_vm *dst,
                                          struct sockaddr_vm *src)
{
        return vmci_transport_send_control_pkt_bh(
                                        dst, src,
                                        VMCI_TRANSPORT_PACKET_TYPE_INVALID,
                                        0, 0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
                                 struct sockaddr_vm *src)
{
        return vmci_transport_send_control_pkt_bh(
                                        dst, src,
                                        VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
                                        0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
                                struct sockaddr_vm *src)
{
        return vmci_transport_send_control_pkt_bh(
                                        dst, src,
                                        VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
                                        0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote(struct sock *sk)
{
        return vmci_transport_send_control_pkt(
                                        sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
                                        0, NULL, VSOCK_PROTO_INVALID,
                                        VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read(struct sock *sk)
{
        return vmci_transport_send_control_pkt(
                                        sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
                                        0, NULL, VSOCK_PROTO_INVALID,
                                        VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_write(struct sock *sk,
                                      struct vmci_transport_waiting_info *wait)
{
        return vmci_transport_send_control_pkt(
                                sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
                                0, 0, wait, VSOCK_PROTO_INVALID,
                                VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_read(struct sock *sk,
                                     struct vmci_transport_waiting_info *wait)
{
        return vmci_transport_send_control_pkt(
                                sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
                                0, 0, wait, VSOCK_PROTO_INVALID,
                                VMCI_INVALID_HANDLE);
}

static int vmci_transport_shutdown(struct vsock_sock *vsk, int mode)
{
        return vmci_transport_send_control_pkt(
                                        &vsk->sk,
                                        VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
                                        0, mode, NULL,
                                        VSOCK_PROTO_INVALID,
                                        VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
{
        return vmci_transport_send_control_pkt(sk,
                                        VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
                                        size, 0, NULL,
                                        VSOCK_PROTO_INVALID,
                                        VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
                                             u16 version)
{
        return vmci_transport_send_control_pkt(
                                        sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
                                        size, 0, NULL, version,
                                        VMCI_INVALID_HANDLE);
}
492
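/* Find the pending (not yet accepted) child of @listener whose remote
 * address matches the source of @pkt and whose local port matches the
 * packet's destination port.  On success the pending socket is returned
 * with an extra reference held, which the caller drops via
 * vmci_transport_release_pending(); otherwise NULL is returned.
 */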
static struct sock *vmci_transport_get_pending(
                                        struct sock *listener,
                                        struct vmci_transport_packet *pkt)
{
        struct vsock_sock *vlistener;
        struct vsock_sock *vpending;
        struct sock *pending;
        struct sockaddr_vm src;

        vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);

        vlistener = vsock_sk(listener);

        list_for_each_entry(vpending, &vlistener->pending_links,
                            pending_links) {
                if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
                    pkt->dst_port == vpending->local_addr.svm_port) {
                        pending = sk_vsock(vpending);
                        sock_hold(pending);
                        goto found;
                }
        }

        pending = NULL;
found:
        return pending;
}
521
static void vmci_transport_release_pending(struct sock *pending)
{
        sock_put(pending);
}

/* We allow two kinds of sockets to communicate with a restricted VM: 1)
 * trusted sockets, and 2) sockets from applications running as the same user
 * as the VM (this is only true for the host side and only when using hosted
 * products).
 */

static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
{
        return vsock->trusted ||
               vmci_is_context_owner(peer_cid, vsock->owner->uid);
}
537
/* We allow sending datagrams to and receiving datagrams from a restricted VM
 * only if it is trusted as described in vmci_transport_is_trusted.
 */

static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
{
        if (VMADDR_CID_HYPERVISOR == peer_cid)
                return true;

        if (vsock->cached_peer != peer_cid) {
                vsock->cached_peer = peer_cid;
                if (!vmci_transport_is_trusted(vsock, peer_cid) &&
                    (vmci_context_get_priv_flags(peer_cid) &
                     VMCI_PRIVILEGE_FLAG_RESTRICTED)) {
                        vsock->cached_peer_allow_dgram = false;
                } else {
                        vsock->cached_peer_allow_dgram = true;
                }
        }

        return vsock->cached_peer_allow_dgram;
}

static int
vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
                                struct vmci_handle *handle,
                                u64 produce_size,
                                u64 consume_size,
                                u32 peer, u32 flags, bool trusted)
{
        int err = 0;

        if (trusted) {
                /* Try to allocate our queue pair as trusted. This will only
                 * work if vsock is running in the host.
                 */

                err = vmci_qpair_alloc(qpair, handle, produce_size,
                                       consume_size,
                                       peer, flags,
                                       VMCI_PRIVILEGE_FLAG_TRUSTED);
                if (err != VMCI_ERROR_NO_ACCESS)
                        goto out;
        }

        err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size,
                               peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
out:
        if (err < 0) {
                pr_err_once("Could not attach to queue pair with %d\n", err);
                err = vmci_transport_error_to_vsock_error(err);
        }

        return err;
}

static int
vmci_transport_datagram_create_hnd(u32 resource_id,
                                   u32 flags,
                                   vmci_datagram_recv_cb recv_cb,
                                   void *client_data,
                                   struct vmci_handle *out_handle)
{
        int err = 0;

        /* Try to allocate our datagram handler as trusted. This will only work
         * if vsock is running in the host.
         */

        err = vmci_datagram_create_handle_priv(resource_id, flags,
                                               VMCI_PRIVILEGE_FLAG_TRUSTED,
                                               recv_cb,
                                               client_data, out_handle);

        if (err == VMCI_ERROR_NO_ACCESS)
                err = vmci_datagram_create_handle(resource_id, flags,
                                                  recv_cb, client_data,
                                                  out_handle);

        return err;
}

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context and if it ever needs to
 * sleep it should defer that work to a work queue.
 */

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
{
        struct sock *sk;
        size_t size;
        struct sk_buff *skb;
        struct vsock_sock *vsk;

        sk = (struct sock *)data;

        /* This handler is privileged when this module is running on the host.
         * We will get datagrams from all endpoints (even VMs that are in a
         * restricted context). If we get one from a restricted context then
         * the destination socket must be trusted.
         *
         * NOTE: We access the socket struct without holding the lock here.
         * This is ok because the field we are interested in is never modified
         * outside of the create and destruct socket functions.
         */
        vsk = vsock_sk(sk);
        if (!vmci_transport_allow_dgram(vsk, dg->src.context))
                return VMCI_ERROR_NO_ACCESS;

        size = VMCI_DG_SIZE(dg);

        /* Attach the packet to the socket's receive queue as an sk_buff. */
        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                return VMCI_ERROR_NO_MEM;

        /* sk_receive_skb() will do a sock_put(), so hold here. */
        sock_hold(sk);
        skb_put(skb, size);
        memcpy(skb->data, dg, size);
        sk_receive_skb(sk, skb, 0);

        return VMCI_SUCCESS;
}
663
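/* Stream connections are refused to and from the reserved context ID; any
 * other context, including the hypervisor, may carry vsock streams.  The
 * port argument is currently unused.
 */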
static bool vmci_transport_stream_allow(u32 cid, u32 port)
{
        static const u32 non_socket_contexts[] = {
                VMADDR_CID_RESERVED,
        };
        int i;

        BUILD_BUG_ON(sizeof(cid) != sizeof(*non_socket_contexts));

        for (i = 0; i < ARRAY_SIZE(non_socket_contexts); i++) {
                if (cid == non_socket_contexts[i])
                        return false;
        }

        return true;
}

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context but it defers most of
 * its work to the packet handling work queue.
 */

static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
{
        struct sock *sk;
        struct sockaddr_vm dst;
        struct sockaddr_vm src;
        struct vmci_transport_packet *pkt;
        struct vsock_sock *vsk;
        bool bh_process_pkt;
        int err;

        sk = NULL;
        err = VMCI_SUCCESS;
        bh_process_pkt = false;

        /* Ignore incoming packets from contexts without sockets, or resources
         * that aren't vsock implementations.
         */

        if (!vmci_transport_stream_allow(dg->src.context, -1)
            || vmci_transport_peer_rid(dg->src.context) != dg->src.resource)
                return VMCI_ERROR_NO_ACCESS;

        if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
                /* Drop datagrams that do not contain full VSock packets. */
                return VMCI_ERROR_INVALID_ARGS;

        pkt = (struct vmci_transport_packet *)dg;

        /* Find the socket that should handle this packet.  First we look for
         * a connected socket and, if there is none, we look for a socket
         * bound to the destination address.
         */
        vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
        vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);

        sk = vsock_find_connected_socket(&src, &dst);
        if (!sk) {
                sk = vsock_find_bound_socket(&dst);
                if (!sk) {
                        /* We could not find a socket for this specified
                         * address.  If this packet is a RST, we just drop it.
                         * If it is another packet, we send a RST.  Note that
                         * we do not send a RST reply to RSTs so that we do not
                         * continually send RSTs between two endpoints.
                         *
                         * Note that since this is a reply, dst is src and src
                         * is dst.
                         */
                        if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
                                pr_err("unable to send reset\n");

                        err = VMCI_ERROR_NOT_FOUND;
                        goto out;
                }
        }

        /* If the received packet type is beyond all types known to this
         * implementation, reply with an invalid message.  Hopefully this will
         * help when implementing backwards compatibility in the future.
         */
        if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
                vmci_transport_send_invalid_bh(&dst, &src);
                err = VMCI_ERROR_INVALID_ARGS;
                goto out;
        }

        /* This handler is privileged when this module is running on the host.
         * We will get datagram connect requests from all endpoints (even VMs
         * that are in a restricted context). If we get one from a restricted
         * context then the destination socket must be trusted.
         *
         * NOTE: We access the socket struct without holding the lock here.
         * This is ok because the field we are interested in is never modified
         * outside of the create and destruct socket functions.
         */
        vsk = vsock_sk(sk);
        if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
                err = VMCI_ERROR_NO_ACCESS;
                goto out;
        }

        /* We do most everything in a work queue, but let's fast path the
         * notification of reads and writes to help data transfer performance.
         * We can only do this if there is no process context code executing
         * for this socket since that may change the state.
         */
        bh_lock_sock(sk);

        if (!sock_owned_by_user(sk)) {
                /* The local context ID may be out of date, update it. */
                vsk->local_addr.svm_cid = dst.svm_cid;

                if (sk->sk_state == TCP_ESTABLISHED)
                        vmci_trans(vsk)->notify_ops->handle_notify_pkt(
                                        sk, pkt, true, &dst, &src,
                                        &bh_process_pkt);
        }

        bh_unlock_sock(sk);

        if (!bh_process_pkt) {
                struct vmci_transport_recv_pkt_info *recv_pkt_info;

                recv_pkt_info = kmalloc(sizeof(*recv_pkt_info), GFP_ATOMIC);
                if (!recv_pkt_info) {
                        if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
                                pr_err("unable to send reset\n");

                        err = VMCI_ERROR_NO_MEM;
                        goto out;
                }

                recv_pkt_info->sk = sk;
                memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
                INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);

                schedule_work(&recv_pkt_info->work);
                /* Clear sk so that the reference count incremented by one of
                 * the Find functions above is not decremented below.  We need
                 * that reference count for the packet handler we've scheduled
                 * to run.
                 */
                sk = NULL;
        }

out:
        if (sk)
                sock_put(sk);

        return err;
}
817
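/* Called when the peer has detached from the queue pair: mark the socket
 * done and the peer fully shut down, and close the socket once any data
 * left in the consume queue has been drained.  A detach that arrives while
 * we are still connecting is treated like a reset.
 */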
static void vmci_transport_handle_detach(struct sock *sk)
{
        struct vsock_sock *vsk;

        vsk = vsock_sk(sk);
        if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
                sock_set_flag(sk, SOCK_DONE);

                /* On a detach the peer will not be sending or receiving
                 * anymore.
                 */
                vsk->peer_shutdown = SHUTDOWN_MASK;

                /* We should not be sending anymore since the peer won't be
                 * there to receive, but we can still receive if there is data
                 * left in our consume queue.
                 */
                if (vsock_stream_has_data(vsk) <= 0) {
                        if (sk->sk_state == TCP_SYN_SENT) {
                                /* The peer may detach from a queue pair while
                                 * we are still in the connecting state, i.e.,
                                 * if the peer VM is killed after attaching to
                                 * a queue pair, but before we complete the
                                 * handshake. In that case, we treat the detach
                                 * event like a reset.
                                 */

                                sk->sk_state = TCP_CLOSE;
                                sk->sk_err = ECONNRESET;
                                sk->sk_error_report(sk);
                                return;
                        }
                        sk->sk_state = TCP_CLOSE;
                }
                sk->sk_state_change(sk);
        }
}
855
static void vmci_transport_peer_detach_cb(u32 sub_id,
                                          const struct vmci_event_data *e_data,
                                          void *client_data)
{
        struct vmci_transport *trans = client_data;
        const struct vmci_event_payload_qp *e_payload;

        e_payload = vmci_event_data_const_payload(e_data);

        /* XXX This is lame, we should provide a way to lookup sockets by
         * qp_handle.
         */
        if (vmci_handle_is_invalid(e_payload->handle) ||
            !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
                return;

        /* We don't ask for delayed CBs when we subscribe to this event (we
         * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
         * guarantees in that case about what context we might be running in,
         * so it could be BH or process, blockable or non-blockable.  So we
         * need to account for all possible contexts here.
         */
        spin_lock_bh(&trans->lock);
        if (!trans->sk)
                goto out;

        /* Apart from here, trans->lock is only grabbed as part of sk destruct,
         * where trans->sk isn't locked.
         */
        bh_lock_sock(trans->sk);

        vmci_transport_handle_detach(trans->sk);

        bh_unlock_sock(trans->sk);
 out:
        spin_unlock_bh(&trans->lock);
}
893
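/* Presumably fired when queue pairs come back after the guest is resumed:
 * we simply re-run detach handling on every connected socket, so that
 * connections whose queue pair did not survive the suspend are torn down.
 */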
static void vmci_transport_qp_resumed_cb(u32 sub_id,
                                         const struct vmci_event_data *e_data,
                                         void *client_data)
{
        vsock_for_each_connected_socket(vmci_transport_handle_detach);
}

static void vmci_transport_recv_pkt_work(struct work_struct *work)
{
        struct vmci_transport_recv_pkt_info *recv_pkt_info;
        struct vmci_transport_packet *pkt;
        struct sock *sk;

        recv_pkt_info =
                container_of(work, struct vmci_transport_recv_pkt_info, work);
        sk = recv_pkt_info->sk;
        pkt = &recv_pkt_info->pkt;

        lock_sock(sk);

        /* The local context ID may be out of date. */
        vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;

        switch (sk->sk_state) {
        case TCP_LISTEN:
                vmci_transport_recv_listen(sk, pkt);
                break;
        case TCP_SYN_SENT:
                /* Processing of pending connections for servers goes through
                 * the listening socket, so see vmci_transport_recv_listen()
                 * for that path.
                 */
                vmci_transport_recv_connecting_client(sk, pkt);
                break;
        case TCP_ESTABLISHED:
                vmci_transport_recv_connected(sk, pkt);
                break;
        default:
                /* Because this function does not run in the same context as
                 * vmci_transport_recv_stream_cb it is possible that the
                 * socket has closed. We need to let the other side know or it
                 * could be sitting in a connect and hang forever. Send a
                 * reset to prevent that.
                 */
                vmci_transport_send_reset(sk, pkt);
                break;
        }

        release_sock(sk);
        kfree(recv_pkt_info);
        /* Release reference obtained in the stream callback when we fetched
         * this socket out of the bound or connected list.
         */
        sock_put(sk);
}
949
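/* Roughly, the connect handshake that the packet handlers below implement
 * looks like this (client on the left, listening server on the right):
 *
 *      REQUEST/REQUEST2 (queue pair size)  -->
 *                                          <--  NEGOTIATE/NEGOTIATE2 (size)
 *      OFFER (queue pair handle)           -->
 *                                          <--  ATTACH (handle)
 *
 * The REQUEST2/NEGOTIATE2 variants additionally carry the notify protocol
 * version.  The server side of the exchange is driven through the listening
 * socket and its pending children; the client side is handled in
 * vmci_transport_recv_connecting_client() and friends.
 */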
static int vmci_transport_recv_listen(struct sock *sk,
                                      struct vmci_transport_packet *pkt)
{
        struct sock *pending;
        struct vsock_sock *vpending;
        int err;
        u64 qp_size;
        bool old_request = false;
        bool old_pkt_proto = false;

        err = 0;

        /* Because we are in the listen state, we could be receiving a packet
         * for ourselves or for any previous connection request that we
         * received.  If it's the latter, we try to find a socket in our list
         * of pending connections and, if we do, call the appropriate handler
         * for the state that that socket is in.  Otherwise we try to service
         * the connection request.
         */
        pending = vmci_transport_get_pending(sk, pkt);
        if (pending) {
                lock_sock(pending);

                /* The local context ID may be out of date. */
                vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;

                switch (pending->sk_state) {
                case TCP_SYN_SENT:
                        err = vmci_transport_recv_connecting_server(sk,
                                                                    pending,
                                                                    pkt);
                        break;
                default:
                        vmci_transport_send_reset(pending, pkt);
                        err = -EINVAL;
                }

                if (err < 0)
                        vsock_remove_pending(sk, pending);

                release_sock(pending);
                vmci_transport_release_pending(pending);

                return err;
        }

        /* The listen state only accepts connection requests.  Reply with a
         * reset unless we received a reset.
         */

        if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
              pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
                vmci_transport_reply_reset(pkt);
                return -EINVAL;
        }

        if (pkt->u.size == 0) {
                vmci_transport_reply_reset(pkt);
                return -EINVAL;
        }

        /* If this socket can't accommodate this connection request, we send a
         * reset.  Otherwise we create and initialize a child socket and reply
         * with a connection negotiation.
         */
        if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
                vmci_transport_reply_reset(pkt);
                return -ECONNREFUSED;
        }

        pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
                                 sk->sk_type, 0);
        if (!pending) {
                vmci_transport_send_reset(sk, pkt);
                return -ENOMEM;
        }

        vpending = vsock_sk(pending);

        vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
                        pkt->dst_port);
        vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
                        pkt->src_port);

        /* If the proposed size fits within our min/max, accept it. Otherwise
         * propose our own size.
         */
        if (pkt->u.size >= vmci_trans(vpending)->queue_pair_min_size &&
            pkt->u.size <= vmci_trans(vpending)->queue_pair_max_size) {
                qp_size = pkt->u.size;
        } else {
                qp_size = vmci_trans(vpending)->queue_pair_size;
        }

        /* Figure out if we are using old or new requests based on the
         * override, if one is set, and otherwise on the packet type sent by
         * our peer.
         */
        if (vmci_transport_old_proto_override(&old_pkt_proto)) {
                old_request = old_pkt_proto;
        } else {
                if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
                        old_request = true;
                else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
                        old_request = false;
        }

        if (old_request) {
                /* Handle a REQUEST (or override) */
                u16 version = VSOCK_PROTO_INVALID;

                if (vmci_transport_proto_to_notify_struct(
                        pending, &version, true))
                        err = vmci_transport_send_negotiate(pending, qp_size);
                else
                        err = -EINVAL;
        } else {
                /* Handle a REQUEST2 (or override) */
                int proto_int = pkt->proto;
                int pos;
                u16 active_proto_version = 0;

                /* The list of usable protocols is the intersection of the
                 * protocols the client supports and the protocols we support.
                 */
                proto_int &= vmci_transport_new_proto_supported_versions();

                /* We choose the highest possible protocol version and use that
                 * one.
                 */
                pos = fls(proto_int);
                if (pos) {
                        active_proto_version = (1 << (pos - 1));
                        if (vmci_transport_proto_to_notify_struct(
                                pending, &active_proto_version, false))
                                err = vmci_transport_send_negotiate2(pending,
                                                        qp_size,
                                                        active_proto_version);
                        else
                                err = -EINVAL;
                } else {
                        err = -EINVAL;
                }
        }

        if (err < 0) {
                vmci_transport_send_reset(sk, pkt);
                sock_put(pending);
                err = vmci_transport_error_to_vsock_error(err);
                goto out;
        }

        vsock_add_pending(sk, pending);
        sk->sk_ack_backlog++;

        pending->sk_state = TCP_SYN_SENT;
        vmci_trans(vpending)->produce_size =
                vmci_trans(vpending)->consume_size = qp_size;
        vmci_trans(vpending)->queue_pair_size = qp_size;

        vmci_trans(vpending)->notify_ops->process_request(pending);

        /* We might never receive another message for this socket and it's not
         * connected to any process, so we have to ensure it gets cleaned up
         * ourselves.  Our delayed work function will take care of that.  Note
         * that we do not ever cancel this function since we have few
         * guarantees about its state when calling cancel_delayed_work().
         * Instead we hold a reference on the socket for that function and make
         * it capable of handling cases where it needs to do nothing but
         * release that reference.
         */
        vpending->listener = sk;
        sock_hold(sk);
        sock_hold(pending);
        schedule_delayed_work(&vpending->pending_work, HZ);

out:
        return err;
}
1131
static int
vmci_transport_recv_connecting_server(struct sock *listener,
                                      struct sock *pending,
                                      struct vmci_transport_packet *pkt)
{
        struct vsock_sock *vpending;
        struct vmci_handle handle;
        struct vmci_qp *qpair;
        bool is_local;
        u32 flags;
        u32 detach_sub_id;
        int err;
        int skerr;

        vpending = vsock_sk(pending);
        detach_sub_id = VMCI_INVALID_ID;

        switch (pkt->type) {
        case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
                if (vmci_handle_is_invalid(pkt->u.handle)) {
                        vmci_transport_send_reset(pending, pkt);
                        skerr = EPROTO;
                        err = -EINVAL;
                        goto destroy;
                }
                break;
        default:
                /* Close and cleanup the connection. */
                vmci_transport_send_reset(pending, pkt);
                skerr = EPROTO;
                err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
                goto destroy;
        }

        /* In order to complete the connection we need to attach to the offered
         * queue pair and send an attach notification.  We also subscribe to the
         * detach event so we know when our peer goes away, and we do that
         * before attaching so we don't miss an event.  If all this succeeds,
         * we update our state and wake up anything waiting in accept() for a
         * connection.
         */

        /* We don't care about attach since we ensure the other side has
         * attached by specifying the ATTACH_ONLY flag below.
         */
        err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
                                   vmci_transport_peer_detach_cb,
                                   vmci_trans(vpending), &detach_sub_id);
        if (err < VMCI_SUCCESS) {
                vmci_transport_send_reset(pending, pkt);
                err = vmci_transport_error_to_vsock_error(err);
                skerr = -err;
                goto destroy;
        }

        vmci_trans(vpending)->detach_sub_id = detach_sub_id;

        /* Now attach to the queue pair the client created. */
        handle = pkt->u.handle;

        /* vpending->local_addr always has a context id so we do not need to
         * worry about VMADDR_CID_ANY in this case.
         */
        is_local =
            vpending->remote_addr.svm_cid == vpending->local_addr.svm_cid;
        flags = VMCI_QPFLAG_ATTACH_ONLY;
        flags |= is_local ? VMCI_QPFLAG_LOCAL : 0;

        err = vmci_transport_queue_pair_alloc(
                                        &qpair,
                                        &handle,
                                        vmci_trans(vpending)->produce_size,
                                        vmci_trans(vpending)->consume_size,
                                        pkt->dg.src.context,
                                        flags,
                                        vmci_transport_is_trusted(
                                                vpending,
                                                vpending->remote_addr.svm_cid));
        if (err < 0) {
                vmci_transport_send_reset(pending, pkt);
                skerr = -err;
                goto destroy;
        }

        vmci_trans(vpending)->qp_handle = handle;
        vmci_trans(vpending)->qpair = qpair;

        /* When we send the attach message, we must be ready to handle incoming
         * control messages on the newly connected socket. So we move the
         * pending socket to the connected state before sending the attach
         * message. Otherwise, an incoming packet triggered by the attach being
         * received by the peer may be processed concurrently with what happens
         * below after sending the attach message, and that incoming packet
         * will find the listening socket instead of the (currently) pending
         * socket. Note that enqueueing the socket increments the reference
         * count, so even if a reset comes before the connection is accepted,
         * the socket will be valid until it is removed from the queue.
         *
         * If we fail sending the attach below, we remove the socket from the
         * connected list and move the socket to TCP_CLOSE before
         * releasing the lock, so a pending slow path processing of an incoming
         * packet will not see the socket in the connected state in that case.
         */
        pending->sk_state = TCP_ESTABLISHED;

        vsock_insert_connected(vpending);

        /* Notify our peer of our attach. */
        err = vmci_transport_send_attach(pending, handle);
        if (err < 0) {
                vsock_remove_connected(vpending);
                pr_err("Could not send attach\n");
                vmci_transport_send_reset(pending, pkt);
                err = vmci_transport_error_to_vsock_error(err);
                skerr = -err;
                goto destroy;
        }

        /* We have a connection. Move the now connected socket from the
         * listener's pending list to the accept queue so callers of accept()
         * can find it.
         */
        vsock_remove_pending(listener, pending);
        vsock_enqueue_accept(listener, pending);

        /* Callers of accept() will be waiting on the listening socket, not
         * the pending socket.
         */
        listener->sk_data_ready(listener);

        return 0;

destroy:
        pending->sk_err = skerr;
        pending->sk_state = TCP_CLOSE;
        /* As long as we drop our reference, all necessary cleanup will happen
         * when the cleanup function drops its reference and our destruct
         * implementation is called.  Note that since the listen handler will
         * remove pending from the pending list upon our failure, the cleanup
         * function won't drop the additional reference, which is why we do it
         * here.
         */
        sock_put(pending);

        return err;
}
1278
1279 static int
1280 vmci_transport_recv_connecting_client(struct sock *sk,
1281                                       struct vmci_transport_packet *pkt)
1282 {
1283         struct vsock_sock *vsk;
1284         int err;
1285         int skerr;
1286
1287         vsk = vsock_sk(sk);
1288
1289         switch (pkt->type) {
1290         case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
1291                 if (vmci_handle_is_invalid(pkt->u.handle) ||
1292                     !vmci_handle_is_equal(pkt->u.handle,
1293                                           vmci_trans(vsk)->qp_handle)) {
1294                         skerr = EPROTO;
1295                         err = -EINVAL;
1296                         goto destroy;
1297                 }
1298
1299                 /* Signify the socket is connected and wakeup the waiter in
1300                  * connect(). Also place the socket in the connected table for
1301                  * accounting (it can already be found since it's in the bound
1302                  * table).
1303                  */
1304                 sk->sk_state = TCP_ESTABLISHED;
1305                 sk->sk_socket->state = SS_CONNECTED;
1306                 vsock_insert_connected(vsk);
1307                 sk->sk_state_change(sk);
1308
1309                 break;
1310         case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
1311         case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
1312                 if (pkt->u.size == 0
1313                     || pkt->dg.src.context != vsk->remote_addr.svm_cid
1314                     || pkt->src_port != vsk->remote_addr.svm_port
1315                     || !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)
1316                     || vmci_trans(vsk)->qpair
1317                     || vmci_trans(vsk)->produce_size != 0
1318                     || vmci_trans(vsk)->consume_size != 0
1319                     || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
1320                         skerr = EPROTO;
1321                         err = -EINVAL;
1322
1323                         goto destroy;
1324                 }
1325
1326                 err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
1327                 if (err) {
1328                         skerr = -err;
1329                         goto destroy;
1330                 }
1331
1332                 break;
1333         case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
1334                 err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
1335                 if (err) {
1336                         skerr = -err;
1337                         goto destroy;
1338                 }
1339
1340                 break;
1341         case VMCI_TRANSPORT_PACKET_TYPE_RST:
1342                 /* Older versions of the Linux code (WS 6.5 / ESX 4.0) used to
1343                  * continue processing here after they sent an INVALID packet.
1344                  * This meant that we got a RST after the INVALID. We ignore a
1345                  * RST after an INVALID. The common code doesn't send the RST
1346                  * ... so we can hang if an old version of the common code
1347                  * fails between getting a REQUEST and sending an OFFER back.
1348                  * Not much we can do about it... except hope that it doesn't
1349                  * happen.
1350                  */
1351                 if (vsk->ignore_connecting_rst) {
1352                         vsk->ignore_connecting_rst = false;
1353                 } else {
1354                         skerr = ECONNRESET;
1355                         err = 0;
1356                         goto destroy;
1357                 }
1358
1359                 break;
1360         default:
1361                 /* Close and cleanup the connection. */
1362                 skerr = EPROTO;
1363                 err = -EINVAL;
1364                 goto destroy;
1365         }
1366
1367         return 0;
1368
1369 destroy:
1370         vmci_transport_send_reset(sk, pkt);
1371
1372         sk->sk_state = TCP_CLOSE;
1373         sk->sk_err = skerr;
1374         sk->sk_error_report(sk);
1375         return err;
1376 }
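/* For orientation, the client-side handshake this state machine expects, as
 * sketched from the handlers in this file (new protocol shown; the old
 * protocol uses vmci_transport_send_conn_request() and NEGOTIATE instead):
 *
 *      client                                  server
 *      ------                                  ------
 *      conn request 2           -------->      (listener creates pending)
 *                               <--------      NEGOTIATE2 (qp size, proto)
 *      allocate queue pair,
 *      send qp offer            -------->      (server attaches to qp)
 *                               <--------      ATTACH
 *      TCP_ESTABLISHED
 */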
1377
1378 static int vmci_transport_recv_connecting_client_negotiate(
1379                                         struct sock *sk,
1380                                         struct vmci_transport_packet *pkt)
1381 {
1382         int err;
1383         struct vsock_sock *vsk;
1384         struct vmci_handle handle;
1385         struct vmci_qp *qpair;
1386         u32 detach_sub_id;
1387         bool is_local;
1388         u32 flags;
1389         bool old_proto = true;
1390         bool old_pkt_proto;
1391         u16 version;
1392
1393         vsk = vsock_sk(sk);
1394         handle = VMCI_INVALID_HANDLE;
1395         detach_sub_id = VMCI_INVALID_ID;
1396
1397         /* If we have gotten here, then we should be past the point where an
1398          * old Linux vsock implementation could have sent the bogus RST.
1399          */
1400         vsk->sent_request = false;
1401         vsk->ignore_connecting_rst = false;
1402
1403         /* Verify that we're OK with the proposed queue pair size */
1404         if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
1405             pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
1406                 err = -EINVAL;
1407                 goto destroy;
1408         }
1409
1410         /* At this point we know the CID the peer is using to talk to us. */
1411
1412         if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
1413                 vsk->local_addr.svm_cid = pkt->dg.dst.context;
1414
1415         /* Set up the notify ops to use the highest protocol version that
1416          * both the server and the client support.
1417          */
1418
1419         if (vmci_transport_old_proto_override(&old_pkt_proto)) {
1420                 old_proto = old_pkt_proto;
1421         } else {
1422                 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
1423                         old_proto = true;
1424                 else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
1425                         old_proto = false;
1427         }
1428
1429         if (old_proto)
1430                 version = VSOCK_PROTO_INVALID;
1431         else
1432                 version = pkt->proto;
1433
1434         if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
1435                 err = -EINVAL;
1436                 goto destroy;
1437         }
1438
1439         /* Subscribe to detach events first.
1440          *
1441          * XXX We attach once for each queue pair created for now so it is easy
1442          * to find the socket (it's provided), but later we should only
1443          * subscribe once and add a way to look up sockets by queue pair handle.
1444          */
1445         err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
1446                                    vmci_transport_peer_detach_cb,
1447                                    vmci_trans(vsk), &detach_sub_id);
1448         if (err < VMCI_SUCCESS) {
1449                 err = vmci_transport_error_to_vsock_error(err);
1450                 goto destroy;
1451         }
1452
1453         /* Make VMCI select the handle for us. */
1454         handle = VMCI_INVALID_HANDLE;
1455         is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
1456         flags = is_local ? VMCI_QPFLAG_LOCAL : 0;
1457
1458         err = vmci_transport_queue_pair_alloc(&qpair,
1459                                               &handle,
1460                                               pkt->u.size,
1461                                               pkt->u.size,
1462                                               vsk->remote_addr.svm_cid,
1463                                               flags,
1464                                               vmci_transport_is_trusted(
1465                                                   vsk,
1466                                                   vsk->remote_addr.svm_cid));
1468         if (err < 0)
1469                 goto destroy;
1470
1471         err = vmci_transport_send_qp_offer(sk, handle);
1472         if (err < 0) {
1473                 err = vmci_transport_error_to_vsock_error(err);
1474                 goto destroy;
1475         }
1476
1477         vmci_trans(vsk)->qp_handle = handle;
1478         vmci_trans(vsk)->qpair = qpair;
1479
1480         vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
1481                 pkt->u.size;
1482
1483         vmci_trans(vsk)->detach_sub_id = detach_sub_id;
1484
1485         vmci_trans(vsk)->notify_ops->process_negotiate(sk);
1486
1487         return 0;
1488
1489 destroy:
1490         if (detach_sub_id != VMCI_INVALID_ID)
1491                 vmci_event_unsubscribe(detach_sub_id);
1492
1493         if (!vmci_handle_is_invalid(handle))
1494                 vmci_qpair_detach(&qpair);
1495
1496         return err;
1497 }
1498
1499 static int
1500 vmci_transport_recv_connecting_client_invalid(struct sock *sk,
1501                                               struct vmci_transport_packet *pkt)
1502 {
1503         int err = 0;
1504         struct vsock_sock *vsk = vsock_sk(sk);
1505
1506         if (vsk->sent_request) {
1507                 vsk->sent_request = false;
1508                 vsk->ignore_connecting_rst = true;
1509
1510                 err = vmci_transport_send_conn_request(
1511                         sk, vmci_trans(vsk)->queue_pair_size);
1512                 if (err < 0)
1513                         err = vmci_transport_error_to_vsock_error(err);
1514                 else
1515                         err = 0;
1517         }
1518
1519         return err;
1520 }
1521
1522 static int vmci_transport_recv_connected(struct sock *sk,
1523                                          struct vmci_transport_packet *pkt)
1524 {
1525         struct vsock_sock *vsk;
1526         bool pkt_processed = false;
1527
1528         /* In cases where we are closing the connection, it's sufficient to
1529          * mark the state change (and maybe error) and wake up any waiting
1530          * threads. Since this is a connected socket, it's owned by a user
1531          * process and will be cleaned up when the failure is passed back on
1532          * the current or next system call.  Our system call implementations
1533          * must therefore check for error and state changes on entry and when
1534          * being awoken.
1535          */
1536         switch (pkt->type) {
1537         case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
1538                 if (pkt->u.mode) {
1539                         vsk = vsock_sk(sk);
1540
1541                         vsk->peer_shutdown |= pkt->u.mode;
1542                         sk->sk_state_change(sk);
1543                 }
1544                 break;
1545
1546         case VMCI_TRANSPORT_PACKET_TYPE_RST:
1547                 vsk = vsock_sk(sk);
1548                 /* It is possible that we sent our peer a message (e.g. a
1549                  * WAITING_READ) right before we got notified that the peer had
1550                  * detached. If that happens then we can get a RST pkt back
1551                  * from our peer even though there is data available for us to
1552                  * read. In that case, don't shut down the socket completely but
1553                  * instead allow the local client to finish reading data off
1554                  * the queuepair. Always treat a RST pkt in connected mode like
1555                  * a clean shutdown.
1556                  */
1557                 sock_set_flag(sk, SOCK_DONE);
1558                 vsk->peer_shutdown = SHUTDOWN_MASK;
1559                 if (vsock_stream_has_data(vsk) <= 0)
1560                         sk->sk_state = TCP_CLOSING;
1561
1562                 sk->sk_state_change(sk);
1563                 break;
1564
1565         default:
1566                 vsk = vsock_sk(sk);
1567                 vmci_trans(vsk)->notify_ops->handle_notify_pkt(
1568                                 sk, pkt, false, NULL, NULL,
1569                                 &pkt_processed);
1570                 if (!pkt_processed)
1571                         return -EINVAL;
1572
1573                 break;
1574         }
1575
1576         return 0;
1577 }
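/* A minimal sketch of the "check on entry and when awoken" pattern that the
 * comment above demands of the system call paths (abbreviated from the shape
 * used in net/vmw_vsock/af_vsock.c; ready() stands in for the per-call
 * wakeup condition):
 *
 *      DEFINE_WAIT(wait);
 *      long timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *      prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 *      while (!ready(vsk)) {
 *              if (sk->sk_err || sk->sk_state != TCP_ESTABLISHED ||
 *                  (vsk->peer_shutdown & SEND_SHUTDOWN))
 *                      break;          // set by the RST/SHUTDOWN cases above
 *              release_sock(sk);
 *              timeout = schedule_timeout(timeout);
 *              lock_sock(sk);
 *              prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 *      }
 *      finish_wait(sk_sleep(sk), &wait);
 */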
1578
1579 static int vmci_transport_socket_init(struct vsock_sock *vsk,
1580                                       struct vsock_sock *psk)
1581 {
1582         vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);
1583         if (!vsk->trans)
1584                 return -ENOMEM;
1585
1586         vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
1587         vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
1588         vmci_trans(vsk)->qpair = NULL;
1589         vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
1590         vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
1591         vmci_trans(vsk)->notify_ops = NULL;
1592         INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
1593         vmci_trans(vsk)->sk = &vsk->sk;
1594         spin_lock_init(&vmci_trans(vsk)->lock);
1595         if (psk) {
1596                 vmci_trans(vsk)->queue_pair_size =
1597                         vmci_trans(psk)->queue_pair_size;
1598                 vmci_trans(vsk)->queue_pair_min_size =
1599                         vmci_trans(psk)->queue_pair_min_size;
1600                 vmci_trans(vsk)->queue_pair_max_size =
1601                         vmci_trans(psk)->queue_pair_max_size;
1602         } else {
1603                 vmci_trans(vsk)->queue_pair_size =
1604                         VMCI_TRANSPORT_DEFAULT_QP_SIZE;
1605                 vmci_trans(vsk)->queue_pair_min_size =
1606                          VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN;
1607                 vmci_trans(vsk)->queue_pair_max_size =
1608                         VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX;
1609         }
1610
1611         return 0;
1612 }
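/* Note that a child socket created for an incoming connection request is
 * initialized with the listener as @psk, so queue pair sizes tuned on the
 * listener (e.g. via SO_VM_SOCKETS_BUFFER_SIZE) carry over to every socket
 * returned by accept().
 */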
1613
1614 static void vmci_transport_free_resources(struct list_head *transport_list)
1615 {
1616         while (!list_empty(transport_list)) {
1617                 struct vmci_transport *transport =
1618                     list_first_entry(transport_list, struct vmci_transport,
1619                                      elem);
1620                 list_del(&transport->elem);
1621
1622                 if (transport->detach_sub_id != VMCI_INVALID_ID) {
1623                         vmci_event_unsubscribe(transport->detach_sub_id);
1624                         transport->detach_sub_id = VMCI_INVALID_ID;
1625                 }
1626
1627                 if (!vmci_handle_is_invalid(transport->qp_handle)) {
1628                         vmci_qpair_detach(&transport->qpair);
1629                         transport->qp_handle = VMCI_INVALID_HANDLE;
1630                         transport->produce_size = 0;
1631                         transport->consume_size = 0;
1632                 }
1633
1634                 kfree(transport);
1635         }
1636 }
1637
1638 static void vmci_transport_cleanup(struct work_struct *work)
1639 {
1640         LIST_HEAD(pending);
1641
1642         spin_lock_bh(&vmci_transport_cleanup_lock);
1643         list_replace_init(&vmci_transport_cleanup_list, &pending);
1644         spin_unlock_bh(&vmci_transport_cleanup_lock);
1645         vmci_transport_free_resources(&pending);
1646 }
1647
1648 static void vmci_transport_destruct(struct vsock_sock *vsk)
1649 {
1650         /* transport can be NULL if we hit a failure at init() time */
1651         if (!vmci_trans(vsk))
1652                 return;
1653
1654         /* Ensure that the detach callback doesn't use the sk/vsk
1655          * we are about to destruct.
1656          */
1657         spin_lock_bh(&vmci_trans(vsk)->lock);
1658         vmci_trans(vsk)->sk = NULL;
1659         spin_unlock_bh(&vmci_trans(vsk)->lock);
1660
1661         if (vmci_trans(vsk)->notify_ops)
1662                 vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
1663
1664         spin_lock_bh(&vmci_transport_cleanup_lock);
1665         list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
1666         spin_unlock_bh(&vmci_transport_cleanup_lock);
1667         schedule_work(&vmci_transport_cleanup_work);
1668
1669         vsk->trans = NULL;
1670 }
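/* The transport is handed to the cleanup worker rather than freed here
 * because destruct may run via sock_put() from the peer-detach callback,
 * i.e. in atomic context, while vmci_event_unsubscribe() and
 * vmci_qpair_detach() can block; the worker runs them in process context.
 */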
1671
1672 static void vmci_transport_release(struct vsock_sock *vsk)
1673 {
1674         vsock_remove_sock(vsk);
1675
1676         if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) {
1677                 vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle);
1678                 vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
1679         }
1680 }
1681
1682 static int vmci_transport_dgram_bind(struct vsock_sock *vsk,
1683                                      struct sockaddr_vm *addr)
1684 {
1685         u32 port;
1686         u32 flags;
1687         int err;
1688
1689         /* VMCI will select a resource ID for us if we provide
1690          * VMCI_INVALID_ID.
1691          */
1692         port = addr->svm_port == VMADDR_PORT_ANY ?
1693                         VMCI_INVALID_ID : addr->svm_port;
1694
1695         if (port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE))
1696                 return -EACCES;
1697
1698         flags = addr->svm_cid == VMADDR_CID_ANY ?
1699                                 VMCI_FLAG_ANYCID_DG_HND : 0;
1700
1701         err = vmci_transport_datagram_create_hnd(port, flags,
1702                                                  vmci_transport_recv_dgram_cb,
1703                                                  &vsk->sk,
1704                                                  &vmci_trans(vsk)->dg_handle);
1705         if (err < VMCI_SUCCESS)
1706                 return vmci_transport_error_to_vsock_error(err);
1707         vsock_addr_init(&vsk->local_addr, addr->svm_cid,
1708                         vmci_trans(vsk)->dg_handle.resource);
1709
1710         return 0;
1711 }
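/* The userspace view of this bind, as a sketch (VMADDR_PORT_ANY maps to
 * VMCI_INVALID_ID above, letting VMCI choose the resource ID):
 *
 *      int fd = socket(AF_VSOCK, SOCK_DGRAM, 0);
 *      struct sockaddr_vm addr = {
 *              .svm_family = AF_VSOCK,
 *              .svm_cid    = VMADDR_CID_ANY,   // VMCI_FLAG_ANYCID_DG_HND
 *              .svm_port   = VMADDR_PORT_ANY,
 *      };
 *
 *      bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */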
1712
1713 static int vmci_transport_dgram_enqueue(
1714         struct vsock_sock *vsk,
1715         struct sockaddr_vm *remote_addr,
1716         struct msghdr *msg,
1717         size_t len)
1718 {
1719         int err;
1720         struct vmci_datagram *dg;
1721
1722         if (len > VMCI_MAX_DG_PAYLOAD_SIZE)
1723                 return -EMSGSIZE;
1724
1725         if (!vmci_transport_allow_dgram(vsk, remote_addr->svm_cid))
1726                 return -EPERM;
1727
1728         /* Allocate a buffer for the user's message and our packet header. */
1729         dg = kmalloc(len + sizeof(*dg), GFP_KERNEL);
1730         if (!dg)
1731                 return -ENOMEM;
1732
1733         err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
1734         if (err) {
1735                 kfree(dg);
1736                 return err;
1737         }
1738
1739         dg->dst = vmci_make_handle(remote_addr->svm_cid,
1740                                    remote_addr->svm_port);
1741         dg->src = vmci_make_handle(vsk->local_addr.svm_cid,
1742                                    vsk->local_addr.svm_port);
1743         dg->payload_size = len;
1744
1745         err = vmci_datagram_send(dg);
1746         kfree(dg);
1747         if (err < 0)
1748                 return vmci_transport_error_to_vsock_error(err);
1749
1750         return err - sizeof(*dg);
1751 }
1752
1753 static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
1754                                         struct msghdr *msg, size_t len,
1755                                         int flags)
1756 {
1757         int err;
1758         int noblock;
1759         struct vmci_datagram *dg;
1760         size_t payload_len;
1761         struct sk_buff *skb;
1762
1763         noblock = flags & MSG_DONTWAIT;
1764
1765         if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
1766                 return -EOPNOTSUPP;
1767
1768         /* Retrieve the head sk_buff from the socket's receive queue. */
1769         err = 0;
1770         skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
1771         if (!skb)
1772                 return err;
1773
1774         dg = (struct vmci_datagram *)skb->data;
1775         if (!dg)
1776                 /* err is 0, meaning we read zero bytes. */
1777                 goto out;
1778
1779         payload_len = dg->payload_size;
1780         /* Ensure the sk_buff matches the payload size claimed in the packet. */
1781         if (payload_len != skb->len - sizeof(*dg)) {
1782                 err = -EINVAL;
1783                 goto out;
1784         }
1785
1786         if (payload_len > len) {
1787                 payload_len = len;
1788                 msg->msg_flags |= MSG_TRUNC;
1789         }
1790
1791         /* Place the datagram payload in the user's iovec. */
1792         err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len);
1793         if (err)
1794                 goto out;
1795
1796         if (msg->msg_name) {
1797                 /* Provide the address of the sender. */
1798                 DECLARE_SOCKADDR(struct sockaddr_vm *, vm_addr, msg->msg_name);
1799                 vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
1800                 msg->msg_namelen = sizeof(*vm_addr);
1801         }
1802         err = payload_len;
1803
1804 out:
1805         skb_free_datagram(&vsk->sk, skb);
1806         return err;
1807 }
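/* Sketch of the matching userspace calls (peer address illustrative):
 *
 *      sendto(fd, buf, len, 0, (struct sockaddr *)&peer, sizeof(peer));
 *      ...
 *      n = recvmsg(fd, &msg, 0);
 *
 * If the receive buffer is smaller than the datagram's payload, the excess
 * is dropped and MSG_TRUNC is reported in msg.msg_flags, as set in the
 * dequeue path above.
 */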
1808
1809 static bool vmci_transport_dgram_allow(u32 cid, u32 port)
1810 {
1811         if (cid == VMADDR_CID_HYPERVISOR) {
1812                 /* Registrations of PBRPC Servers do not modify VMX/Hypervisor
1813                  * state and are allowed.
1814                  */
1815                 return port == VMCI_UNITY_PBRPC_REGISTER;
1816         }
1817
1818         return true;
1819 }
1820
1821 static int vmci_transport_connect(struct vsock_sock *vsk)
1822 {
1823         int err;
1824         bool old_pkt_proto = false;
1825         struct sock *sk = &vsk->sk;
1826
1827         if (vmci_transport_old_proto_override(&old_pkt_proto) &&
1828             old_pkt_proto) {
1829                 err = vmci_transport_send_conn_request(
1830                         sk, vmci_trans(vsk)->queue_pair_size);
1831                 if (err < 0) {
1832                         sk->sk_state = TCP_CLOSE;
1833                         return err;
1834                 }
1835         } else {
1836                 int supported_proto_versions =
1837                         vmci_transport_new_proto_supported_versions();
1838                 err = vmci_transport_send_conn_request2(
1839                                 sk, vmci_trans(vsk)->queue_pair_size,
1840                                 supported_proto_versions);
1841                 if (err < 0) {
1842                         sk->sk_state = TCP_CLOSE;
1843                         return err;
1844                 }
1845
1846                 vsk->sent_request = true;
1847         }
1848
1849         return err;
1850 }
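/* Client-side userspace sketch of the connect this function initiates
 * (VMADDR_CID_HOST and the port are illustrative):
 *
 *      int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *      struct sockaddr_vm addr = {
 *              .svm_family = AF_VSOCK,
 *              .svm_cid    = VMADDR_CID_HOST,
 *              .svm_port   = 1234,
 *      };
 *
 *      connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * connect() blocks until vmci_transport_recv_connecting_client() sees the
 * peer's ATTACH and moves the socket to TCP_ESTABLISHED, or until an error
 * is reported.
 */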
1851
1852 static ssize_t vmci_transport_stream_dequeue(
1853         struct vsock_sock *vsk,
1854         struct msghdr *msg,
1855         size_t len,
1856         int flags)
1857 {
1858         if (flags & MSG_PEEK)
1859                 return vmci_qpair_peekv(vmci_trans(vsk)->qpair, msg, len, 0);
1860         else
1861                 return vmci_qpair_dequev(vmci_trans(vsk)->qpair, msg, len, 0);
1862 }
1863
1864 static ssize_t vmci_transport_stream_enqueue(
1865         struct vsock_sock *vsk,
1866         struct msghdr *msg,
1867         size_t len)
1868 {
1869         return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
1870 }
1871
1872 static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
1873 {
1874         return vmci_qpair_consume_buf_ready(vmci_trans(vsk)->qpair);
1875 }
1876
1877 static s64 vmci_transport_stream_has_space(struct vsock_sock *vsk)
1878 {
1879         return vmci_qpair_produce_free_space(vmci_trans(vsk)->qpair);
1880 }
1881
1882 static u64 vmci_transport_stream_rcvhiwat(struct vsock_sock *vsk)
1883 {
1884         return vmci_trans(vsk)->consume_size;
1885 }
1886
1887 static bool vmci_transport_stream_is_active(struct vsock_sock *vsk)
1888 {
1889         return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle);
1890 }
1891
1892 static u64 vmci_transport_get_buffer_size(struct vsock_sock *vsk)
1893 {
1894         return vmci_trans(vsk)->queue_pair_size;
1895 }
1896
1897 static u64 vmci_transport_get_min_buffer_size(struct vsock_sock *vsk)
1898 {
1899         return vmci_trans(vsk)->queue_pair_min_size;
1900 }
1901
1902 static u64 vmci_transport_get_max_buffer_size(struct vsock_sock *vsk)
1903 {
1904         return vmci_trans(vsk)->queue_pair_max_size;
1905 }
1906
1907 static void vmci_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
1908 {
1909         if (val < vmci_trans(vsk)->queue_pair_min_size)
1910                 vmci_trans(vsk)->queue_pair_min_size = val;
1911         if (val > vmci_trans(vsk)->queue_pair_max_size)
1912                 vmci_trans(vsk)->queue_pair_max_size = val;
1913         vmci_trans(vsk)->queue_pair_size = val;
1914 }
1915
1916 static void vmci_transport_set_min_buffer_size(struct vsock_sock *vsk,
1917                                                u64 val)
1918 {
1919         if (val > vmci_trans(vsk)->queue_pair_size)
1920                 vmci_trans(vsk)->queue_pair_size = val;
1921         vmci_trans(vsk)->queue_pair_min_size = val;
1922 }
1923
1924 static void vmci_transport_set_max_buffer_size(struct vsock_sock *vsk,
1925                                                u64 val)
1926 {
1927         if (val < vmci_trans(vsk)->queue_pair_size)
1928                 vmci_trans(vsk)->queue_pair_size = val;
1929         vmci_trans(vsk)->queue_pair_max_size = val;
1930 }
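/* Together these setters keep the three values consistent: setting the size
 * widens [min, max] to contain it, while setting min or max pushes the
 * current size inside the new bound.  From userspace they are reached via
 * setsockopt(), e.g. (128 KiB is an arbitrary example):
 *
 *      uint64_t size = 128 * 1024;
 *      setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
 *                 &size, sizeof(size));
 */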
1931
1932 static int vmci_transport_notify_poll_in(
1933         struct vsock_sock *vsk,
1934         size_t target,
1935         bool *data_ready_now)
1936 {
1937         return vmci_trans(vsk)->notify_ops->poll_in(
1938                         &vsk->sk, target, data_ready_now);
1939 }
1940
1941 static int vmci_transport_notify_poll_out(
1942         struct vsock_sock *vsk,
1943         size_t target,
1944         bool *space_available_now)
1945 {
1946         return vmci_trans(vsk)->notify_ops->poll_out(
1947                         &vsk->sk, target, space_available_now);
1948 }
1949
1950 static int vmci_transport_notify_recv_init(
1951         struct vsock_sock *vsk,
1952         size_t target,
1953         struct vsock_transport_recv_notify_data *data)
1954 {
1955         return vmci_trans(vsk)->notify_ops->recv_init(
1956                         &vsk->sk, target,
1957                         (struct vmci_transport_recv_notify_data *)data);
1958 }
1959
1960 static int vmci_transport_notify_recv_pre_block(
1961         struct vsock_sock *vsk,
1962         size_t target,
1963         struct vsock_transport_recv_notify_data *data)
1964 {
1965         return vmci_trans(vsk)->notify_ops->recv_pre_block(
1966                         &vsk->sk, target,
1967                         (struct vmci_transport_recv_notify_data *)data);
1968 }
1969
1970 static int vmci_transport_notify_recv_pre_dequeue(
1971         struct vsock_sock *vsk,
1972         size_t target,
1973         struct vsock_transport_recv_notify_data *data)
1974 {
1975         return vmci_trans(vsk)->notify_ops->recv_pre_dequeue(
1976                         &vsk->sk, target,
1977                         (struct vmci_transport_recv_notify_data *)data);
1978 }
1979
1980 static int vmci_transport_notify_recv_post_dequeue(
1981         struct vsock_sock *vsk,
1982         size_t target,
1983         ssize_t copied,
1984         bool data_read,
1985         struct vsock_transport_recv_notify_data *data)
1986 {
1987         return vmci_trans(vsk)->notify_ops->recv_post_dequeue(
1988                         &vsk->sk, target, copied, data_read,
1989                         (struct vmci_transport_recv_notify_data *)data);
1990 }
1991
1992 static int vmci_transport_notify_send_init(
1993         struct vsock_sock *vsk,
1994         struct vsock_transport_send_notify_data *data)
1995 {
1996         return vmci_trans(vsk)->notify_ops->send_init(
1997                         &vsk->sk,
1998                         (struct vmci_transport_send_notify_data *)data);
1999 }
2000
2001 static int vmci_transport_notify_send_pre_block(
2002         struct vsock_sock *vsk,
2003         struct vsock_transport_send_notify_data *data)
2004 {
2005         return vmci_trans(vsk)->notify_ops->send_pre_block(
2006                         &vsk->sk,
2007                         (struct vmci_transport_send_notify_data *)data);
2008 }
2009
2010 static int vmci_transport_notify_send_pre_enqueue(
2011         struct vsock_sock *vsk,
2012         struct vsock_transport_send_notify_data *data)
2013 {
2014         return vmci_trans(vsk)->notify_ops->send_pre_enqueue(
2015                         &vsk->sk,
2016                         (struct vmci_transport_send_notify_data *)data);
2017 }
2018
2019 static int vmci_transport_notify_send_post_enqueue(
2020         struct vsock_sock *vsk,
2021         ssize_t written,
2022         struct vsock_transport_send_notify_data *data)
2023 {
2024         return vmci_trans(vsk)->notify_ops->send_post_enqueue(
2025                         &vsk->sk, written,
2026                         (struct vmci_transport_send_notify_data *)data);
2027 }
2028
2029 static bool vmci_transport_old_proto_override(bool *old_pkt_proto)
2030 {
2031         if (PROTOCOL_OVERRIDE != -1) {
2032                 if (PROTOCOL_OVERRIDE == 0)
2033                         *old_pkt_proto = true;
2034                 else
2035                         *old_pkt_proto = false;
2036
2037                 pr_info("Proto override in use\n");
2038                 return true;
2039         }
2040
2041         return false;
2042 }
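/* PROTOCOL_OVERRIDE is the build-time knob defined earlier in this file.
 * The checks here read -1 as "no override", 0 as "force the old protocol",
 * and any other value as "force the new protocol" (that value also serves
 * as the advertised version set, see
 * vmci_transport_new_proto_supported_versions()).
 */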
2043
2044 static bool vmci_transport_proto_to_notify_struct(struct sock *sk,
2045                                                   u16 *proto,
2046                                                   bool old_pkt_proto)
2047 {
2048         struct vsock_sock *vsk = vsock_sk(sk);
2049
2050         if (old_pkt_proto) {
2051                 if (*proto != VSOCK_PROTO_INVALID) {
2052                         pr_err("Can't set both an old and new protocol\n");
2053                         return false;
2054                 }
2055                 vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_ops;
2056                 goto exit;
2057         }
2058
2059         switch (*proto) {
2060         case VSOCK_PROTO_PKT_ON_NOTIFY:
2061                 vmci_trans(vsk)->notify_ops =
2062                         &vmci_transport_notify_pkt_q_state_ops;
2063                 break;
2064         default:
2065                 pr_err("Unknown notify protocol version\n");
2066                 return false;
2067         }
2068
2069 exit:
2070         vmci_trans(vsk)->notify_ops->socket_init(sk);
2071         return true;
2072 }
2073
2074 static u16 vmci_transport_new_proto_supported_versions(void)
2075 {
2076         if (PROTOCOL_OVERRIDE != -1)
2077                 return PROTOCOL_OVERRIDE;
2078
2079         return VSOCK_PROTO_ALL_SUPPORTED;
2080 }
2081
2082 static u32 vmci_transport_get_local_cid(void)
2083 {
2084         return vmci_get_context_id();
2085 }
2086
2087 static const struct vsock_transport vmci_transport = {
2088         .init = vmci_transport_socket_init,
2089         .destruct = vmci_transport_destruct,
2090         .release = vmci_transport_release,
2091         .connect = vmci_transport_connect,
2092         .dgram_bind = vmci_transport_dgram_bind,
2093         .dgram_dequeue = vmci_transport_dgram_dequeue,
2094         .dgram_enqueue = vmci_transport_dgram_enqueue,
2095         .dgram_allow = vmci_transport_dgram_allow,
2096         .stream_dequeue = vmci_transport_stream_dequeue,
2097         .stream_enqueue = vmci_transport_stream_enqueue,
2098         .stream_has_data = vmci_transport_stream_has_data,
2099         .stream_has_space = vmci_transport_stream_has_space,
2100         .stream_rcvhiwat = vmci_transport_stream_rcvhiwat,
2101         .stream_is_active = vmci_transport_stream_is_active,
2102         .stream_allow = vmci_transport_stream_allow,
2103         .notify_poll_in = vmci_transport_notify_poll_in,
2104         .notify_poll_out = vmci_transport_notify_poll_out,
2105         .notify_recv_init = vmci_transport_notify_recv_init,
2106         .notify_recv_pre_block = vmci_transport_notify_recv_pre_block,
2107         .notify_recv_pre_dequeue = vmci_transport_notify_recv_pre_dequeue,
2108         .notify_recv_post_dequeue = vmci_transport_notify_recv_post_dequeue,
2109         .notify_send_init = vmci_transport_notify_send_init,
2110         .notify_send_pre_block = vmci_transport_notify_send_pre_block,
2111         .notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue,
2112         .notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue,
2113         .shutdown = vmci_transport_shutdown,
2114         .set_buffer_size = vmci_transport_set_buffer_size,
2115         .set_min_buffer_size = vmci_transport_set_min_buffer_size,
2116         .set_max_buffer_size = vmci_transport_set_max_buffer_size,
2117         .get_buffer_size = vmci_transport_get_buffer_size,
2118         .get_min_buffer_size = vmci_transport_get_min_buffer_size,
2119         .get_max_buffer_size = vmci_transport_get_max_buffer_size,
2120         .get_local_cid = vmci_transport_get_local_cid,
2121 };
2122
2123 static int __init vmci_transport_init(void)
2124 {
2125         int err;
2126
2127         /* Create the datagram handle that we will use to send and receive all
2128          * VSocket control messages for this context.
2129          */
2130         err = vmci_transport_datagram_create_hnd(VMCI_TRANSPORT_PACKET_RID,
2131                                                  VMCI_FLAG_ANYCID_DG_HND,
2132                                                  vmci_transport_recv_stream_cb,
2133                                                  NULL,
2134                                                  &vmci_transport_stream_handle);
2135         if (err < VMCI_SUCCESS) {
2136                 pr_err("Unable to create datagram handle. (%d)\n", err);
2137                 return vmci_transport_error_to_vsock_error(err);
2138         }
2139
2140         err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
2141                                    vmci_transport_qp_resumed_cb,
2142                                    NULL, &vmci_transport_qp_resumed_sub_id);
2143         if (err < VMCI_SUCCESS) {
2144                 pr_err("Unable to subscribe to resumed event. (%d)\n", err);
2145                 err = vmci_transport_error_to_vsock_error(err);
2146                 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
2147                 goto err_destroy_stream_handle;
2148         }
2149
2150         err = vsock_core_init(&vmci_transport);
2151         if (err < 0)
2152                 goto err_unsubscribe;
2153
2154         return 0;
2155
2156 err_unsubscribe:
2157         vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
2158 err_destroy_stream_handle:
2159         vmci_datagram_destroy_handle(vmci_transport_stream_handle);
2160         return err;
2161 }
2162 module_init(vmci_transport_init);
2163
2164 static void __exit vmci_transport_exit(void)
2165 {
2166         cancel_work_sync(&vmci_transport_cleanup_work);
2167         vmci_transport_free_resources(&vmci_transport_cleanup_list);
2168
2169         if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
2170                 if (vmci_datagram_destroy_handle(
2171                         vmci_transport_stream_handle) != VMCI_SUCCESS)
2172                         pr_err("Couldn't destroy datagram handle\n");
2173                 vmci_transport_stream_handle = VMCI_INVALID_HANDLE;
2174         }
2175
2176         if (vmci_transport_qp_resumed_sub_id != VMCI_INVALID_ID) {
2177                 vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
2178                 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
2179         }
2180
2181         vsock_core_exit();
2182 }
2183 module_exit(vmci_transport_exit);
2184
2185 MODULE_AUTHOR("VMware, Inc.");
2186 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
2187 MODULE_VERSION("1.0.4.0-k");
2188 MODULE_LICENSE("GPL v2");
2189 MODULE_ALIAS("vmware_vsock");
2190 MODULE_ALIAS_NETPROTO(PF_VSOCK);