/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#include "vmci_transport_notify.h"

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *ed,
					  void *client_data);
static void vmci_transport_recv_pkt_work(struct work_struct *work);
static void vmci_transport_cleanup(struct work_struct *work);
static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_server(
					struct sock *sk,
					struct sock *pending,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_negotiate(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_invalid(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connected(struct sock *sk,
					 struct vmci_transport_packet *pkt);
static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
static u16 vmci_transport_new_proto_supported_versions(void);
static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
						  bool old_pkt_proto);

struct vmci_transport_recv_pkt_info {
	struct work_struct work;
	struct sock *sk;
	struct vmci_transport_packet pkt;
};

static LIST_HEAD(vmci_transport_cleanup_list);
static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);

static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
							   VMCI_INVALID_ID };

static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;

static int PROTOCOL_OVERRIDE = -1;

#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN 128
#define VMCI_TRANSPORT_DEFAULT_QP_SIZE     262144
#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX 262144

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

/* Helper function to convert from a VMCI error code to a VSock error code. */

static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
{
	int err;

	switch (vmci_error) {
	case VMCI_ERROR_NO_MEM:
		err = ENOMEM;
		break;
	case VMCI_ERROR_DUPLICATE_ENTRY:
	case VMCI_ERROR_ALREADY_EXISTS:
		err = EADDRINUSE;
		break;
	case VMCI_ERROR_NO_ACCESS:
		err = EPERM;
		break;
	case VMCI_ERROR_NO_RESOURCES:
		err = ENOBUFS;
		break;
	case VMCI_ERROR_INVALID_RESOURCE:
		err = EHOSTUNREACH;
		break;
	case VMCI_ERROR_INVALID_ARGS:
	default:
		err = EINVAL;
	}

	return err > 0 ? -err : err;
}

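/* Illustrative note (not part of the original file): the helper above maps
 * VMCI status codes onto negative errno values, for example:
 *
 *	vmci_transport_error_to_vsock_error(VMCI_ERROR_NO_MEM) == -ENOMEM
 *	vmci_transport_error_to_vsock_error(VMCI_ERROR_ALREADY_EXISTS)
 *						== -EADDRINUSE
 *
 * The final err > 0 check simply normalizes the result to a negative value.
 */
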
static u32 vmci_transport_peer_rid(u32 peer_cid)
{
	if (VMADDR_CID_HYPERVISOR == peer_cid)
		return VMCI_TRANSPORT_HYPERVISOR_PACKET_RID;

	return VMCI_TRANSPORT_PACKET_RID;
}

static void
vmci_transport_packet_init(struct vmci_transport_packet *pkt,
			   struct sockaddr_vm *src,
			   struct sockaddr_vm *dst,
			   u8 type,
			   u64 size,
			   u64 mode,
			   struct vmci_transport_waiting_info *wait,
			   u16 proto,
			   struct vmci_handle handle)
{
	/* We register the stream control handler as an any cid handle so we
	 * must always send from a source address of VMADDR_CID_ANY.
	 */
	pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
				       VMCI_TRANSPORT_PACKET_RID);
	pkt->dg.dst = vmci_make_handle(dst->svm_cid,
				       vmci_transport_peer_rid(dst->svm_cid));
	pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
	pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
	pkt->type = type;
	pkt->src_port = src->svm_port;
	pkt->dst_port = dst->svm_port;
	memset(&pkt->proto, 0, sizeof(pkt->proto));
	memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
		pkt->u.size = 0;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_REQUEST:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
		pkt->u.size = size;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
		pkt->u.handle = handle;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
	case VMCI_TRANSPORT_PACKET_TYPE_READ:
	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		pkt->u.size = 0;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
		pkt->u.mode = mode;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
		memcpy(&pkt->u.wait, wait, sizeof(pkt->u.wait));
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_REQUEST2:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
		pkt->u.size = size;
		pkt->proto = proto;
		break;
	}
}

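/* Sketch (illustrative only, never called in this file): how a REQUEST2
 * control packet would be populated by the initializer above. The 262144
 * here is a hypothetical proposed queue pair size:
 *
 *	struct vmci_transport_packet pkt;
 *
 *	vmci_transport_packet_init(&pkt, &local, &remote,
 *				   VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
 *				   262144, 0, NULL,
 *				   VSOCK_PROTO_PKT_ON_NOTIFY,
 *				   VMCI_INVALID_HANDLE);
 *
 * For this packet type the switch stores the size in pkt.u.size and the
 * advertised protocol mask in pkt.proto.
 */
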
static inline void
vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt,
				    struct sockaddr_vm *local,
				    struct sockaddr_vm *remote)
{
	vsock_addr_init(local, pkt->dg.dst.context, pkt->dst_port);
	vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port);
}

static int
__vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt,
				  struct sockaddr_vm *src,
				  struct sockaddr_vm *dst,
				  enum vmci_transport_packet_type type,
				  u64 size,
				  u64 mode,
				  struct vmci_transport_waiting_info *wait,
				  u16 proto,
				  struct vmci_handle handle,
				  bool convert_error)
{
	int err;

	vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait,
				   proto, handle);
	err = vmci_datagram_send(&pkt->dg);
	if (convert_error && (err < 0))
		return vmci_transport_error_to_vsock_error(err);

	return err;
}

static int
vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt,
				      enum vmci_transport_packet_type type,
				      u64 size,
				      u64 mode,
				      struct vmci_transport_waiting_info *wait,
				      struct vmci_handle handle)
{
	struct vmci_transport_packet reply;
	struct sockaddr_vm src, dst;

	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) {
		return 0;
	} else {
		vmci_transport_packet_get_addresses(pkt, &src, &dst);
		return __vmci_transport_send_control_pkt(&reply, &src, &dst,
							 type,
							 size, mode, wait,
							 VSOCK_PROTO_INVALID,
							 handle, true);
	}
}

static int
vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
				   struct sockaddr_vm *dst,
				   enum vmci_transport_packet_type type,
				   u64 size,
				   u64 mode,
				   struct vmci_transport_waiting_info *wait,
				   struct vmci_handle handle)
{
	/* Note that it is safe to use a single packet across all CPUs since
	 * two tasklets of the same type are guaranteed to not ever run
	 * simultaneously. If that ever changes, or VMCI stops using tasklets,
	 * we can use per-cpu packets.
	 */
	static struct vmci_transport_packet pkt;

	return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
						 size, mode, wait,
						 VSOCK_PROTO_INVALID, handle,
						 false);
}

static int
vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
				      struct sockaddr_vm *dst,
				      enum vmci_transport_packet_type type,
				      u64 size,
				      u64 mode,
				      struct vmci_transport_waiting_info *wait,
				      u16 proto,
				      struct vmci_handle handle)
{
	struct vmci_transport_packet *pkt;
	int err;

	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
						mode, wait, proto, handle,
						true);
	kfree(pkt);

	return err;
}

static int
vmci_transport_send_control_pkt(struct sock *sk,
				enum vmci_transport_packet_type type,
				u64 size,
				u64 mode,
				struct vmci_transport_waiting_info *wait,
				u16 proto,
				struct vmci_handle handle)
{
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	if (!vsock_addr_bound(&vsk->remote_addr))
		return -EINVAL;

	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
						     &vsk->remote_addr,
						     type, size, mode,
						     wait, proto, handle);
}

static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
					struct sockaddr_vm *src,
					struct vmci_transport_packet *pkt)
{
	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_RST, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_reset(struct sock *sk,
				     struct vmci_transport_packet *pkt)
{
	struct sockaddr_vm *dst_ptr;
	struct sockaddr_vm dst;
	struct vsock_sock *vsk;

	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	if (vsock_addr_bound(&vsk->remote_addr)) {
		dst_ptr = &vsk->remote_addr;
	} else {
		vsock_addr_init(&dst, pkt->dg.src.context,
				pkt->src_port);
		dst_ptr = &dst;
	}
	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
					     VMCI_TRANSPORT_PACKET_TYPE_RST,
					     0, 0, NULL, VSOCK_PROTO_INVALID,
					     VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
					  u16 version)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_qp_offer(struct sock *sk,
					struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
					0, NULL,
					VSOCK_PROTO_INVALID, handle);
}

static int vmci_transport_send_attach(struct sock *sk,
				      struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
					0, 0, NULL, VSOCK_PROTO_INVALID,
					handle);
}

static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
{
	return vmci_transport_reply_control_pkt_fast(
						pkt,
						VMCI_TRANSPORT_PACKET_TYPE_RST,
						0, 0, NULL,
						VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_invalid_bh(struct sockaddr_vm *dst,
					  struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_INVALID,
					0, 0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
				 struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
				struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_write(struct sock *sk,
				      struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
				0, 0, wait, VSOCK_PROTO_INVALID,
				VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_read(struct sock *sk,
				     struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
				0, 0, wait, VSOCK_PROTO_INVALID,
				VMCI_INVALID_HANDLE);
}

static int vmci_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	return vmci_transport_send_control_pkt(
					&vsk->sk,
					VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
					0, mode, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(sk,
					VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
					     u16 version)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}

static struct sock *vmci_transport_get_pending(
					struct sock *listener,
					struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;
	struct sock *pending;
	struct sockaddr_vm src;

	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);

	vlistener = vsock_sk(listener);

	list_for_each_entry(vpending, &vlistener->pending_links,
			    pending_links) {
		if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
		    pkt->dst_port == vpending->local_addr.svm_port) {
			pending = sk_vsock(vpending);
			sock_hold(pending);
			goto found;
		}
	}

	pending = NULL;
found:
	return pending;
}

static void vmci_transport_release_pending(struct sock *pending)
{
	sock_put(pending);
}

/* We allow two kinds of sockets to communicate with a restricted VM: 1)
 * trusted sockets and 2) sockets from applications running as the same user as
 * the VM (this is only true for the host side and only when using hosted
 * products).
 */

static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
{
	return vsock->trusted ||
	       vmci_is_context_owner(peer_cid, vsock->owner->uid);
}

/* We allow sending datagrams to and receiving datagrams from a restricted VM
 * only if it is trusted as described in vmci_transport_is_trusted.
 */

static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
{
	if (VMADDR_CID_HYPERVISOR == peer_cid)
		return true;

	if (vsock->cached_peer != peer_cid) {
		vsock->cached_peer = peer_cid;
		if (!vmci_transport_is_trusted(vsock, peer_cid) &&
		    (vmci_context_get_priv_flags(peer_cid) &
		     VMCI_PRIVILEGE_FLAG_RESTRICTED)) {
			vsock->cached_peer_allow_dgram = false;
		} else {
			vsock->cached_peer_allow_dgram = true;
		}
	}

	return vsock->cached_peer_allow_dgram;
}

static int
vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
				struct vmci_handle *handle,
				u64 produce_size,
				u64 consume_size,
				u32 peer, u32 flags, bool trusted)
{
	int err = 0;

	if (trusted) {
		/* Try to allocate our queue pair as trusted. This will only
		 * work if vsock is running in the host.
		 */
		err = vmci_qpair_alloc(qpair, handle, produce_size,
				       consume_size,
				       peer, flags,
				       VMCI_PRIVILEGE_FLAG_TRUSTED);
		if (err != VMCI_ERROR_NO_ACCESS)
			goto out;
	}

	err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size,
			       peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
out:
	if (err < 0) {
		pr_err_once("Could not attach to queue pair with %d\n", err);
		err = vmci_transport_error_to_vsock_error(err);
	}

	return err;
}

static int
vmci_transport_datagram_create_hnd(u32 resource_id,
				   u32 flags,
				   vmci_datagram_recv_cb recv_cb,
				   void *client_data,
				   struct vmci_handle *out_handle)
{
	int err;

	/* Try to allocate our datagram handler as trusted. This will only work
	 * if vsock is running in the host.
	 */
	err = vmci_datagram_create_handle_priv(resource_id, flags,
					       VMCI_PRIVILEGE_FLAG_TRUSTED,
					       recv_cb,
					       client_data, out_handle);

	if (err == VMCI_ERROR_NO_ACCESS)
		err = vmci_datagram_create_handle(resource_id, flags,
						  recv_cb, client_data,
						  out_handle);

	return err;
}

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context and if it ever needs to
 * sleep it should defer that work to a work queue.
 */

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	size_t size;
	struct sk_buff *skb;
	struct vsock_sock *vsk;

	sk = (struct sock *)data;

	/* This handler is privileged when this module is running on the host.
	 * We will get datagrams from all endpoints (even VMs that are in a
	 * restricted context). If we get one from a restricted context then
	 * the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested in is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, dg->src.context))
		return VMCI_ERROR_NO_ACCESS;

	size = VMCI_DG_SIZE(dg);

	/* Attach the packet to the socket's receive queue as an sk_buff. */
	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		return VMCI_ERROR_NO_MEM;

	/* sk_receive_skb() will do a sock_put(), so hold here. */
	sock_hold(sk);
	skb_put(skb, size);
	memcpy(skb->data, dg, size);
	sk_receive_skb(sk, skb, 0);

	return VMCI_SUCCESS;
}

static bool vmci_transport_stream_allow(u32 cid, u32 port)
{
	static const u32 non_socket_contexts[] = {
		VMADDR_CID_RESERVED,
	};
	int i;

	BUILD_BUG_ON(sizeof(cid) != sizeof(*non_socket_contexts));

	for (i = 0; i < ARRAY_SIZE(non_socket_contexts); i++) {
		if (cid == non_socket_contexts[i])
			return false;
	}

	return true;
}

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context but it defers most of
 * its work to the packet handling work queue.
 */

static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	struct sockaddr_vm dst;
	struct sockaddr_vm src;
	struct vmci_transport_packet *pkt;
	struct vsock_sock *vsk;
	bool bh_process_pkt;
	int err;

	sk = NULL;
	err = VMCI_SUCCESS;
	bh_process_pkt = false;

	/* Ignore incoming packets from contexts without sockets, or resources
	 * that aren't vsock implementations.
	 */
	if (!vmci_transport_stream_allow(dg->src.context, -1)
	    || vmci_transport_peer_rid(dg->src.context) != dg->src.resource)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
		/* Drop datagrams that do not contain full VSock packets. */
		return VMCI_ERROR_INVALID_ARGS;

	pkt = (struct vmci_transport_packet *)dg;

	/* Find the socket that should handle this packet.  First we look for a
	 * connected socket and if there is none we look for a socket bound to
	 * the destination address.
	 */
	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
	vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);

	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			/* We could not find a socket for this specified
			 * address.  If this packet is a RST, we just drop it.
			 * If it is another packet, we send a RST.  Note that
			 * we do not send a RST reply to RSTs so that we do not
			 * continually send RSTs between two endpoints.
			 *
			 * Note that since this is a reply, dst is src and src
			 * is dst.
			 */
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NOT_FOUND;
			goto out;
		}
	}

	/* If the received packet type is beyond all types known to this
	 * implementation, reply with an invalid message.  Hopefully this will
	 * help when implementing backwards compatibility in the future.
	 */
	if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
		vmci_transport_send_invalid_bh(&dst, &src);
		err = VMCI_ERROR_INVALID_ARGS;
		goto out;
	}

	/* This handler is privileged when this module is running on the host.
	 * We will get datagram connect requests from all endpoints (even VMs
	 * that are in a restricted context). If we get one from a restricted
	 * context then the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested in is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
		err = VMCI_ERROR_NO_ACCESS;
		goto out;
	}

	/* We do most everything in a work queue, but let's fast path the
	 * notification of reads and writes to help data transfer performance.
	 * We can only do this if there is no process context code executing
	 * for this socket since that may change the state.
	 */
	bh_lock_sock(sk);

	if (!sock_owned_by_user(sk)) {
		/* The local context ID may be out of date, update it. */
		vsk->local_addr.svm_cid = dst.svm_cid;

		if (sk->sk_state == SS_CONNECTED)
			vmci_trans(vsk)->notify_ops->handle_notify_pkt(
					sk, pkt, true, &dst, &src,
					&bh_process_pkt);
	}

	bh_unlock_sock(sk);

	if (!bh_process_pkt) {
		struct vmci_transport_recv_pkt_info *recv_pkt_info;

		recv_pkt_info = kmalloc(sizeof(*recv_pkt_info), GFP_ATOMIC);
		if (!recv_pkt_info) {
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NO_MEM;
			goto out;
		}

		recv_pkt_info->sk = sk;
		memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
		INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);

		schedule_work(&recv_pkt_info->work);
		/* Clear sk so that the reference count incremented by one of
		 * the Find functions above is not decremented below.  We need
		 * that reference count for the packet handler we've scheduled
		 * to run.
		 */
		sk = NULL;
	}

out:
	if (sk)
		sock_put(sk);

	return err;
}

static void vmci_transport_handle_detach(struct sock *sk)
{
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);
	if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
		sock_set_flag(sk, SOCK_DONE);

		/* On a detach the peer will not be sending or receiving
		 * anymore.
		 */
		vsk->peer_shutdown = SHUTDOWN_MASK;

		/* We should not be sending anymore since the peer won't be
		 * there to receive, but we can still receive if there is data
		 * left in our consume queue.
		 */
		if (vsock_stream_has_data(vsk) <= 0) {
			if (sk->sk_state == SS_CONNECTING) {
				/* The peer may detach from a queue pair while
				 * we are still in the connecting state, i.e.,
				 * if the peer VM is killed after attaching to
				 * a queue pair, but before we complete the
				 * handshake. In that case, we treat the detach
				 * event like a reset.
				 */
				sk->sk_state = SS_UNCONNECTED;
				sk->sk_err = ECONNRESET;
				sk->sk_error_report(sk);
				return;
			}
			sk->sk_state = SS_UNCONNECTED;
		}
		sk->sk_state_change(sk);
	}
}

static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *e_data,
					  void *client_data)
{
	struct vmci_transport *trans = client_data;
	const struct vmci_event_payload_qp *e_payload;

	e_payload = vmci_event_data_const_payload(e_data);

	/* XXX This is lame, we should provide a way to lookup sockets by
	 * qp_handle.
	 */
	if (vmci_handle_is_invalid(e_payload->handle) ||
	    !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
		return;

	/* We don't ask for delayed CBs when we subscribe to this event (we
	 * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
	 * guarantees in that case about what context we might be running in,
	 * so it could be BH or process, blockable or non-blockable.  So we
	 * need to account for all possible contexts here.
	 */
	spin_lock_bh(&trans->lock);
	if (!trans->sk)
		goto out;

	/* Apart from here, trans->lock is only grabbed as part of sk destruct,
	 * where trans->sk isn't locked.
	 */
	bh_lock_sock(trans->sk);

	vmci_transport_handle_detach(trans->sk);

	bh_unlock_sock(trans->sk);
out:
	spin_unlock_bh(&trans->lock);
}

static void vmci_transport_qp_resumed_cb(u32 sub_id,
					 const struct vmci_event_data *e_data,
					 void *client_data)
{
	vsock_for_each_connected_socket(vmci_transport_handle_detach);
}

static void vmci_transport_recv_pkt_work(struct work_struct *work)
{
	struct vmci_transport_recv_pkt_info *recv_pkt_info;
	struct vmci_transport_packet *pkt;
	struct sock *sk;

	recv_pkt_info =
		container_of(work, struct vmci_transport_recv_pkt_info, work);
	sk = recv_pkt_info->sk;
	pkt = &recv_pkt_info->pkt;

	lock_sock(sk);

	/* The local context ID may be out of date. */
	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;

	switch (sk->sk_state) {
	case VSOCK_SS_LISTEN:
		vmci_transport_recv_listen(sk, pkt);
		break;
	case SS_CONNECTING:
		/* Processing of pending connections for servers goes through
		 * the listening socket, so see vmci_transport_recv_listen()
		 * for that path.
		 */
		vmci_transport_recv_connecting_client(sk, pkt);
		break;
	case SS_CONNECTED:
		vmci_transport_recv_connected(sk, pkt);
		break;
	default:
		/* Because this function does not run in the same context as
		 * vmci_transport_recv_stream_cb it is possible that the
		 * socket has closed. We need to let the other side know or it
		 * could be sitting in a connect and hang forever. Send a
		 * reset to prevent that.
		 */
		vmci_transport_send_reset(sk, pkt);
		break;
	}

	release_sock(sk);
	kfree(recv_pkt_info);
	/* Release reference obtained in the stream callback when we fetched
	 * this socket out of the bound or connected list.
	 */
	sock_put(sk);
}

static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct sock *pending;
	struct vsock_sock *vpending;
	int err;
	u64 qp_size;
	bool old_request = false;
	bool old_pkt_proto = false;

	err = 0;

	/* Because we are in the listen state, we could be receiving a packet
	 * for ourselves or any previous connection requests that we received.
	 * If it's the latter, we try to find a socket in our list of pending
	 * connections and, if we do, call the appropriate handler for the
	 * state that that socket is in.  Otherwise we try to service the
	 * connection request.
	 */
	pending = vmci_transport_get_pending(sk, pkt);
	if (pending) {
		lock_sock(pending);

		/* The local context ID may be out of date. */
		vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;

		switch (pending->sk_state) {
		case SS_CONNECTING:
			err = vmci_transport_recv_connecting_server(sk,
								    pending,
								    pkt);
			break;
		default:
			vmci_transport_send_reset(pending, pkt);
			err = -EINVAL;
		}

		if (err < 0)
			vsock_remove_pending(sk, pending);

		release_sock(pending);
		vmci_transport_release_pending(pending);

		return err;
	}

	/* The listen state only accepts connection requests.  Reply with a
	 * reset unless we received a reset.
	 */
	if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
	      pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	if (pkt->u.size == 0) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	/* If this socket can't accommodate this connection request, we send a
	 * reset.  Otherwise we create and initialize a child socket and reply
	 * with a connection negotiation.
	 */
	if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
		vmci_transport_reply_reset(pkt);
		return -ECONNREFUSED;
	}

	pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
				 sk->sk_type, 0);
	if (!pending) {
		vmci_transport_send_reset(sk, pkt);
		return -ENOMEM;
	}

	vpending = vsock_sk(pending);

	vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
			pkt->dst_port);
	vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
			pkt->src_port);

	/* If the proposed size fits within our min/max, accept it. Otherwise
	 * propose our own size.
	 */
	if (pkt->u.size >= vmci_trans(vpending)->queue_pair_min_size &&
	    pkt->u.size <= vmci_trans(vpending)->queue_pair_max_size) {
		qp_size = pkt->u.size;
	} else {
		qp_size = vmci_trans(vpending)->queue_pair_size;
	}

	/* Figure out if we are using old or new requests based on the
	 * overrides pkt types sent by our peer.
	 */
	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
		old_request = old_pkt_proto;
	} else {
		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
			old_request = true;
		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
			old_request = false;
	}

	if (old_request) {
		/* Handle a REQUEST (or override). */
		u16 version = VSOCK_PROTO_INVALID;
		if (vmci_transport_proto_to_notify_struct(
			pending, &version, true))
			err = vmci_transport_send_negotiate(pending, qp_size);
		else
			err = -EINVAL;
	} else {
		/* Handle a REQUEST2 (or override). */
		int proto_int = pkt->proto;
		int pos;
		u16 active_proto_version = 0;

		/* The list of possible protocols is the intersection of all
		 * protocols the client supports ... plus all the protocols we
		 * support.
		 */
		proto_int &= vmci_transport_new_proto_supported_versions();

		/* We choose the highest possible protocol version and use that
		 * one.
		 */
		pos = fls(proto_int);
		if (pos) {
			active_proto_version = (1 << (pos - 1));
			if (vmci_transport_proto_to_notify_struct(
				pending, &active_proto_version, false))
				err = vmci_transport_send_negotiate2(pending,
							qp_size,
							active_proto_version);
			else
				err = -EINVAL;
		} else {
			err = -EINVAL;
		}
	}

	if (err < 0) {
		vmci_transport_send_reset(sk, pkt);
		sock_put(pending);
		err = vmci_transport_error_to_vsock_error(err);
		goto out;
	}

	vsock_add_pending(sk, pending);
	sk->sk_ack_backlog++;

	pending->sk_state = SS_CONNECTING;
	vmci_trans(vpending)->produce_size =
		vmci_trans(vpending)->consume_size = qp_size;
	vmci_trans(vpending)->queue_pair_size = qp_size;

	vmci_trans(vpending)->notify_ops->process_request(pending);

	/* We might never receive another message for this socket and it's not
	 * connected to any process, so we have to ensure it gets cleaned up
	 * ourselves.  Our delayed work function will take care of that.  Note
	 * that we do not ever cancel this function since we have few
	 * guarantees about its state when calling cancel_delayed_work().
	 * Instead we hold a reference on the socket for that function and make
	 * it capable of handling cases where it needs to do nothing but
	 * release that reference.
	 */
	vpending->listener = sk;
	sock_hold(sk);
	sock_hold(pending);
	schedule_delayed_work(&vpending->pending_work, HZ);

out:
	return err;
}

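/* Worked example (illustrative only): if a REQUEST2 peer advertises protocol
 * mask 0x3 (two versions) and our own supported mask is also 0x3, then
 * proto_int = 0x3 & 0x3 = 0x3, fls(0x3) = 2, and the negotiated version is
 * 1 << (2 - 1) = 0x2, i.e. the highest bit both sides have set.
 */
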
static int
vmci_transport_recv_connecting_server(struct sock *listener,
				      struct sock *pending,
				      struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vpending;
	struct vmci_handle handle;
	struct vmci_qp *qpair;
	bool is_local;
	u32 flags;
	u32 detach_sub_id;
	int err;
	int skerr;

	vpending = vsock_sk(pending);
	detach_sub_id = VMCI_INVALID_ID;

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
		if (vmci_handle_is_invalid(pkt->u.handle)) {
			vmci_transport_send_reset(pending, pkt);
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}
		break;
	default:
		/* Close and cleanup the connection. */
		vmci_transport_send_reset(pending, pkt);
		skerr = EPROTO;
		err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
		goto destroy;
	}

	/* In order to complete the connection we need to attach to the offered
	 * queue pair and send an attach notification.  We also subscribe to the
	 * detach event so we know when our peer goes away, and we do that
	 * before attaching so we don't miss an event.  If all this succeeds,
	 * we update our state and wakeup anything waiting in accept() for a
	 * connection.
	 */

	/* We don't care about attach since we ensure the other side has
	 * attached by specifying the ATTACH_ONLY flag below.
	 */
	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
				   vmci_transport_peer_detach_cb,
				   vmci_trans(vpending), &detach_sub_id);
	if (err < VMCI_SUCCESS) {
		vmci_transport_send_reset(pending, pkt);
		err = vmci_transport_error_to_vsock_error(err);
		skerr = -err;
		goto destroy;
	}

	vmci_trans(vpending)->detach_sub_id = detach_sub_id;

	/* Now attach to the queue pair the client created. */
	handle = pkt->u.handle;

	/* vpending->local_addr always has a context id so we do not need to
	 * worry about VMADDR_CID_ANY in this case.
	 */
	is_local =
	    vpending->remote_addr.svm_cid == vpending->local_addr.svm_cid;
	flags = VMCI_QPFLAG_ATTACH_ONLY;
	flags |= is_local ? VMCI_QPFLAG_LOCAL : 0;

	err = vmci_transport_queue_pair_alloc(
					&qpair,
					&handle,
					vmci_trans(vpending)->produce_size,
					vmci_trans(vpending)->consume_size,
					pkt->dg.src.context,
					flags,
					vmci_transport_is_trusted(
						vpending,
						vpending->remote_addr.svm_cid));
	if (err < 0) {
		vmci_transport_send_reset(pending, pkt);
		skerr = -err;
		goto destroy;
	}

	vmci_trans(vpending)->qp_handle = handle;
	vmci_trans(vpending)->qpair = qpair;

	/* When we send the attach message, we must be ready to handle incoming
	 * control messages on the newly connected socket. So we move the
	 * pending socket to the connected state before sending the attach
	 * message. Otherwise, an incoming packet triggered by the attach being
	 * received by the peer may be processed concurrently with what happens
	 * below after sending the attach message, and that incoming packet
	 * will find the listening socket instead of the (currently) pending
	 * socket. Note that enqueueing the socket increments the reference
	 * count, so even if a reset comes before the connection is accepted,
	 * the socket will be valid until it is removed from the queue.
	 *
	 * If we fail sending the attach below, we remove the socket from the
	 * connected list and move the socket to SS_UNCONNECTED before
	 * releasing the lock, so a pending slow path processing of an incoming
	 * packet will not see the socket in the connected state in that case.
	 */
	pending->sk_state = SS_CONNECTED;

	vsock_insert_connected(vpending);

	/* Notify our peer of our attach. */
	err = vmci_transport_send_attach(pending, handle);
	if (err < 0) {
		vsock_remove_connected(vpending);
		pr_err("Could not send attach\n");
		vmci_transport_send_reset(pending, pkt);
		err = vmci_transport_error_to_vsock_error(err);
		skerr = -err;
		goto destroy;
	}

	/* We have a connection. Move the now connected socket from the
	 * listener's pending list to the accept queue so callers of accept()
	 * can find it.
	 */
	vsock_remove_pending(listener, pending);
	vsock_enqueue_accept(listener, pending);

	/* Callers of accept() will be waiting on the listening socket, not
	 * the pending socket.
	 */
	listener->sk_data_ready(listener);

	return 0;

destroy:
	pending->sk_err = skerr;
	pending->sk_state = SS_UNCONNECTED;
	/* As long as we drop our reference, all necessary cleanup will handle
	 * when the cleanup function drops its reference and our destruct
	 * implementation is called.  Note that since the listen handler will
	 * remove pending from the pending list upon our failure, the cleanup
	 * function won't drop the additional reference, which is why we do it
	 * here.
	 */
	sock_put(pending);

	return err;
}

static int
vmci_transport_recv_connecting_client(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vsk;
	int err;
	int skerr;

	vsk = vsock_sk(sk);

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
		if (vmci_handle_is_invalid(pkt->u.handle) ||
		    !vmci_handle_is_equal(pkt->u.handle,
					  vmci_trans(vsk)->qp_handle)) {
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}

		/* Signify the socket is connected and wakeup the waiter in
		 * connect(). Also place the socket in the connected table for
		 * accounting (it can already be found since it's in the bound
		 * table).
		 */
		sk->sk_state = SS_CONNECTED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
		if (pkt->u.size == 0
		    || pkt->dg.src.context != vsk->remote_addr.svm_cid
		    || pkt->src_port != vsk->remote_addr.svm_port
		    || !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)
		    || vmci_trans(vsk)->qpair
		    || vmci_trans(vsk)->produce_size != 0
		    || vmci_trans(vsk)->consume_size != 0
		    || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}

		err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
		if (err) {
			skerr = -err;
			goto destroy;
		}

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
		err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
		if (err) {
			skerr = -err;
			goto destroy;
		}

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		/* Older versions of the Linux code (WS 6.5 / ESX 4.0) used to
		 * continue processing here after they sent an INVALID packet.
		 * This meant that we got a RST after the INVALID. We ignore a
		 * RST after an INVALID. The common code doesn't send the RST
		 * ... so we can hang if an old version of the common code
		 * fails between getting a REQUEST and sending an OFFER back.
		 * Not much we can do about it... except hope that it doesn't
		 * happen.
		 */
		if (vsk->ignore_connecting_rst) {
			vsk->ignore_connecting_rst = false;
		} else {
			skerr = ECONNRESET;
			err = 0;
			goto destroy;
		}

		break;
	default:
		/* Close and cleanup the connection. */
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}

	return 0;

destroy:
	vmci_transport_send_reset(sk, pkt);

	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = skerr;
	sk->sk_error_report(sk);
	return err;
}

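/* Summary of the control-packet exchange implemented by the handlers in this
 * file (descriptive comment, not new protocol): the client sends REQUEST or
 * REQUEST2, the server answers NEGOTIATE or NEGOTIATE2 with the agreed queue
 * pair size, the client allocates the queue pair and sends OFFER, and the
 * server attaches and replies ATTACH, at which point both ends are
 * SS_CONNECTED.  A RST at any step tears the connection down.
 */
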
static int vmci_transport_recv_connecting_client_negotiate(
					struct sock *sk,
					struct vmci_transport_packet *pkt)
{
	int err;
	struct vsock_sock *vsk;
	struct vmci_handle handle;
	struct vmci_qp *qpair;
	u32 detach_sub_id;
	bool is_local;
	u32 flags;
	bool old_proto = true;
	bool old_pkt_proto;
	u16 version;

	vsk = vsock_sk(sk);
	handle = VMCI_INVALID_HANDLE;
	detach_sub_id = VMCI_INVALID_ID;

	/* If we have gotten here then we should be past the point where old
	 * linux vsock could have sent the bogus rst.
	 */
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;

	/* Verify that we're OK with the proposed queue pair size */
	if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
	    pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
		err = -EINVAL;
		goto destroy;
	}

	/* At this point we know the CID the peer is using to talk to us. */
	if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
		vsk->local_addr.svm_cid = pkt->dg.dst.context;

	/* Setup the notify ops to be the highest supported version that both
	 * the server and the client support.
	 */
	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
		old_proto = old_pkt_proto;
	} else {
		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
			old_proto = true;
		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
			old_proto = false;
	}

	if (old_proto)
		version = VSOCK_PROTO_INVALID;
	else
		version = pkt->proto;

	if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
		err = -EINVAL;
		goto destroy;
	}

	/* Subscribe to detach events first.
	 *
	 * XXX We attach once for each queue pair created for now so it is easy
	 * to find the socket (it's provided), but later we should only
	 * subscribe once and add a way to lookup sockets by queue pair handle.
	 */
	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
				   vmci_transport_peer_detach_cb,
				   vmci_trans(vsk), &detach_sub_id);
	if (err < VMCI_SUCCESS) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	/* Make VMCI select the handle for us. */
	handle = VMCI_INVALID_HANDLE;
	is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
	flags = is_local ? VMCI_QPFLAG_LOCAL : 0;

	err = vmci_transport_queue_pair_alloc(&qpair,
					      &handle,
					      pkt->u.size,
					      pkt->u.size,
					      vsk->remote_addr.svm_cid,
					      flags,
					      vmci_transport_is_trusted(
						      vsk,
						      vsk->remote_addr.svm_cid));
	if (err < 0)
		goto destroy;

	err = vmci_transport_send_qp_offer(sk, handle);
	if (err < 0) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	vmci_trans(vsk)->qp_handle = handle;
	vmci_trans(vsk)->qpair = qpair;

	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
		pkt->u.size;

	vmci_trans(vsk)->detach_sub_id = detach_sub_id;

	vmci_trans(vsk)->notify_ops->process_negotiate(sk);

	return 0;

destroy:
	if (detach_sub_id != VMCI_INVALID_ID)
		vmci_event_unsubscribe(detach_sub_id);

	if (!vmci_handle_is_invalid(handle))
		vmci_qpair_detach(&qpair);

	return err;
}

static int
vmci_transport_recv_connecting_client_invalid(struct sock *sk,
					      struct vmci_transport_packet *pkt)
{
	int err = 0;
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsk->sent_request) {
		vsk->sent_request = false;
		vsk->ignore_connecting_rst = true;

		err = vmci_transport_send_conn_request(
			sk, vmci_trans(vsk)->queue_pair_size);
		if (err < 0)
			err = vmci_transport_error_to_vsock_error(err);
		else
			err = 0;
	}

	return err;
}

static int vmci_transport_recv_connected(struct sock *sk,
					 struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vsk;
	bool pkt_processed = false;

	/* In cases where we are closing the connection, it's sufficient to
	 * mark the state change (and maybe error) and wake up any waiting
	 * threads. Since this is a connected socket, it's owned by a user
	 * process and will be cleaned up when the failure is passed back on
	 * the current or next system call.  Our system call implementations
	 * must therefore check for error and state changes on entry and when
	 * being awoken.
	 */
	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
		if (pkt->u.mode) {
			vsk = vsock_sk(sk);

			vsk->peer_shutdown |= pkt->u.mode;
			sk->sk_state_change(sk);
		}
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		vsk = vsock_sk(sk);
		/* It is possible that we sent our peer a message (e.g. a
		 * WAITING_READ) right before we got notified that the peer had
		 * detached. If that happens then we can get a RST pkt back
		 * from our peer even though there is data available for us to
		 * read. In that case, don't shutdown the socket completely but
		 * instead allow the local client to finish reading data off
		 * the queuepair. Always treat a RST pkt in connected mode like
		 * a clean shutdown.
		 */
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		if (vsock_stream_has_data(vsk) <= 0)
			sk->sk_state = SS_DISCONNECTING;

		sk->sk_state_change(sk);
		break;

	default:
		vsk = vsock_sk(sk);
		vmci_trans(vsk)->notify_ops->handle_notify_pkt(
				sk, pkt, false, NULL, NULL,
				&pkt_processed);
		if (!pkt_processed)
			return -EINVAL;

		break;
	}

	return 0;
}

static int vmci_transport_socket_init(struct vsock_sock *vsk,
				      struct vsock_sock *psk)
{
	vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);
	if (!vsk->trans)
		return -ENOMEM;

	vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
	vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
	vmci_trans(vsk)->qpair = NULL;
	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
	vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
	vmci_trans(vsk)->notify_ops = NULL;
	INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
	vmci_trans(vsk)->sk = &vsk->sk;
	spin_lock_init(&vmci_trans(vsk)->lock);
	if (psk) {
		vmci_trans(vsk)->queue_pair_size =
			vmci_trans(psk)->queue_pair_size;
		vmci_trans(vsk)->queue_pair_min_size =
			vmci_trans(psk)->queue_pair_min_size;
		vmci_trans(vsk)->queue_pair_max_size =
			vmci_trans(psk)->queue_pair_max_size;
	} else {
		vmci_trans(vsk)->queue_pair_size =
			VMCI_TRANSPORT_DEFAULT_QP_SIZE;
		vmci_trans(vsk)->queue_pair_min_size =
			VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN;
		vmci_trans(vsk)->queue_pair_max_size =
			VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX;
	}

	return 0;
}

static void vmci_transport_free_resources(struct list_head *transport_list)
{
	while (!list_empty(transport_list)) {
		struct vmci_transport *transport =
		    list_first_entry(transport_list, struct vmci_transport,
				     elem);
		list_del(&transport->elem);

		if (transport->detach_sub_id != VMCI_INVALID_ID) {
			vmci_event_unsubscribe(transport->detach_sub_id);
			transport->detach_sub_id = VMCI_INVALID_ID;
		}

		if (!vmci_handle_is_invalid(transport->qp_handle)) {
			vmci_qpair_detach(&transport->qpair);
			transport->qp_handle = VMCI_INVALID_HANDLE;
			transport->produce_size = 0;
			transport->consume_size = 0;
		}

		kfree(transport);
	}
}

static void vmci_transport_cleanup(struct work_struct *work)
{
	LIST_HEAD(pending);

	spin_lock_bh(&vmci_transport_cleanup_lock);
	list_replace_init(&vmci_transport_cleanup_list, &pending);
	spin_unlock_bh(&vmci_transport_cleanup_lock);
	vmci_transport_free_resources(&pending);
}

static void vmci_transport_destruct(struct vsock_sock *vsk)
{
	/* transport can be NULL if we hit a failure at init() time */
	if (!vmci_trans(vsk))
		return;

	/* Ensure that the detach callback doesn't use the sk/vsk
	 * we are about to destruct.
	 */
	spin_lock_bh(&vmci_trans(vsk)->lock);
	vmci_trans(vsk)->sk = NULL;
	spin_unlock_bh(&vmci_trans(vsk)->lock);

	if (vmci_trans(vsk)->notify_ops)
		vmci_trans(vsk)->notify_ops->socket_destruct(vsk);

	spin_lock_bh(&vmci_transport_cleanup_lock);
	list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
	spin_unlock_bh(&vmci_transport_cleanup_lock);
	schedule_work(&vmci_transport_cleanup_work);

	vsk->trans = NULL;
}

static void vmci_transport_release(struct vsock_sock *vsk)
{
	vsock_remove_sock(vsk);

	if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) {
		vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle);
		vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
	}
}

static int vmci_transport_dgram_bind(struct vsock_sock *vsk,
				     struct sockaddr_vm *addr)
{
	u32 port;
	u32 flags;
	int err;

	/* VMCI will select a resource ID for us if we provide
	 * VMCI_INVALID_ID.
	 */
	port = addr->svm_port == VMADDR_PORT_ANY ?
			VMCI_INVALID_ID : addr->svm_port;

	if (port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	flags = addr->svm_cid == VMADDR_CID_ANY ?
		VMCI_FLAG_ANYCID_DG_HND : 0;

	err = vmci_transport_datagram_create_hnd(port, flags,
						 vmci_transport_recv_dgram_cb,
						 &vsk->sk,
						 &vmci_trans(vsk)->dg_handle);
	if (err < VMCI_SUCCESS)
		return vmci_transport_error_to_vsock_error(err);
	vsock_addr_init(&vsk->local_addr, addr->svm_cid,
			vmci_trans(vsk)->dg_handle.resource);

	return 0;
}

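/* Userspace view (illustrative sketch, not part of this module): binding the
 * kind of datagram socket this function services, using definitions from
 * <linux/vm_sockets.h>:
 *
 *	int fd = socket(AF_VSOCK, SOCK_DGRAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_ANY,
 *		.svm_port = VMADDR_PORT_ANY,
 *	};
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * With VMADDR_PORT_ANY, the VMCI resource ID chosen above becomes the local
 * port.
 */
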
static int vmci_transport_dgram_enqueue(
	struct vsock_sock *vsk,
	struct sockaddr_vm *remote_addr,
	struct msghdr *msg,
	size_t len)
{
	int err;
	struct vmci_datagram *dg;

	if (len > VMCI_MAX_DG_PAYLOAD_SIZE)
		return -EMSGSIZE;

	if (!vmci_transport_allow_dgram(vsk, remote_addr->svm_cid))
		return -EPERM;

	/* Allocate a buffer for the user's message and our packet header. */
	dg = kmalloc(len + sizeof(*dg), GFP_KERNEL);
	if (!dg)
		return -ENOMEM;

	err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
	if (err) {
		kfree(dg);
		return err;
	}

	dg->dst = vmci_make_handle(remote_addr->svm_cid,
				   remote_addr->svm_port);
	dg->src = vmci_make_handle(vsk->local_addr.svm_cid,
				   vsk->local_addr.svm_port);
	dg->payload_size = len;

	err = vmci_datagram_send(dg);
	kfree(dg);
	if (err < 0)
		return vmci_transport_error_to_vsock_error(err);

	return err - sizeof(*dg);
}

static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
					struct msghdr *msg, size_t len,
					int flags)
{
	int err;
	int noblock;
	struct vmci_datagram *dg;
	size_t payload_len;
	struct sk_buff *skb;

	noblock = flags & MSG_DONTWAIT;

	if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
		return -EOPNOTSUPP;

	/* Retrieve the head sk_buff from the socket's receive queue. */
	err = 0;
	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
	if (!skb)
		return err;

	dg = (struct vmci_datagram *)skb->data;
	if (!dg)
		/* err is 0, meaning we read zero bytes. */
		goto out;

	payload_len = dg->payload_size;
	/* Ensure the sk_buff matches the payload size claimed in the packet. */
	if (payload_len != skb->len - sizeof(*dg)) {
		err = -EINVAL;
		goto out;
	}

	if (payload_len > len) {
		payload_len = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Place the datagram payload in the user's iovec. */
	err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len);
	if (err)
		goto out;

	if (msg->msg_name) {
		/* Provide the address of the sender. */
		DECLARE_SOCKADDR(struct sockaddr_vm *, vm_addr, msg->msg_name);
		vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
		msg->msg_namelen = sizeof(*vm_addr);
	}
	err = payload_len;

out:
	skb_free_datagram(&vsk->sk, skb);
	return err;
}

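/* Userspace view (illustrative sketch): receiving one datagram serviced by
 * the function above; MSG_TRUNC set in msg_flags after the call means the
 * datagram payload was longer than the buffer:
 *
 *	char buf[256];
 *	struct sockaddr_vm from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 */
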
static bool vmci_transport_dgram_allow(u32 cid, u32 port)
{
	if (cid == VMADDR_CID_HYPERVISOR) {
		/* Registrations of PBRPC Servers do not modify VMX/Hypervisor
		 * state and are allowed.
		 */
		return port == VMCI_UNITY_PBRPC_REGISTER;
	}

	return true;
}

static int vmci_transport_connect(struct vsock_sock *vsk)
{
	int err;
	bool old_pkt_proto = false;
	struct sock *sk = &vsk->sk;

	if (vmci_transport_old_proto_override(&old_pkt_proto) &&
		old_pkt_proto) {
		err = vmci_transport_send_conn_request(
			sk, vmci_trans(vsk)->queue_pair_size);
		if (err < 0) {
			sk->sk_state = SS_UNCONNECTED;
			return err;
		}
	} else {
		int supported_proto_versions =
			vmci_transport_new_proto_supported_versions();
		err = vmci_transport_send_conn_request2(
				sk, vmci_trans(vsk)->queue_pair_size,
				supported_proto_versions);
		if (err < 0) {
			sk->sk_state = SS_UNCONNECTED;
			return err;
		}

		vsk->sent_request = true;
	}

	return err;
}

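/* Userspace view (illustrative sketch): the connect() that lands in the
 * function above, assuming a hypothetical peer with context ID 3 listening
 * on port 1234:
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = 3,
 *		.svm_port = 1234,
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */
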
static ssize_t vmci_transport_stream_dequeue(
	struct vsock_sock *vsk,
	struct msghdr *msg,
	size_t len,
	int flags)
{
	if (flags & MSG_PEEK)
		return vmci_qpair_peekv(vmci_trans(vsk)->qpair, msg, len, 0);
	else
		return vmci_qpair_dequev(vmci_trans(vsk)->qpair, msg, len, 0);
}

static ssize_t vmci_transport_stream_enqueue(
	struct vsock_sock *vsk,
	struct msghdr *msg,
	size_t len)
{
	return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
}

static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
{
	return vmci_qpair_consume_buf_ready(vmci_trans(vsk)->qpair);
}

static s64 vmci_transport_stream_has_space(struct vsock_sock *vsk)
{
	return vmci_qpair_produce_free_space(vmci_trans(vsk)->qpair);
}

static u64 vmci_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return vmci_trans(vsk)->consume_size;
}

static bool vmci_transport_stream_is_active(struct vsock_sock *vsk)
{
	return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle);
}

static u64 vmci_transport_get_buffer_size(struct vsock_sock *vsk)
{
	return vmci_trans(vsk)->queue_pair_size;
}

static u64 vmci_transport_get_min_buffer_size(struct vsock_sock *vsk)
{
	return vmci_trans(vsk)->queue_pair_min_size;
}

static u64 vmci_transport_get_max_buffer_size(struct vsock_sock *vsk)
{
	return vmci_trans(vsk)->queue_pair_max_size;
}

static void vmci_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
{
	if (val < vmci_trans(vsk)->queue_pair_min_size)
		vmci_trans(vsk)->queue_pair_min_size = val;
	if (val > vmci_trans(vsk)->queue_pair_max_size)
		vmci_trans(vsk)->queue_pair_max_size = val;
	vmci_trans(vsk)->queue_pair_size = val;
}

static void vmci_transport_set_min_buffer_size(struct vsock_sock *vsk,
					       u64 val)
{
	if (val > vmci_trans(vsk)->queue_pair_size)
		vmci_trans(vsk)->queue_pair_size = val;
	vmci_trans(vsk)->queue_pair_min_size = val;
}

static void vmci_transport_set_max_buffer_size(struct vsock_sock *vsk,
					       u64 val)
{
	if (val < vmci_trans(vsk)->queue_pair_size)
		vmci_trans(vsk)->queue_pair_size = val;
	vmci_trans(vsk)->queue_pair_max_size = val;
}

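/* These setters back the AF_VSOCK buffer-size socket options (illustrative
 * userspace sketch; option names are from <linux/vm_sockets.h>):
 *
 *	unsigned long long val = 65536;
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
 *		   &val, sizeof(val));
 *
 * The clamping above keeps min <= size <= max as the three values change.
 */
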
static int vmci_transport_notify_poll_in(
	struct vsock_sock *vsk,
	size_t target,
	bool *data_ready_now)
{
	return vmci_trans(vsk)->notify_ops->poll_in(
			&vsk->sk, target, data_ready_now);
}

static int vmci_transport_notify_poll_out(
	struct vsock_sock *vsk,
	size_t target,
	bool *space_available_now)
{
	return vmci_trans(vsk)->notify_ops->poll_out(
			&vsk->sk, target, space_available_now);
}

static int vmci_transport_notify_recv_init(
	struct vsock_sock *vsk,
	size_t target,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_init(
			&vsk->sk, target,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_recv_pre_block(
	struct vsock_sock *vsk,
	size_t target,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_pre_block(
			&vsk->sk, target,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_recv_pre_dequeue(
	struct vsock_sock *vsk,
	size_t target,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_pre_dequeue(
			&vsk->sk, target,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_recv_post_dequeue(
	struct vsock_sock *vsk,
	size_t target,
	ssize_t copied,
	bool data_read,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_post_dequeue(
			&vsk->sk, target, copied, data_read,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_send_init(
	struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_init(
			&vsk->sk,
			(struct vmci_transport_send_notify_data *)data);
}

static int vmci_transport_notify_send_pre_block(
	struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_pre_block(
			&vsk->sk,
			(struct vmci_transport_send_notify_data *)data);
}

static int vmci_transport_notify_send_pre_enqueue(
	struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_pre_enqueue(
			&vsk->sk,
			(struct vmci_transport_send_notify_data *)data);
}

static int vmci_transport_notify_send_post_enqueue(
	struct vsock_sock *vsk,
	ssize_t written,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_post_enqueue(
			&vsk->sk, written,
			(struct vmci_transport_send_notify_data *)data);
}

static bool vmci_transport_old_proto_override(bool *old_pkt_proto)
{
	if (PROTOCOL_OVERRIDE != -1) {
		if (PROTOCOL_OVERRIDE == 0)
			*old_pkt_proto = true;
		else
			*old_pkt_proto = false;

		pr_info("Proto override in use\n");
		return true;
	}

	return false;
}

static bool vmci_transport_proto_to_notify_struct(struct sock *sk,
						  u16 *proto,
						  bool old_pkt_proto)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (old_pkt_proto) {
		if (*proto != VSOCK_PROTO_INVALID) {
			pr_err("Can't set both an old and new protocol\n");
			return false;
		}
		vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_ops;
		goto exit;
	}

	switch (*proto) {
	case VSOCK_PROTO_PKT_ON_NOTIFY:
		vmci_trans(vsk)->notify_ops =
			&vmci_transport_notify_pkt_q_state_ops;
		break;
	default:
		pr_err("Unknown notify protocol version\n");
		return false;
	}

exit:
	vmci_trans(vsk)->notify_ops->socket_init(sk);
	return true;
}

static u16 vmci_transport_new_proto_supported_versions(void)
{
	if (PROTOCOL_OVERRIDE != -1)
		return PROTOCOL_OVERRIDE;

	return VSOCK_PROTO_ALL_SUPPORTED;
}

static u32 vmci_transport_get_local_cid(void)
{
	return vmci_get_context_id();
}

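/* Userspace can query the value returned here (illustrative sketch;
 * IOCTL_VM_SOCKETS_GET_LOCAL_CID is defined in <linux/vm_sockets.h>):
 *
 *	unsigned int cid;
 *	int fd = open("/dev/vsock", O_RDONLY);
 *	ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 */
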
static const struct vsock_transport vmci_transport = {
	.init = vmci_transport_socket_init,
	.destruct = vmci_transport_destruct,
	.release = vmci_transport_release,
	.connect = vmci_transport_connect,
	.dgram_bind = vmci_transport_dgram_bind,
	.dgram_dequeue = vmci_transport_dgram_dequeue,
	.dgram_enqueue = vmci_transport_dgram_enqueue,
	.dgram_allow = vmci_transport_dgram_allow,
	.stream_dequeue = vmci_transport_stream_dequeue,
	.stream_enqueue = vmci_transport_stream_enqueue,
	.stream_has_data = vmci_transport_stream_has_data,
	.stream_has_space = vmci_transport_stream_has_space,
	.stream_rcvhiwat = vmci_transport_stream_rcvhiwat,
	.stream_is_active = vmci_transport_stream_is_active,
	.stream_allow = vmci_transport_stream_allow,
	.notify_poll_in = vmci_transport_notify_poll_in,
	.notify_poll_out = vmci_transport_notify_poll_out,
	.notify_recv_init = vmci_transport_notify_recv_init,
	.notify_recv_pre_block = vmci_transport_notify_recv_pre_block,
	.notify_recv_pre_dequeue = vmci_transport_notify_recv_pre_dequeue,
	.notify_recv_post_dequeue = vmci_transport_notify_recv_post_dequeue,
	.notify_send_init = vmci_transport_notify_send_init,
	.notify_send_pre_block = vmci_transport_notify_send_pre_block,
	.notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue,
	.notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue,
	.shutdown = vmci_transport_shutdown,
	.set_buffer_size = vmci_transport_set_buffer_size,
	.set_min_buffer_size = vmci_transport_set_min_buffer_size,
	.set_max_buffer_size = vmci_transport_set_max_buffer_size,
	.get_buffer_size = vmci_transport_get_buffer_size,
	.get_min_buffer_size = vmci_transport_get_min_buffer_size,
	.get_max_buffer_size = vmci_transport_get_max_buffer_size,
	.get_local_cid = vmci_transport_get_local_cid,
};

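/* This ops table is what vmci_transport_init() below hands to
 * vsock_core_init(); the AF_VSOCK core dispatches socket calls through it.
 */
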
static int __init vmci_transport_init(void)
{
	int err;

	/* Create the datagram handle that we will use to send and receive all
	 * VSocket control messages for this context.
	 */
	err = vmci_transport_datagram_create_hnd(VMCI_TRANSPORT_PACKET_RID,
						 VMCI_FLAG_ANYCID_DG_HND,
						 vmci_transport_recv_stream_cb,
						 NULL,
						 &vmci_transport_stream_handle);
	if (err < VMCI_SUCCESS) {
		pr_err("Unable to create datagram handle. (%d)\n", err);
		return vmci_transport_error_to_vsock_error(err);
	}

	err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
				   vmci_transport_qp_resumed_cb,
				   NULL, &vmci_transport_qp_resumed_sub_id);
	if (err < VMCI_SUCCESS) {
		pr_err("Unable to subscribe to resumed event. (%d)\n", err);
		err = vmci_transport_error_to_vsock_error(err);
		vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
		goto err_destroy_stream_handle;
	}

	err = vsock_core_init(&vmci_transport);
	if (err < 0)
		goto err_unsubscribe;

	return 0;

err_unsubscribe:
	vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
err_destroy_stream_handle:
	vmci_datagram_destroy_handle(vmci_transport_stream_handle);
	return err;
}
module_init(vmci_transport_init);

static void __exit vmci_transport_exit(void)
{
	cancel_work_sync(&vmci_transport_cleanup_work);
	vmci_transport_free_resources(&vmci_transport_cleanup_list);

	if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
		if (vmci_datagram_destroy_handle(
			vmci_transport_stream_handle) != VMCI_SUCCESS)
			pr_err("Couldn't destroy datagram handle\n");
		vmci_transport_stream_handle = VMCI_INVALID_HANDLE;
	}

	if (vmci_transport_qp_resumed_sub_id != VMCI_INVALID_ID) {
		vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
		vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
	}

	vsock_core_exit();
}
module_exit(vmci_transport_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
MODULE_VERSION("1.0.4.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);