/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"
#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small packets.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
        VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};
/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
struct vhost_vsock {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[2];

        /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
        struct hlist_node hash;

        struct vhost_work send_pkt_work;
        spinlock_t send_pkt_list_lock;
        struct list_head send_pkt_list;	/* host->guest pending packets */

        atomic_t queued_replies;

        u32 guest_cid;
};
static u32 vhost_transport_get_local_cid(void)
{
        return VHOST_VSOCK_DEFAULT_HOST_CID;
}
/* Callers that dereference the return value must hold vhost_vsock_lock or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
                u32 other_cid = vsock->guest_cid;

                /* Skip instances that have no CID yet */
                if (!other_cid)
                        continue;

                if (other_cid == guest_cid) {
                        return vsock;
                }
        }

        return NULL;
}
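/* Deliver packets queued on send_pkt_list (host->guest) into the guest's
 * RX virtqueue, copying header and payload into guest-provided buffers.
 * Runs from vhost worker context with the vq mutex held.
 */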
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
{
        struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
        bool added = false;
        bool restart_tx = false;

        mutex_lock(&vq->mutex);

        if (!vq->private_data)
                goto out;

        /* Avoid further vmexits, we're already processing the virtqueue */
        vhost_disable_notify(&vsock->dev, vq);

        for (;;) {
                struct virtio_vsock_pkt *pkt;
                struct iov_iter iov_iter;
                unsigned out, in;
                size_t nbytes;
                size_t len;
                int head;

                spin_lock_bh(&vsock->send_pkt_list_lock);
                if (list_empty(&vsock->send_pkt_list)) {
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        vhost_enable_notify(&vsock->dev, vq);
                        break;
                }

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                spin_unlock_bh(&vsock->send_pkt_list_lock);

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        break;
                }

                if (head == vq->num) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);

                        /* We cannot finish yet if more buffers snuck in while
                         * re-enabling notify.
                         */
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                if (out) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Expected 0 output buffers, got %u\n", out);
                        break;
                }

                len = iov_length(&vq->iov[out], in);
                iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

                nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
                if (nbytes != sizeof(pkt->hdr)) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Faulted on copying pkt hdr\n");
                        break;
                }

                nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
                if (nbytes != pkt->len) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Faulted on copying pkt buf\n");
                        break;
                }

                vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
                added = true;

                if (pkt->reply) {
                        int val;

                        val = atomic_dec_return(&vsock->queued_replies);

                        /* Do we have resources to resume tx processing? */
                        if (val + 1 == tx_vq->num)
                                restart_tx = true;
                }

                virtio_transport_free_pkt(pkt);
        }
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);

        if (restart_tx)
                vhost_poll_queue(&tx_vq->poll);
}
static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_vsock *vsock;

        vsock = container_of(work, struct vhost_vsock, send_pkt_work);
        vq = &vsock->vqs[VSOCK_VQ_RX];

        vhost_transport_do_send_pkt(vsock, vq);
}
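/* Queue a packet for the guest identified by hdr.dst_cid and kick the
 * send worker. Reply packets are counted in queued_replies so the TX
 * path can be throttled before we run out of room for replies.
 */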
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
        struct vhost_vsock *vsock;
        struct vhost_virtqueue *vq;
        int len = pkt->len;

        rcu_read_lock();

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
                rcu_read_unlock();
                virtio_transport_free_pkt(pkt);
                return -ENODEV;
        }

        vq = &vsock->vqs[VSOCK_VQ_RX];

        if (pkt->reply)
                atomic_inc(&vsock->queued_replies);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_add_tail(&pkt->list, &vsock->send_pkt_list);
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

        rcu_read_unlock();
        return len;
}
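/* Drop all pending packets that belong to @vsk, e.g. when a connect() is
 * interrupted by a signal. If enough queued replies go away, re-arm the
 * TX processing that vhost_vsock_more_replies() had throttled.
 */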
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
        struct vhost_vsock *vsock;
        struct virtio_vsock_pkt *pkt, *n;
        int cnt = 0;
        int ret = -ENODEV;
        LIST_HEAD(freeme);

        rcu_read_lock();

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock)
                goto out;

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
                if (pkt->vsk != vsk)
                        continue;
                list_move(&pkt->list, &freeme);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        list_for_each_entry_safe(pkt, n, &freeme, list) {
                if (pkt->reply)
                        cnt++;
                list_del(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }

        if (cnt) {
                struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
                int new_cnt;

                new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
                if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
                        vhost_poll_queue(&tx_vq->poll);
        }

        ret = 0;
out:
        rcu_read_unlock();
        return ret;
}
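/* Read one guest->host packet from the TX virtqueue descriptors: validate
 * the buffer layout, copy the header, then copy the payload (bounded by
 * VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) into a freshly allocated packet.
 */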
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
                      unsigned int out, unsigned int in)
{
        struct virtio_vsock_pkt *pkt;
        struct iov_iter iov_iter;
        size_t nbytes;
        size_t len;

        if (in != 0) {
                vq_err(vq, "Expected 0 input buffers, got %u\n", in);
                return NULL;
        }

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
                return NULL;

        len = iov_length(vq->iov, out);
        iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

        nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
        if (nbytes != sizeof(pkt->hdr)) {
                vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
                       sizeof(pkt->hdr), nbytes);
                kfree(pkt);
                return NULL;
        }

        if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
                pkt->len = le32_to_cpu(pkt->hdr.len);

        /* No payload */
        if (!pkt->len)
                return pkt;

        /* The pkt is too big */
        if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
                kfree(pkt);
                return NULL;
        }

        pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
        if (!pkt->buf) {
                kfree(pkt);
                return NULL;
        }

        nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
        if (nbytes != pkt->len) {
                vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
                       pkt->len, nbytes);
                virtio_transport_free_pkt(pkt);
                return NULL;
        }

        return pkt;
}
/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
        int val;

        smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
        val = atomic_read(&vsock->queued_replies);

        return val < vq->num;
}
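/* Worker for guest->host (TX) virtqueue kicks: pop descriptors, build
 * packets and pass correctly addressed ones to the core transport,
 * bounded by the vq weight so one guest cannot monopolize the worker.
 */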
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);
        struct virtio_vsock_pkt *pkt;
        int head, pkts = 0, total_len = 0;
        unsigned int out, in;
        bool added = false;

        mutex_lock(&vq->mutex);

        if (!vq->private_data)
                goto out;

        vhost_disable_notify(&vsock->dev, vq);
        do {
                u32 len;

                if (!vhost_vsock_more_replies(vsock)) {
                        /* Stop tx until the device processes already
                         * pending replies.  Leave tx virtqueue
                         * callbacks disabled.
                         */
                        goto no_more_replies;
                }

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;

                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                pkt = vhost_vsock_alloc_pkt(vq, out, in);
                if (!pkt) {
                        vq_err(vq, "Faulted on pkt\n");
                        continue;
                }

                len = pkt->len;

                /* Only accept correctly addressed packets */
                if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
                    le64_to_cpu(pkt->hdr.dst_cid) ==
                    vhost_transport_get_local_cid())
                        virtio_transport_recv_pkt(pkt);
                else
                        virtio_transport_free_pkt(pkt);

                len += sizeof(pkt->hdr);
                vhost_add_used(vq, head, 0);
                total_len += len;
                added = true;
        } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);
}
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);

        vhost_transport_do_send_pkt(vsock, vq);
}
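/* VHOST_VSOCK_SET_RUNNING(1): validate virtqueue access and bring the
 * device live by publishing vsock as each virtqueue's private_data.
 */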
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq;
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret < 0)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);

                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
                        goto err_vq;
                }

                if (!vq->private_data) {
                        vq->private_data = vsock;
                        ret = vhost_vq_init_access(vq);
                        if (ret)
                                goto err_vq;
                }

                mutex_unlock(&vq->mutex);
        }

        /* Some packets may have been queued before the device was started,
         * let's kick the send worker to send them.
         */
        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

        mutex_unlock(&vsock->dev.mutex);
        return 0;

err_vq:
        vq->private_data = NULL;
        mutex_unlock(&vq->mutex);

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
        }
err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}
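/* Used both by the VHOST_VSOCK_SET_RUNNING(0) ioctl (check_owner == true)
 * and by the release path (check_owner == false): clearing private_data
 * makes the workers stop touching the virtqueues.
 */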
static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
        size_t i;
        int ret = 0;

        mutex_lock(&vsock->dev.mutex);

        if (check_owner) {
                ret = vhost_dev_check_owner(&vsock->dev);
                if (ret < 0)
                        goto err;
        }

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                struct vhost_virtqueue *vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
        }

err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}
static void vhost_vsock_free(struct vhost_vsock *vsock)
{
        kvfree(vsock);
}
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
        struct vhost_virtqueue **vqs;
        struct vhost_vsock *vsock;
        int ret;

        /* This struct is large and allocation could fail, fall back to vmalloc
         * if there is no other way.
         */
        vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!vsock) {
                vsock = vmalloc(sizeof(*vsock));
                if (!vsock)
                        return -ENOMEM;
        }

        vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                ret = -ENOMEM;
                goto out;
        }

        vsock->guest_cid = 0; /* no CID assigned yet */

        atomic_set(&vsock->queued_replies, 0);

        vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
        vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

        vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
                       VHOST_VSOCK_PKT_WEIGHT,
                       VHOST_VSOCK_WEIGHT);

        file->private_data = vsock;
        spin_lock_init(&vsock->send_pkt_list_lock);
        INIT_LIST_HEAD(&vsock->send_pkt_list);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
        return 0;

out:
        vhost_vsock_free(vsock);
        return ret;
}
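/* Wait for all currently queued works (the kick handlers and the send
 * worker) to finish.
 */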
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
                if (vsock->vqs[i].handle_kick)
                        vhost_poll_flush(&vsock->vqs[i].poll);
        vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}
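/* Called for each connected socket on release: reset sockets whose peer
 * CID no longer maps to a vhost_vsock instance.
 */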
static void vhost_vsock_reset_orphans(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        /* vmci_transport.c doesn't take sk_lock here either. At least we're
         * under vsock_table_lock so the sock cannot disappear while we're
         * executing.
         */

        /* If the peer is still valid, no need to reset connection */
        if (vhost_vsock_get(vsk->remote_addr.svm_cid))
                return;

        /* If the close timeout is pending, let it expire. This avoids races
         * with the timeout callback.
         */
        if (vsk->close_work_scheduled)
                return;

        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown = SHUTDOWN_MASK;
        sk->sk_state = SS_UNCONNECTED;
        sk->sk_err = ECONNRESET;
        sk->sk_error_report(sk);
}
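/* Release path: unpublish the CID, wait an RCU grace period so concurrent
 * lookups finish, reset orphaned sockets, then stop the device and free
 * any packets still queued for the guest.
 */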
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
        struct vhost_vsock *vsock = file->private_data;

        spin_lock_bh(&vhost_vsock_lock);
        if (vsock->guest_cid)
                hash_del_rcu(&vsock->hash);
        spin_unlock_bh(&vhost_vsock_lock);

        /* Wait for other CPUs to finish using vsock */
        synchronize_rcu();

        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient. Room for improvement here. */
        vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

        /* Don't check the owner, because we are in the release path, so we
         * need to stop the vsock device in any case.
         * vhost_vsock_stop() can not fail in this case, so we don't need to
         * check the return code.
         */
        vhost_vsock_stop(vsock, false);
        vhost_vsock_flush(vsock);
        vhost_dev_stop(&vsock->dev);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        while (!list_empty(&vsock->send_pkt_list)) {
                struct virtio_vsock_pkt *pkt;

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        vhost_dev_cleanup(&vsock->dev, false);
        kfree(vsock->dev.vqs);
        vhost_vsock_free(vsock);
        return 0;
}
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
        struct vhost_vsock *other;

        /* Refuse reserved CIDs */
        if (guest_cid <= VMADDR_CID_HOST ||
            guest_cid == U32_MAX)
                return -EINVAL;

        /* 64-bit CIDs are not yet supported */
        if (guest_cid > U32_MAX)
                return -EINVAL;

        /* Refuse if CID is already in use */
        spin_lock_bh(&vhost_vsock_lock);
        other = vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                spin_unlock_bh(&vhost_vsock_lock);
                return -EADDRINUSE;
        }

        if (vsock->guest_cid)
                hash_del_rcu(&vsock->hash);

        vsock->guest_cid = guest_cid;
        hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
        spin_unlock_bh(&vhost_vsock_lock);

        return 0;
}
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
        struct vhost_virtqueue *vq;
        int i;

        if (features & ~VHOST_VSOCK_FEATURES)
                return -EOPNOTSUPP;

        mutex_lock(&vsock->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vsock->dev)) {
                mutex_unlock(&vsock->dev.mutex);
                return -EFAULT;
        }

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];
                mutex_lock(&vq->mutex);
                vq->acked_features = features;
                mutex_unlock(&vq->mutex);
        }
        mutex_unlock(&vsock->dev.mutex);
        return 0;
}
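/* vsock-specific ioctls are handled here; everything else falls through
 * to the generic vhost device/vring ioctls.
 */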
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                                  unsigned long arg)
{
        struct vhost_vsock *vsock = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 guest_cid;
        u64 features;
        int start;
        int r;

        switch (ioctl) {
        case VHOST_VSOCK_SET_GUEST_CID:
                if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
                        return -EFAULT;
                return vhost_vsock_set_cid(vsock, guest_cid);
        case VHOST_VSOCK_SET_RUNNING:
                if (copy_from_user(&start, argp, sizeof(start)))
                        return -EFAULT;
                if (start)
                        return vhost_vsock_start(vsock);
                else
                        return vhost_vsock_stop(vsock, true);
        case VHOST_GET_FEATURES:
                features = VHOST_VSOCK_FEATURES;
                if (copy_to_user(argp, &features, sizeof(features)))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                return vhost_vsock_set_features(vsock, features);
        default:
                mutex_lock(&vsock->dev.mutex);
                r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
                else
                        vhost_vsock_flush(vsock);
                mutex_unlock(&vsock->dev.mutex);
                return r;
        }
}
static const struct file_operations vhost_vsock_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vsock_dev_open,
        .release        = vhost_vsock_dev_release,
        .llseek         = noop_llseek,
        .unlocked_ioctl = vhost_vsock_dev_ioctl,
};
static struct miscdevice vhost_vsock_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "vhost-vsock",
        .fops = &vhost_vsock_fops,
};
static struct virtio_transport vhost_transport = {
        .transport = {
                .get_local_cid            = vhost_transport_get_local_cid,

                .init                     = virtio_transport_do_socket_init,
                .destruct                 = virtio_transport_destruct,
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,
                .cancel_pkt               = vhost_transport_cancel_pkt,

                .dgram_enqueue            = virtio_transport_dgram_enqueue,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
                .dgram_bind               = virtio_transport_dgram_bind,
                .dgram_allow              = virtio_transport_dgram_allow,

                .stream_enqueue           = virtio_transport_stream_enqueue,
                .stream_dequeue           = virtio_transport_stream_dequeue,
                .stream_has_data          = virtio_transport_stream_has_data,
                .stream_has_space         = virtio_transport_stream_has_space,
                .stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
                .stream_is_active         = virtio_transport_stream_is_active,
                .stream_allow             = virtio_transport_stream_allow,

                .notify_poll_in           = virtio_transport_notify_poll_in,
                .notify_poll_out          = virtio_transport_notify_poll_out,
                .notify_recv_init         = virtio_transport_notify_recv_init,
                .notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
                .notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
                .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
                .notify_send_init         = virtio_transport_notify_send_init,
                .notify_send_pre_block    = virtio_transport_notify_send_pre_block,
                .notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
                .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

                .set_buffer_size          = virtio_transport_set_buffer_size,
                .set_min_buffer_size      = virtio_transport_set_min_buffer_size,
                .set_max_buffer_size      = virtio_transport_set_max_buffer_size,
                .get_buffer_size          = virtio_transport_get_buffer_size,
                .get_min_buffer_size      = virtio_transport_get_min_buffer_size,
                .get_max_buffer_size      = virtio_transport_get_max_buffer_size,
        },

        .send_pkt = vhost_transport_send_pkt,
};
static int __init vhost_vsock_init(void)
{
        int ret;

        ret = vsock_core_init(&vhost_transport.transport);
        if (ret < 0)
                return ret;
        return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
        misc_deregister(&vhost_vsock_misc);
        vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");