/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>
static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
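/* Per-device state.  Only one virtio-vsock device per guest is supported:
 * senders look it up under rcu_read_lock() through the_virtio_vsock, while
 * probe()/remove() publish and retract the pointer under
 * the_virtio_vsock_mutex and wait for readers with synchronize_rcu().
 */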
struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock.  vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;

	struct work_struct loopback_work;
	spinlock_t loopback_list_lock; /* protects loopback_list */
	struct list_head loopback_list;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock.  vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	int rx_buf_nr;
	int rx_buf_max_nr;
	bool rx_run;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
};
static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}
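/* Packets addressed to our own guest_cid never reach the device: they are
 * queued on loopback_list and handed back to the rx path by
 * virtio_transport_loopback_work(), so guest-local connections work without
 * any host round trip.
 */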
static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
					      struct virtio_vsock_pkt *pkt)
{
	int len = pkt->len;

	spin_lock_bh(&vsock->loopback_list_lock);
	list_add_tail(&pkt->list, &vsock->loopback_list);
	spin_unlock_bh(&vsock->loopback_list_lock);

	queue_work(virtio_vsock_workqueue, &vsock->loopback_work);

	return len;
}
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct scatterlist hdr, buf, *sgs[2];
		int ret, in_sg = 0, out_sg = 0;
		bool reply;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		virtio_transport_deliver_tap_pkt(pkt);

		reply = pkt->reply;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[out_sg++] = &hdr;
		if (pkt->buf) {
			sg_init_one(&buf, pkt->buf, pkt->len);
			sgs[out_sg++] = &buf;
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq
		 */
		if (ret < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
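/* queued_replies throttles the rx path: each queued reply is counted in
 * virtio_transport_send_pkt() below and uncounted in the send worker above
 * once the device consumes it.  If the count ever reaches the rx vring size,
 * virtio_transport_more_replies() makes the rx worker back off until the
 * device drains some replies.
 */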
static int
virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct virtio_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
		len = virtio_transport_send_pkt_loopback(vsock, pkt);
		goto out_rcu;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}
static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0, ret;
	LIST_HEAD(freeme);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}
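/* Post fresh receive buffers (header + payload) until the rx virtqueue is
 * full.  rx_buf_max_nr remembers the high watermark so the rx worker can
 * refill once fewer than half that many buffers remain posted.
 */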
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
	struct virtio_vsock_pkt *pkt;
	struct scatterlist hdr, buf, *sgs[2];
	struct virtqueue *vq;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
		if (!pkt)
			break;

		pkt->buf = kmalloc(buf_len, GFP_KERNEL);
		if (!pkt->buf) {
			virtio_transport_free_pkt(pkt);
			break;
		}

		pkt->buf_len = buf_len;
		pkt->len = buf_len;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[0] = &hdr;

		sg_init_one(&buf, pkt->buf, buf_len);
		sgs[1] = &buf;
		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
		if (ret) {
			virtio_transport_free_pkt(pkt);
			break;
		}
		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}
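/* Reclaim and free packets that the device has finished transmitting.
 * Freeing may unblock virtio_transport_send_pkt_work(), so requeue it if
 * anything was reclaimed.
 */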
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct virtio_vsock_pkt *pkt;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_free_pkt(pkt);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}
/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}
346 /* event_lock must be held */
347 static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
348 struct virtio_vsock_event *event)
350 struct scatterlist sg;
351 struct virtqueue *vq;
353 vq = vsock->vqs[VSOCK_VQ_EVENT];
355 sg_init_one(&sg, event, sizeof(*event));
357 return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
360 /* event_lock must be held */
361 static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
365 for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
366 struct virtio_vsock_event *event = &vsock->event_list[i];
368 virtio_vsock_event_fill_one(vsock, event);
371 virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}
static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}
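/* A TRANSPORT_RESET event is raised by the device, e.g. after live
 * migration: established connections are no longer valid and the guest CID
 * may have changed, so re-read it and reset all connected sockets.
 */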
/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(virtio_vsock_reset_sock);
		break;
	}
}
static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}
static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}
static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}
static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
static struct virtio_transport virtio_transport = {
	.transport = {
		.get_local_cid = virtio_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = virtio_transport_cancel_pkt,

		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size = virtio_transport_set_buffer_size,
		.set_min_buffer_size = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size = virtio_transport_set_max_buffer_size,
		.get_buffer_size = virtio_transport_get_buffer_size,
		.get_min_buffer_size = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = virtio_transport_send_pkt,
};
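/* Deliver queued loopback packets through the normal receive path.  Running
 * under rx_lock serializes loopback delivery with the rx worker and makes it
 * stop once rx_run is cleared during device removal.
 */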
static void virtio_transport_loopback_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, loopback_work);
	LIST_HEAD(pkts);

	spin_lock_bh(&vsock->loopback_list_lock);
	list_splice_init(&vsock->loopback_list, &pkts);
	spin_unlock_bh(&vsock->loopback_list_lock);

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	while (!list_empty(&pkts)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);

		virtio_transport_recv_pkt(&virtio_transport, pkt);
	}
out:
	mutex_unlock(&vsock->rx_lock);
}
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct virtio_vsock_pkt *pkt;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies.  Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			pkt = virtqueue_get_buf(vq, &len);
			if (!pkt)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(pkt->hdr) ||
				     len > sizeof(pkt->hdr) + pkt->len)) {
				virtio_transport_free_pkt(pkt);
				continue;
			}

			pkt->len = len - sizeof(pkt->hdr);
			virtio_transport_deliver_tap_pkt(pkt);
			virtio_transport_recv_pkt(&virtio_transport, pkt);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}
static int virtio_vsock_probe(struct virtio_device *vdev)
{
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	struct virtio_vsock *vsock = NULL;
	int ret;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
			      vsock->vqs, callbacks, names,
			      NULL);
	if (ret < 0)
		goto out;

	virtio_vsock_update_guest_cid(vsock);

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	spin_lock_init(&vsock->loopback_list_lock);
	INIT_LIST_HEAD(&vsock->loopback_list);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
	INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	vdev->priv = vsock;
	rcu_assign_pointer(the_virtio_vsock, vsock);

	mutex_unlock(&the_virtio_vsock_mutex);
	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
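/* Teardown order matters here: retract the RCU pointer so no new work can be
 * queued, flush workers that may still be running, clear the *_run flags so
 * late virtqueue callbacks become no-ops, reset the device, and only then
 * free the buffers still held by the virtqueues and packet lists.
 */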
static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	struct virtio_vsock_pkt *pkt;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	flush_work(&vsock->loopback_work);
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	/* Reset all connected sockets when the device disappears */
	vsock_for_each_connected_socket(virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call vdev->config->reset().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	vdev->config->reset(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->tx_lock);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	spin_lock_bh(&vsock->loopback_list_lock);
	while (!list_empty(&vsock->loopback_list)) {
		pkt = list_first_entry(&vsock->loopback_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->loopback_list_lock);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
};
static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
};
static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_init(&virtio_transport.transport);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_exit();
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}
static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_exit();
	destroy_workqueue(virtio_vsock_workqueue);
}
module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);