// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>
static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */
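/* Per-device state. A single instance is published to readers via RCU
 * through 'the_virtio_vsock'.
 */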
struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	struct sk_buff_head send_pkt_queue;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;

	/* These fields are used only in the tx path in function
	 * 'virtio_transport_send_pkt_work()', so to save
	 * stack space in it, place both of them here. Each
	 * pointer from 'out_sgs' points to the corresponding
	 * element in 'out_bufs' - this is initialized in
	 * 'virtio_vsock_probe()'. Both fields are protected
	 * by 'tx_lock'. +1 is needed for the packet header.
	 */
	struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
	struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
};

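/* Return the CID the device assigned to this guest, or VMADDR_CID_ANY if no
 * device is currently bound.
 */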
static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

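/* Worker that drains 'send_pkt_queue' into the TX virtqueue. Packets that
 * don't fit are requeued at the head and retried once the device completes
 * earlier buffers (see virtio_transport_tx_work()).
 */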
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		int ret, in_sg = 0, out_sg = 0;
		struct scatterlist **sgs;
		struct sk_buff *skb;
		bool reply;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		reply = virtio_vsock_skb_reply(skb);
		sgs = vsock->out_sgs;
		sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
			    sizeof(*virtio_vsock_hdr(skb)));
		out_sg++;

		if (!skb_is_nonlinear(skb)) {
			if (skb->len > 0) {
				sg_init_one(sgs[out_sg], skb->data, skb->len);
				out_sg++;
			}
		} else {
			struct skb_shared_info *si;
			int i;

			/* If skb is nonlinear, then its buffer must contain
			 * only the header and nothing more. Data is stored in
			 * the fragged part.
			 */
			WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));

			si = skb_shinfo(skb);

			for (i = 0; i < si->nr_frags; i++) {
				skb_frag_t *skb_frag = &si->frags[i];
				void *va;

				/* We will use 'page_to_virt()' for the userspace page
				 * here, because virtio or dma-mapping layers will call
				 * 'virt_to_phys()' later to fill the buffer descriptor.
				 * We don't touch memory at "virtual" address of this page.
				 */
				va = page_to_virt(skb_frag_page(skb_frag));
				sg_init_one(sgs[out_sg],
					    va + skb_frag_off(skb_frag),
					    skb_frag_size(skb_frag));
				out_sg++;
			}
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq.
		 */
		if (ret < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		virtio_transport_deliver_tap_pkt(skb);

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

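/* Transport send_pkt callback: queue the skb on 'send_pkt_queue' and defer
 * the actual virtqueue work to 'send_pkt_work'. Returns the skb length on
 * success, or -ENODEV if no device is bound or the packet is self-addressed.
 */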
static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}

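/* Drop all packets queued for 'vsk' that were not yet handed to the device.
 * If dropped replies free up room in the reply accounting, restart rx
 * processing.
 */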
static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	int cnt = 0, ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

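/* Refill the RX virtqueue with freshly allocated skbs until it is full or
 * allocation fails. Called with rx_lock held.
 */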
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

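/* TX completion worker: reclaim skbs the device has consumed and, if any
 * were reclaimed, kick send_pkt_work since virtqueue space was freed.
 */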
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			consume_skb(skb);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}

static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static bool virtio_transport_can_msgzerocopy(int bufs_num)
{
	struct virtio_vsock *vsock;
	bool res = false;

	rcu_read_lock();

	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock) {
		struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];

		/* Check that the tx queue is large enough to hold all the
		 * data to send. This matters because when there is not
		 * enough free space in the queue, the current skb is
		 * reinserted at the head of the socket's tx list to retry
		 * transmission later; if the skb is bigger than the whole
		 * queue, it would be reinserted again and again, blocking
		 * other skbs from being sent. Each page of the user-provided
		 * buffer is added as a single buffer to the tx virtqueue,
		 * so compare the number of pages against the maximum
		 * capacity of the queue.
		 */
		if (bufs_num <= vq->num_max)
			res = true;
	}

	rcu_read_unlock();

	return res;
}

static bool virtio_transport_msgzerocopy_allow(void)
{
	return true;
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = virtio_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = virtio_transport_cancel_pkt,

		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = virtio_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.msgzerocopy_allow        = virtio_transport_msgzerocopy_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat      = virtio_transport_notify_set_rcvlowat,

		.read_skb = virtio_transport_read_skb,
	},

	.send_pkt = virtio_transport_send_pkt,
	.can_msgzerocopy = virtio_transport_can_msgzerocopy,
};

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

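/* RX worker: receive packets from the RX virtqueue and pass them up to the
 * core. Processing stops when reply buffers run out, leaving virtqueue
 * callbacks disabled until TX completions free up space again.
 */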
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct sk_buff *skb;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies. Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			virtio_vsock_skb_rx_put(skb);
			virtio_transport_deliver_tap_pkt(skb);
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

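/* Find the rx/tx/event virtqueues, read the guest CID from the config space
 * and mark the device ready. Buffers are not filled here; that happens in
 * virtio_vsock_vqs_start().
 */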
static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	int ret;

	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
			      NULL);
	if (ret < 0)
		return ret;

	virtio_vsock_update_guest_cid(vsock);

	virtio_device_ready(vdev);

	return 0;
}

static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
{
	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	/* virtio_transport_send_pkt() can queue packets once
	 * the_virtio_vsock is set, but they won't be processed until
	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
	 * when initialization finishes to send those packets queued
	 * earlier.
	 * We don't need to queue the other workers (rx, event) because
	 * as long as we don't fill the queues with empty buffers, the
	 * host can't send us any notification.
	 */
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

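/* Tear down the virtqueues: stop the workers, reset the device, reclaim all
 * buffers still owned by the device and purge pending TX packets. Used by
 * both device removal and PM freeze.
 */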
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call virtio_reset_device().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
}

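/* Device probe: allocate and initialize the per-device state, set up the
 * virtqueues and publish the device via 'the_virtio_vsock'.
 */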
static int virtio_vsock_probe(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = NULL;
	int ret;
	int i;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
		vsock->out_sgs[i] = &vsock->out_bufs[i];

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	/* Other work items can be queued before 'config->del_vqs()', so flush
	 * all of them before freeing the vsock object to avoid a
	 * use-after-free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

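/* PM freeze/restore mirror remove/probe, but keep the 'struct virtio_vsock'
 * allocation alive across the suspend cycle.
 */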
#ifdef CONFIG_PM_SLEEP
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	int ret;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

out:
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_vsock_freeze,
	.restore = virtio_vsock_restore,
#endif
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);