/*
 * common code for virtio vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
10 #include <linux/spinlock.h>
11 #include <linux/module.h>
12 #include <linux/ctype.h>
13 #include <linux/list.h>
14 #include <linux/virtio.h>
15 #include <linux/virtio_ids.h>
16 #include <linux/virtio_config.h>
17 #include <linux/virtio_vsock.h>
20 #include <net/af_vsock.h>
22 #define CREATE_TRACE_POINTS
23 #include <trace/events/vsock_virtio_transport_common.h>
25 /* How long to wait for graceful shutdown of a connection */
26 #define VSOCK_CLOSE_TIMEOUT (8 * HZ)
28 static const struct virtio_transport *virtio_transport_get_ops(void)
30 const struct vsock_transport *t = vsock_core_get_transport();
32 return container_of(t, struct virtio_transport, transport);
35 struct virtio_vsock_pkt *
36 virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
43 struct virtio_vsock_pkt *pkt;
46 pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
50 pkt->hdr.type = cpu_to_le16(info->type);
51 pkt->hdr.op = cpu_to_le16(info->op);
52 pkt->hdr.src_cid = cpu_to_le64(src_cid);
53 pkt->hdr.dst_cid = cpu_to_le64(dst_cid);
54 pkt->hdr.src_port = cpu_to_le32(src_port);
55 pkt->hdr.dst_port = cpu_to_le32(dst_port);
56 pkt->hdr.flags = cpu_to_le32(info->flags);
58 pkt->hdr.len = cpu_to_le32(len);
59 pkt->reply = info->reply;
62 if (info->msg && len > 0) {
63 pkt->buf = kmalloc(len, GFP_KERNEL);
66 err = memcpy_from_msg(pkt->buf, info->msg, len);
71 trace_virtio_transport_alloc_pkt(src_cid, src_port,
86 EXPORT_SYMBOL_GPL(virtio_transport_alloc_pkt);
88 static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
89 struct virtio_vsock_pkt_info *info)
91 u32 src_cid, src_port, dst_cid, dst_port;
92 struct virtio_vsock_sock *vvs;
93 struct virtio_vsock_pkt *pkt;
94 u32 pkt_len = info->pkt_len;
96 src_cid = vm_sockets_get_local_cid();
97 src_port = vsk->local_addr.svm_port;
98 if (!info->remote_cid) {
99 dst_cid = vsk->remote_addr.svm_cid;
100 dst_port = vsk->remote_addr.svm_port;
102 dst_cid = info->remote_cid;
103 dst_port = info->remote_port;
108 /* we can send less than pkt_len bytes */
109 if (pkt_len > VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE)
110 pkt_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
112 /* virtio_transport_get_credit might return less than pkt_len credit */
113 pkt_len = virtio_transport_get_credit(vvs, pkt_len);
115 /* Do not send zero length OP_RW pkt */
116 if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
119 pkt = virtio_transport_alloc_pkt(info, pkt_len,
123 virtio_transport_put_credit(vvs, pkt_len);
127 virtio_transport_inc_tx_pkt(vvs, pkt);
129 return virtio_transport_get_ops()->send_pkt(pkt);
132 static void virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
133 struct virtio_vsock_pkt *pkt)
135 vvs->rx_bytes += pkt->len;
138 static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
139 struct virtio_vsock_pkt *pkt)
141 vvs->rx_bytes -= pkt->len;
142 vvs->fwd_cnt += pkt->len;
145 void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
147 spin_lock_bh(&vvs->tx_lock);
148 pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
149 pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
150 spin_unlock_bh(&vvs->tx_lock);
152 EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
154 u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
158 spin_lock_bh(&vvs->tx_lock);
159 ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
163 spin_unlock_bh(&vvs->tx_lock);
167 EXPORT_SYMBOL_GPL(virtio_transport_get_credit);
169 void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
171 spin_lock_bh(&vvs->tx_lock);
172 vvs->tx_cnt -= credit;
173 spin_unlock_bh(&vvs->tx_lock);
175 EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
177 static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
179 struct virtio_vsock_hdr *hdr)
181 struct virtio_vsock_pkt_info info = {
182 .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
187 return virtio_transport_send_pkt_info(vsk, &info);
191 virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
195 struct virtio_vsock_sock *vvs = vsk->trans;
196 struct virtio_vsock_pkt *pkt;
197 size_t bytes, total = 0;
200 spin_lock_bh(&vvs->rx_lock);
201 while (total < len && !list_empty(&vvs->rx_queue)) {
202 pkt = list_first_entry(&vvs->rx_queue,
203 struct virtio_vsock_pkt, list);
206 if (bytes > pkt->len - pkt->off)
207 bytes = pkt->len - pkt->off;
209 /* sk_lock is held by caller so no one else can dequeue.
210 * Unlock rx_lock since memcpy_to_msg() may sleep.
212 spin_unlock_bh(&vvs->rx_lock);
214 err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
218 spin_lock_bh(&vvs->rx_lock);
222 if (pkt->off == pkt->len) {
223 virtio_transport_dec_rx_pkt(vvs, pkt);
224 list_del(&pkt->list);
225 virtio_transport_free_pkt(pkt);
228 spin_unlock_bh(&vvs->rx_lock);
230 /* Send a credit pkt to peer */
231 virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
243 virtio_transport_stream_dequeue(struct vsock_sock *vsk,
245 size_t len, int flags)
247 if (flags & MSG_PEEK)
250 return virtio_transport_stream_do_dequeue(vsk, msg, len);
252 EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
255 virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
257 size_t len, int flags)
261 EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);
263 s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
265 struct virtio_vsock_sock *vvs = vsk->trans;
268 spin_lock_bh(&vvs->rx_lock);
269 bytes = vvs->rx_bytes;
270 spin_unlock_bh(&vvs->rx_lock);
274 EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);
276 static s64 virtio_transport_has_space(struct vsock_sock *vsk)
278 struct virtio_vsock_sock *vvs = vsk->trans;
281 bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
288 s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
290 struct virtio_vsock_sock *vvs = vsk->trans;
293 spin_lock_bh(&vvs->tx_lock);
294 bytes = virtio_transport_has_space(vsk);
295 spin_unlock_bh(&vvs->tx_lock);
299 EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);
301 int virtio_transport_do_socket_init(struct vsock_sock *vsk,
302 struct vsock_sock *psk)
304 struct virtio_vsock_sock *vvs;
306 vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
313 struct virtio_vsock_sock *ptrans = psk->trans;
315 vvs->buf_size = ptrans->buf_size;
316 vvs->buf_size_min = ptrans->buf_size_min;
317 vvs->buf_size_max = ptrans->buf_size_max;
318 vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
320 vvs->buf_size = VIRTIO_VSOCK_DEFAULT_BUF_SIZE;
321 vvs->buf_size_min = VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE;
322 vvs->buf_size_max = VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE;
325 vvs->buf_alloc = vvs->buf_size;
327 spin_lock_init(&vvs->rx_lock);
328 spin_lock_init(&vvs->tx_lock);
329 INIT_LIST_HEAD(&vvs->rx_queue);
333 EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
335 u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk)
337 struct virtio_vsock_sock *vvs = vsk->trans;
339 return vvs->buf_size;
341 EXPORT_SYMBOL_GPL(virtio_transport_get_buffer_size);
343 u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk)
345 struct virtio_vsock_sock *vvs = vsk->trans;
347 return vvs->buf_size_min;
349 EXPORT_SYMBOL_GPL(virtio_transport_get_min_buffer_size);
351 u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk)
353 struct virtio_vsock_sock *vvs = vsk->trans;
355 return vvs->buf_size_max;
357 EXPORT_SYMBOL_GPL(virtio_transport_get_max_buffer_size);
359 void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
361 struct virtio_vsock_sock *vvs = vsk->trans;
363 if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
364 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
365 if (val < vvs->buf_size_min)
366 vvs->buf_size_min = val;
367 if (val > vvs->buf_size_max)
368 vvs->buf_size_max = val;
370 vvs->buf_alloc = val;
372 EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
374 void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
376 struct virtio_vsock_sock *vvs = vsk->trans;
378 if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
379 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
380 if (val > vvs->buf_size)
382 vvs->buf_size_min = val;
384 EXPORT_SYMBOL_GPL(virtio_transport_set_min_buffer_size);
386 void virtio_transport_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
388 struct virtio_vsock_sock *vvs = vsk->trans;
390 if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
391 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
392 if (val < vvs->buf_size)
394 vvs->buf_size_max = val;
396 EXPORT_SYMBOL_GPL(virtio_transport_set_max_buffer_size);
399 virtio_transport_notify_poll_in(struct vsock_sock *vsk,
401 bool *data_ready_now)
403 if (vsock_stream_has_data(vsk))
404 *data_ready_now = true;
406 *data_ready_now = false;
410 EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);
413 virtio_transport_notify_poll_out(struct vsock_sock *vsk,
415 bool *space_avail_now)
419 free_space = vsock_stream_has_space(vsk);
421 *space_avail_now = true;
422 else if (free_space == 0)
423 *space_avail_now = false;
427 EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);
429 int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
430 size_t target, struct vsock_transport_recv_notify_data *data)
434 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);
436 int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
437 size_t target, struct vsock_transport_recv_notify_data *data)
441 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);
443 int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
444 size_t target, struct vsock_transport_recv_notify_data *data)
448 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);
450 int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
451 size_t target, ssize_t copied, bool data_read,
452 struct vsock_transport_recv_notify_data *data)
456 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);
458 int virtio_transport_notify_send_init(struct vsock_sock *vsk,
459 struct vsock_transport_send_notify_data *data)
463 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);
465 int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
466 struct vsock_transport_send_notify_data *data)
470 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);
472 int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
473 struct vsock_transport_send_notify_data *data)
477 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);
479 int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
480 ssize_t written, struct vsock_transport_send_notify_data *data)
484 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
486 u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
488 struct virtio_vsock_sock *vvs = vsk->trans;
490 return vvs->buf_size;
492 EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
494 bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
498 EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);
500 bool virtio_transport_stream_allow(u32 cid, u32 port)
504 EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);
506 int virtio_transport_dgram_bind(struct vsock_sock *vsk,
507 struct sockaddr_vm *addr)
511 EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);
513 bool virtio_transport_dgram_allow(u32 cid, u32 port)
517 EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);
519 int virtio_transport_connect(struct vsock_sock *vsk)
521 struct virtio_vsock_pkt_info info = {
522 .op = VIRTIO_VSOCK_OP_REQUEST,
523 .type = VIRTIO_VSOCK_TYPE_STREAM,
527 return virtio_transport_send_pkt_info(vsk, &info);
529 EXPORT_SYMBOL_GPL(virtio_transport_connect);
531 int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
533 struct virtio_vsock_pkt_info info = {
534 .op = VIRTIO_VSOCK_OP_SHUTDOWN,
535 .type = VIRTIO_VSOCK_TYPE_STREAM,
536 .flags = (mode & RCV_SHUTDOWN ?
537 VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
538 (mode & SEND_SHUTDOWN ?
539 VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
543 return virtio_transport_send_pkt_info(vsk, &info);
545 EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
548 virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
549 struct sockaddr_vm *remote_addr,
555 EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);
558 virtio_transport_stream_enqueue(struct vsock_sock *vsk,
562 struct virtio_vsock_pkt_info info = {
563 .op = VIRTIO_VSOCK_OP_RW,
564 .type = VIRTIO_VSOCK_TYPE_STREAM,
570 return virtio_transport_send_pkt_info(vsk, &info);
572 EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
574 void virtio_transport_destruct(struct vsock_sock *vsk)
576 struct virtio_vsock_sock *vvs = vsk->trans;
580 EXPORT_SYMBOL_GPL(virtio_transport_destruct);
582 static int virtio_transport_reset(struct vsock_sock *vsk,
583 struct virtio_vsock_pkt *pkt)
585 struct virtio_vsock_pkt_info info = {
586 .op = VIRTIO_VSOCK_OP_RST,
587 .type = VIRTIO_VSOCK_TYPE_STREAM,
592 /* Send RST only if the original pkt is not a RST pkt */
593 if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
596 return virtio_transport_send_pkt_info(vsk, &info);
599 /* Normally packets are associated with a socket. There may be no socket if an
600 * attempt was made to connect to a socket that does not exist.
602 static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
604 const struct virtio_transport *t;
605 struct virtio_vsock_pkt *reply;
606 struct virtio_vsock_pkt_info info = {
607 .op = VIRTIO_VSOCK_OP_RST,
608 .type = le16_to_cpu(pkt->hdr.type),
612 /* Send RST only if the original pkt is not a RST pkt */
613 if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
616 reply = virtio_transport_alloc_pkt(&info, 0,
617 le64_to_cpu(pkt->hdr.dst_cid),
618 le32_to_cpu(pkt->hdr.dst_port),
619 le64_to_cpu(pkt->hdr.src_cid),
620 le32_to_cpu(pkt->hdr.src_port));
624 t = virtio_transport_get_ops();
626 virtio_transport_free_pkt(reply);
630 return t->send_pkt(reply);
633 static void virtio_transport_wait_close(struct sock *sk, long timeout)
639 prepare_to_wait(sk_sleep(sk), &wait,
641 if (sk_wait_event(sk, &timeout,
642 sock_flag(sk, SOCK_DONE)))
644 } while (!signal_pending(current) && timeout);
646 finish_wait(sk_sleep(sk), &wait);
650 static void virtio_transport_do_close(struct vsock_sock *vsk,
653 struct sock *sk = sk_vsock(vsk);
655 sock_set_flag(sk, SOCK_DONE);
656 vsk->peer_shutdown = SHUTDOWN_MASK;
657 if (vsock_stream_has_data(vsk) <= 0)
658 sk->sk_state = SS_DISCONNECTING;
659 sk->sk_state_change(sk);
661 if (vsk->close_work_scheduled &&
662 (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
663 vsk->close_work_scheduled = false;
665 vsock_remove_sock(vsk);
667 /* Release refcnt obtained when we scheduled the timeout */
672 static void virtio_transport_close_timeout(struct work_struct *work)
674 struct vsock_sock *vsk =
675 container_of(work, struct vsock_sock, close_work.work);
676 struct sock *sk = sk_vsock(vsk);
681 if (!sock_flag(sk, SOCK_DONE)) {
682 (void)virtio_transport_reset(vsk, NULL);
684 virtio_transport_do_close(vsk, false);
687 vsk->close_work_scheduled = false;
693 /* User context, vsk->sk is locked */
694 static bool virtio_transport_close(struct vsock_sock *vsk)
696 struct sock *sk = &vsk->sk;
698 if (!(sk->sk_state == SS_CONNECTED ||
699 sk->sk_state == SS_DISCONNECTING))
702 /* Already received SHUTDOWN from peer, reply with RST */
703 if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
704 (void)virtio_transport_reset(vsk, NULL);
708 if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
709 (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);
711 if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
712 virtio_transport_wait_close(sk, sk->sk_lingertime);
714 if (sock_flag(sk, SOCK_DONE)) {
719 INIT_DELAYED_WORK(&vsk->close_work,
720 virtio_transport_close_timeout);
721 vsk->close_work_scheduled = true;
722 schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
726 void virtio_transport_release(struct vsock_sock *vsk)
728 struct virtio_vsock_sock *vvs = vsk->trans;
729 struct virtio_vsock_pkt *pkt, *tmp;
730 struct sock *sk = &vsk->sk;
731 bool remove_sock = true;
734 if (sk->sk_type == SOCK_STREAM)
735 remove_sock = virtio_transport_close(vsk);
737 list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
738 list_del(&pkt->list);
739 virtio_transport_free_pkt(pkt);
744 vsock_remove_sock(vsk);
746 EXPORT_SYMBOL_GPL(virtio_transport_release);
749 virtio_transport_recv_connecting(struct sock *sk,
750 struct virtio_vsock_pkt *pkt)
752 struct vsock_sock *vsk = vsock_sk(sk);
756 switch (le16_to_cpu(pkt->hdr.op)) {
757 case VIRTIO_VSOCK_OP_RESPONSE:
758 sk->sk_state = SS_CONNECTED;
759 sk->sk_socket->state = SS_CONNECTED;
760 vsock_insert_connected(vsk);
761 sk->sk_state_change(sk);
763 case VIRTIO_VSOCK_OP_INVALID:
765 case VIRTIO_VSOCK_OP_RST:
777 virtio_transport_reset(vsk, pkt);
778 sk->sk_state = SS_UNCONNECTED;
780 sk->sk_error_report(sk);
785 virtio_transport_recv_connected(struct sock *sk,
786 struct virtio_vsock_pkt *pkt)
788 struct vsock_sock *vsk = vsock_sk(sk);
789 struct virtio_vsock_sock *vvs = vsk->trans;
792 switch (le16_to_cpu(pkt->hdr.op)) {
793 case VIRTIO_VSOCK_OP_RW:
794 pkt->len = le32_to_cpu(pkt->hdr.len);
797 spin_lock_bh(&vvs->rx_lock);
798 virtio_transport_inc_rx_pkt(vvs, pkt);
799 list_add_tail(&pkt->list, &vvs->rx_queue);
800 spin_unlock_bh(&vvs->rx_lock);
802 sk->sk_data_ready(sk);
804 case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
805 sk->sk_write_space(sk);
807 case VIRTIO_VSOCK_OP_SHUTDOWN:
808 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
809 vsk->peer_shutdown |= RCV_SHUTDOWN;
810 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
811 vsk->peer_shutdown |= SEND_SHUTDOWN;
812 if (vsk->peer_shutdown == SHUTDOWN_MASK &&
813 vsock_stream_has_data(vsk) <= 0)
814 sk->sk_state = SS_DISCONNECTING;
815 if (le32_to_cpu(pkt->hdr.flags))
816 sk->sk_state_change(sk);
818 case VIRTIO_VSOCK_OP_RST:
819 virtio_transport_do_close(vsk, true);
826 virtio_transport_free_pkt(pkt);
831 virtio_transport_recv_disconnecting(struct sock *sk,
832 struct virtio_vsock_pkt *pkt)
834 struct vsock_sock *vsk = vsock_sk(sk);
836 if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
837 virtio_transport_do_close(vsk, true);
841 virtio_transport_send_response(struct vsock_sock *vsk,
842 struct virtio_vsock_pkt *pkt)
844 struct virtio_vsock_pkt_info info = {
845 .op = VIRTIO_VSOCK_OP_RESPONSE,
846 .type = VIRTIO_VSOCK_TYPE_STREAM,
847 .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
848 .remote_port = le32_to_cpu(pkt->hdr.src_port),
853 return virtio_transport_send_pkt_info(vsk, &info);
856 /* Handle server socket */
858 virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
860 struct vsock_sock *vsk = vsock_sk(sk);
861 struct vsock_sock *vchild;
864 if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
865 virtio_transport_reset(vsk, pkt);
869 if (sk_acceptq_is_full(sk)) {
870 virtio_transport_reset(vsk, pkt);
874 child = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
877 virtio_transport_reset(vsk, pkt);
881 sk->sk_ack_backlog++;
883 lock_sock_nested(child, SINGLE_DEPTH_NESTING);
885 child->sk_state = SS_CONNECTED;
887 vchild = vsock_sk(child);
888 vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
889 le32_to_cpu(pkt->hdr.dst_port));
890 vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
891 le32_to_cpu(pkt->hdr.src_port));
893 vsock_insert_connected(vchild);
894 vsock_enqueue_accept(sk, child);
895 virtio_transport_send_response(vchild, pkt);
899 sk->sk_data_ready(sk);
903 static bool virtio_transport_space_update(struct sock *sk,
904 struct virtio_vsock_pkt *pkt)
906 struct vsock_sock *vsk = vsock_sk(sk);
907 struct virtio_vsock_sock *vvs = vsk->trans;
908 bool space_available;
910 /* buf_alloc and fwd_cnt is always included in the hdr */
911 spin_lock_bh(&vvs->tx_lock);
912 vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
913 vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
914 space_available = virtio_transport_has_space(vsk);
915 spin_unlock_bh(&vvs->tx_lock);
916 return space_available;
919 /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
922 void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
924 struct sockaddr_vm src, dst;
925 struct vsock_sock *vsk;
927 bool space_available;
929 vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
930 le32_to_cpu(pkt->hdr.src_port));
931 vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
932 le32_to_cpu(pkt->hdr.dst_port));
934 trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
935 dst.svm_cid, dst.svm_port,
936 le32_to_cpu(pkt->hdr.len),
937 le16_to_cpu(pkt->hdr.type),
938 le16_to_cpu(pkt->hdr.op),
939 le32_to_cpu(pkt->hdr.flags),
940 le32_to_cpu(pkt->hdr.buf_alloc),
941 le32_to_cpu(pkt->hdr.fwd_cnt));
943 if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
944 (void)virtio_transport_reset_no_sock(pkt);
948 /* The socket must be in connected or bound table
949 * otherwise send reset back
951 sk = vsock_find_connected_socket(&src, &dst);
953 sk = vsock_find_bound_socket(&dst);
955 (void)virtio_transport_reset_no_sock(pkt);
964 space_available = virtio_transport_space_update(sk, pkt);
966 /* Update CID in case it has changed after a transport reset event */
967 vsk->local_addr.svm_cid = dst.svm_cid;
970 sk->sk_write_space(sk);
972 switch (sk->sk_state) {
973 case VSOCK_SS_LISTEN:
974 virtio_transport_recv_listen(sk, pkt);
975 virtio_transport_free_pkt(pkt);
978 virtio_transport_recv_connecting(sk, pkt);
979 virtio_transport_free_pkt(pkt);
982 virtio_transport_recv_connected(sk, pkt);
984 case SS_DISCONNECTING:
985 virtio_transport_recv_disconnecting(sk, pkt);
986 virtio_transport_free_pkt(pkt);
989 virtio_transport_free_pkt(pkt);
994 /* Release refcnt obtained when we fetched this socket out of the
995 * bound or connected list.
1001 virtio_transport_free_pkt(pkt);
1003 EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
1005 void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
1010 EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
1012 MODULE_LICENSE("GPL v2");
1013 MODULE_AUTHOR("Asias He");
1014 MODULE_DESCRIPTION("common code for virtio vsock");