/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>

#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	atomic_t eoi;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);

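/*
 * Copy data from the socket receive queue into the "in" data ring.
 * The free space in the ring may wrap around the end of the array, so
 * the copy is staged through one or two kvecs. Returns false if the
 * ring is full or in an error state, true if the worker may have more
 * to do.
 */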
static bool pvcalls_conn_back_read(void *opaque)
{
	struct sock_mapping *map = (struct sock_mapping *)opaque;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
	int32_t error;
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	unsigned long flags;
	int ret;

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* read the indexes first, then deal with the data */
	virt_mb();

	if (error)
		return false;

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return false;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
		atomic_set(&map->read, 0);
		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
				flags);
		return true;
	}
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
	wanted = array_size - size;
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

	memset(&msg, 0, sizeof(msg));
	if (masked_prod < masked_cons) {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = wanted;
		iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 1, wanted);
	} else {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = array_size - masked_prod;
		vec[1].iov_base = data->in;
		vec[1].iov_len = wanted - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 2, wanted);
	}

	atomic_set(&map->read, 0);
	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
	WARN_ON(ret > wanted);
	if (ret == -EAGAIN) /* shouldn't happen */
		return true;
	if (!ret)
		ret = -ENOTCONN;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
		atomic_inc(&map->read);
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);

	/* write the data, then modify the indexes */
	virt_wmb();
	if (ret < 0) {
		atomic_set(&map->read, 0);
		intf->in_error = ret;
	} else
		intf->in_prod = prod + ret;
	/* update the indexes, then notify the other end */
	virt_wmb();
	notify_remote_via_irq(map->irq);

	return true;
}

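/*
 * Drain the "out" data ring into the socket with a non-blocking
 * sendmsg. If data is left on the ring (short send, or new bytes
 * queued by the frontend meanwhile), map->write and map->io are bumped
 * so the ioworker tries again.
 */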
static bool pvcalls_conn_back_write(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, array_size;
	int ret;

	cons = intf->out_cons;
	prod = intf->out_prod;
	/* read the indexes before dealing with the data */
	virt_mb();

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	size = pvcalls_queued(prod, cons, array_size);
	if (size == 0)
		return false;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags |= MSG_DONTWAIT;
	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = size;
		iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 1, size);
	} else {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
		vec[1].iov_base = data->out;
		vec[1].iov_len = size - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 2, size);
	}

	atomic_set(&map->write, 0);
	ret = inet_sendmsg(map->sock, &msg, size);
	if (ret == -EAGAIN) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
		return true;
	}

	/* write the data, then update the indexes */
	virt_wmb();
	if (ret < 0) {
		intf->out_error = ret;
	} else {
		intf->out_error = 0;
		intf->out_cons = cons + ret;
		prod = intf->out_prod;
	}
	/* update the indexes, then notify the other end */
	virt_wmb();
	if (prod != cons + ret) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
	}
	notify_remote_via_irq(map->irq);

	return true;
}

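/*
 * Per-connection worker: loops while I/O is pending, servicing reads
 * and writes. The (lateeoi) EOI for the connection event channel is
 * only issued once no write work remains, flagged as spurious if
 * neither direction made progress.
 */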
static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
		ioworker);
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while (atomic_read(&map->io) > 0) {
		if (atomic_read(&map->release) > 0) {
			atomic_set(&map->release, 0);
			return;
		}

		if (atomic_read(&map->read) > 0 &&
		    pvcalls_conn_back_read(map))
			eoi_flags = 0;
		if (atomic_read(&map->write) > 0 &&
		    pvcalls_conn_back_write(map))
			eoi_flags = 0;

		if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
			atomic_set(&map->eoi, 0);
			xen_irq_lateeoi(map->irq, eoi_flags);
			eoi_flags = XEN_EOI_FLAG_SPURIOUS;
		}

		atomic_dec(&map->io);
	}
}

static int pvcalls_back_socket(struct xenbus_device *dev,
		struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EINVAL;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;

	if (map == NULL)
		return;

	atomic_inc(&map->read);
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_ioworker *iow;

	if (map == NULL)
		return;

	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);
}

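/*
 * Create the backend state for an active socket: map the indexes page
 * and the data ring granted by the frontend, bind the per-connection
 * event channel, and set up the ioworker that moves data between the
 * socket and the ring.
 */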
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		evtchn_port_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
			__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			fedata->dev->otherend_id, evtchn,
			pvcalls_back_conn_event, 0, "pvcalls-backend", map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;
out:
	down(&fedata->socket_lock);
	list_del(&map->list);
	pvcalls_back_release_active(fedata->dev, fedata, map);
	up(&fedata->socket_lock);
	return NULL;
}

static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
	}

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}

static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	disable_irq(map->irq);
	if (map->sock->sk != NULL) {
		write_lock_bh(&map->sock->sk->sk_callback_lock);
		map->sock->sk->sk_user_data = NULL;
		map->sock->sk->sk_data_ready = map->saved_data_ready;
		write_unlock_bh(&map->sock->sk->sk_callback_lock);
	}

	atomic_set(&map->release, 1);
	flush_work(&map->ioworker.register_work);

	xenbus_unmap_ring_vfree(dev, map->bytes);
	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
	unbind_from_irqhandler(map->irq, map);

	sock_release(map->sock);
	kfree(map);

	return 0;
}

static int pvcalls_back_release_passive(struct xenbus_device *dev,
					struct pvcalls_fedata *fedata,
					struct sockpass_mapping *mappass)
{
	if (mappass->sock->sk != NULL) {
		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
		mappass->sock->sk->sk_user_data = NULL;
		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
	}
	sock_release(mappass->sock);
	flush_workqueue(mappass->wq);
	destroy_workqueue(mappass->wq);
	kfree(mappass);

	return 0;
}

static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	int ret = 0;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		if (map->id == req->u.release.id) {
			list_del(&map->list);
			up(&fedata->socket_lock);
			ret = pvcalls_back_release_active(dev, fedata, map);
			goto out;
		}
	}
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.release.id);
	if (mappass != NULL) {
		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
		up(&fedata->socket_lock);
		ret = pvcalls_back_release_passive(dev, fedata, mappass);
	} else
		up(&fedata->socket_lock);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->u.release.id = req->u.release.id;
	rsp->cmd = req->cmd;
	rsp->ret = ret;
	return 0;
}

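/*
 * Deferred accept work, run from the passive socket's workqueue. It
 * accepts a pending connection without blocking (on -EAGAIN it simply
 * returns; pvcalls_pass_sk_data_ready re-queues it when a connection
 * arrives), wraps the new socket in an active sock_mapping and pushes
 * the response to the frontend.
 */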
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
	if (ret == -EAGAIN) {
		sock_release(sock);
		return;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}

static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		queue_work(mappass->wq, &mappass->register_work);
	}
}

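/*
 * Handle PVCALLS_BIND: create a passive (listening-side) socket, bind
 * it, and track it in the socketpass radix tree keyed by the
 * frontend-chosen id. Accept and poll requests on this socket are
 * serviced by the per-mapping workqueue set up here.
 */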
static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}

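/*
 * Handle PVCALLS_ACCEPT: stash a copy of the request and kick the
 * workqueue. Returning -1 tells the caller that the response will be
 * sent later, by __pvcalls_back_accept.
 */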
static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
		req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	struct xen_pvcalls_response *rsp;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	unsigned long flags;
	int ret;
	bool data;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.poll.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		return -EINVAL;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		ret = -EINTR;
		goto out;
	}

	mappass->reqcopy = *req;
	icsk = inet_csk(mappass->sock->sk);
	queue = &icsk->icsk_accept_queue;
	data = READ_ONCE(queue->rskq_accept_head) != NULL;
	if (data) {
		mappass->reqcopy.cmd = 0;
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out:
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.poll.id = req->u.poll.id;
	rsp->ret = ret;
	return 0;
}

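/*
 * Dispatch one command ring request. A non-zero return value means no
 * response has been queued yet (accept and poll may complete later).
 */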
static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
				   struct xen_pvcalls_request *req)
{
	int ret = 0;

	switch (req->cmd) {
	case PVCALLS_SOCKET:
		ret = pvcalls_back_socket(dev, req);
		break;
	case PVCALLS_CONNECT:
		ret = pvcalls_back_connect(dev, req);
		break;
	case PVCALLS_RELEASE:
		ret = pvcalls_back_release(dev, req);
		break;
	case PVCALLS_BIND:
		ret = pvcalls_back_bind(dev, req);
		break;
	case PVCALLS_LISTEN:
		ret = pvcalls_back_listen(dev, req);
		break;
	case PVCALLS_ACCEPT:
		ret = pvcalls_back_accept(dev, req);
		break;
	case PVCALLS_POLL:
		ret = pvcalls_back_poll(dev, req);
		break;
	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(
				&fedata->ring, fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}
	}
	return ret;
}

static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}

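/*
 * Interrupt handler for the command ring event channel. Uses the
 * lateeoi model: the EOI is only signalled after the ring has been
 * drained, and flagged as spurious if no frontend data was found.
 */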
static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_fedata *fedata = NULL;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	if (dev) {
		fedata = dev_get_drvdata(&dev->dev);
		if (fedata) {
			pvcalls_back_work(fedata);
			eoi_flags = 0;
		}
	}

	xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
{
	struct sock_mapping *map = sock_map;
	struct pvcalls_ioworker *iow;

	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
		map->sock->sk->sk_user_data != map) {
		xen_irq_lateeoi(irq, 0);
		return IRQ_HANDLED;
	}

	iow = &map->ioworker;

	atomic_inc(&map->write);
	atomic_inc(&map->eoi);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

	return IRQ_HANDLED;
}

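/*
 * Connect to a frontend: read "port" and "ring-ref" from xenstore,
 * bind the event channel, map the command ring and register the
 * frontend on the global list.
 */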
static int backend_connect(struct xenbus_device *dev)
{
	int err, evtchn;
	grant_ref_t ring_ref;
	struct pvcalls_fedata *fedata = NULL;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;

	fedata->irq = -1;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel",
				 dev->otherend);
		goto error;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		goto error;
	}

	err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
	if (err < 0)
		goto error;
	fedata->irq = err;

	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&fedata->sring);
	if (err < 0)
		goto error;

	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
	fedata->dev = dev;

	INIT_LIST_HEAD(&fedata->socket_mappings);
	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
	sema_init(&fedata->socket_lock, 1);
	dev_set_drvdata(&dev->dev, fedata);

	down(&pvcalls_back_global.frontends_lock);
	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
	up(&pvcalls_back_global.frontends_lock);

	return 0;

error:
	if (fedata->irq >= 0)
		unbind_from_irqhandler(fedata->irq, dev);
	if (fedata->sring != NULL)
		xenbus_unmap_ring_vfree(dev, fedata->sring);
	kfree(fedata);
	return err;
}

static int backend_disconnect(struct xenbus_device *dev)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	struct radix_tree_iter iter;
	void **slot;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		list_del(&map->list);
		pvcalls_back_release_active(dev, fedata, map);
	}

	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
		mappass = radix_tree_deref_slot(slot);
		if (!mappass)
			continue;
		if (radix_tree_exception(mappass)) {
			if (radix_tree_deref_retry(mappass))
				slot = radix_tree_iter_retry(&iter);
		} else {
			radix_tree_delete(&fedata->socketpass_mappings,
					  mappass->id);
			pvcalls_back_release_passive(dev, fedata, mappass);
		}
	}
	up(&fedata->socket_lock);

	unbind_from_irqhandler(fedata->irq, dev);
	xenbus_unmap_ring_vfree(dev, fedata->sring);

	list_del(&fedata->list);
	kfree(fedata);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static int pvcalls_back_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	int err, abort;
	struct xenbus_transaction xbt;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("%s cannot create xenstore transaction\n", __func__);
		return err;
	}

	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
			    PVCALLS_VERSIONS);
	if (err) {
		pr_warn("%s write out 'versions' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
			    MAX_RING_ORDER);
	if (err) {
		pr_warn("%s write out 'max-page-order' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "function-calls",
			    XENBUS_FUNCTIONS_CALLS);
	if (err) {
		pr_warn("%s write out 'function-calls' failed\n", __func__);
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		pr_warn("%s cannot complete xenstore transaction\n", __func__);
		return err;
	}

	if (abort)
		return -EFAULT;

	xenbus_switch_state(dev, XenbusStateInitWait);

	return 0;
}

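/*
 * Walk the xenbus state machine one step at a time until the backend
 * reaches the requested state; connecting and disconnecting happen as
 * side effects of the Connected and Closing transitions.
 */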
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				__WARN();
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				if (backend_connect(dev))
					return;
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				__WARN();
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				__WARN();
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				__WARN();
			}
			break;
		default:
			__WARN();
		}
	}
}

static void pvcalls_back_changed(struct xenbus_device *dev,
				 enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(dev, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		set_backend_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		device_unregister(&dev->dev);
		break;
	case XenbusStateUnknown:
		set_backend_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

static int pvcalls_back_remove(struct xenbus_device *dev)
{
	return 0;
}

static int pvcalls_back_uevent(struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}

static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};

static int __init pvcalls_back_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xenbus_register_backend(&pvcalls_back_driver);
	if (ret < 0)
		return ret;

	sema_init(&pvcalls_back_global.frontends_lock, 1);
	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
	return 0;
}
module_init(pvcalls_back_init);

static void __exit pvcalls_back_fin(void)
{
	struct pvcalls_fedata *fedata, *nfedata;

	down(&pvcalls_back_global.frontends_lock);
	list_for_each_entry_safe(fedata, nfedata,
				 &pvcalls_back_global.frontends, list) {
		backend_disconnect(fedata->dev);
	}
	up(&pvcalls_back_global.frontends_lock);

	xenbus_unregister_driver(&pvcalls_back_driver);
}
module_exit(pvcalls_back_fin);

MODULE_DESCRIPTION("Xen PV Calls backend driver");
MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
MODULE_LICENSE("GPL");