/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */
	struct rb_root evtchns;
	unsigned int nr_evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
	unsigned int ring_size;
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */
	spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;

	domid_t restrict_domid;
};

#define UNRESTRICTED_DOMID ((domid_t)-1)

/* One of these per event channel bound through this file handle. */
struct user_evtchn {
	struct rb_node node;
	struct per_user_data *user;
	evtchn_port_t port;
	bool enabled;
};
static evtchn_port_t *evtchn_alloc_ring(unsigned int size)
{
	evtchn_port_t *ring;
	size_t s = size * sizeof(*ring);

	/* Fall back to vmalloc() if a large ring can't be kmalloc()ed. */
	ring = kmalloc(s, GFP_KERNEL);
	if (!ring)
		ring = vmalloc(s);

	return ring;
}

static void evtchn_free_ring(evtchn_port_t *ring)
{
	kvfree(ring);
}
static unsigned int evtchn_ring_offset(struct per_user_data *u,
				       unsigned int idx)
{
	/* ring_size is always a power of two, so masking wraps the
	   free-running index into the ring. */
	return idx & (u->ring_size - 1);
}

static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
					unsigned int idx)
{
	return u->ring + evtchn_ring_offset(u, idx);
}
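
/*
 * Worked example of the indexing scheme (illustration only): with
 * ring_size = 64, ring_cons = 66 and ring_prod = 70 are valid
 * free-running counters; the ring holds ring_prod - ring_cons = 4
 * unread ports, at offsets 66 & 63 = 2 through 69 & 63 = 5. The
 * counters wrap only at UINT_MAX, which is harmless because both the
 * subtraction and the mask are modulo arithmetic.
 */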
static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;

	u->nr_evtchns++;

	while (*new) {
		struct user_evtchn *this;

		this = container_of(*new, struct user_evtchn, node);

		parent = *new;
		if (this->port < evtchn->port)
			new = &((*new)->rb_left);
		else if (this->port > evtchn->port)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&evtchn->node, parent, new);
	rb_insert_color(&evtchn->node, &u->evtchns);

	return 0;
}
static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	u->nr_evtchns--;
	rb_erase(&evtchn->node, &u->evtchns);
	kfree(evtchn);
}
static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
{
	struct rb_node *node = u->evtchns.rb_node;

	while (node) {
		struct user_evtchn *evtchn;

		evtchn = container_of(node, struct user_evtchn, node);

		/* The tree ordering must match add_evtchn() above. */
		if (evtchn->port < port)
			node = node->rb_left;
		else if (evtchn->port > port)
			node = node->rb_right;
		else
			return evtchn;
	}

	return NULL;
}
static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	struct user_evtchn *evtchn = data;
	struct per_user_data *u = evtchn->user;

	WARN(!evtchn->enabled,
	     "Interrupt for port %d, but apparently not enabled; per-user %p\n",
	     evtchn->port, u);

	evtchn->enabled = false;

	spin_lock(&u->ring_prod_lock);

	if ((u->ring_prod - u->ring_cons) < u->ring_size) {
		*evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else
		u->ring_overflow = 1;

	spin_unlock(&u->ring_prod_lock);

	return IRQ_HANDLED;
}
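
/*
 * Note on flow control: the handler above marks the channel disabled
 * and deliberately does not issue the "late EOI", so the event stays
 * masked at the Xen level. Userspace acknowledges a port by writing it
 * back to the device (see evtchn_write() below), which re-enables
 * delivery via xen_irq_lateeoi(). This prevents an event storm from a
 * port that userspace never services.
 */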
static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
					      u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & u->ring_size) != 0) {
		bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
			sizeof(evtchn_port_t);
		bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}
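
/*
 * Illustrative userspace consumer (a sketch, not part of this driver;
 * error handling omitted): ports arrive as a packed array of
 * evtchn_port_t. Assumes a port was already bound via one of the
 * ioctls below, and that the uapi header location matches your distro.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <xen/evtchn.h>		// uapi header; path may vary
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	evtchn_port_t ports[8];
 *	ssize_t n = read(fd, ports, sizeof(ports)); // blocks for an event
 *	for (ssize_t i = 0; i < n / (ssize_t)sizeof(ports[0]); i++)
 *		handle_port(ports[i]);	// handle_port() is hypothetical
 */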
static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	mutex_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		unsigned port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			xen_irq_lateeoi(irq_from_evtchn(port), 0);
		}
	}

	mutex_unlock(&u->bind_mutex);
	rc = count;

 out:
	free_page((unsigned long)kbuf);
	return rc;
}
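
/*
 * The matching userspace side of the above (a sketch, continuing the
 * read() example): after servicing a port, write it back to re-enable
 * delivery.
 *
 *	evtchn_port_t port = ports[i];
 *	write(fd, &port, sizeof(port));	// re-arms the port's interrupt
 */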
static int evtchn_resize_ring(struct per_user_data *u)
{
	unsigned int new_size;
	evtchn_port_t *new_ring, *old_ring;

	/*
	 * Ensure the ring is large enough to capture all possible
	 * events. i.e., one free slot for each bound event.
	 */
	if (u->nr_evtchns <= u->ring_size)
		return 0;

	if (u->ring_size == 0)
		new_size = 64;
	else
		new_size = 2 * u->ring_size;

	new_ring = evtchn_alloc_ring(new_size);
	if (!new_ring)
		return -ENOMEM;

	old_ring = u->ring;

	/*
	 * Access to the ring contents is serialized by either the
	 * prod /or/ cons lock so take both when resizing.
	 */
	mutex_lock(&u->ring_cons_mutex);
	spin_lock_irq(&u->ring_prod_lock);

	/*
	 * Copy the old ring contents to the new ring.
	 *
	 * To take care of wrapping, a full ring, and the new index
	 * pointing into the second half, simply copy the old contents
	 * twice.
	 *
	 * +---------+    +------------------+
	 * |34567  12| -> |34567  1234567  12|
	 * +-----p-c-+    +-------c------p---+
	 */
	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
	memcpy(new_ring + u->ring_size, old_ring,
	       u->ring_size * sizeof(*u->ring));

	u->ring = new_ring;
	u->ring_size = new_size;

	spin_unlock_irq(&u->ring_prod_lock);
	mutex_unlock(&u->ring_cons_mutex);

	evtchn_free_ring(old_ring);

	return 0;
}
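
/*
 * Worked example of the copy-twice trick (illustration only): with
 * ring_size = 8, ring_cons = 14 (offset 6) and ring_prod = 18
 * (offset 2), the live entries wrap around the end of the old ring.
 * After doubling to 16 entries the same free-running counters mask to
 * offsets 14 and 2, and the reader walks offsets 14, 15 (the second
 * copy of old entries 6 and 7) then, after wrap, 0 and 1 -- exactly
 * the four unread ports. No counter needs rewriting during the resize.
 */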
static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
	struct user_evtchn *evtchn;
	struct evtchn_close close;
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */
	evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
	if (!evtchn)
		return -ENOMEM;

	evtchn->user = u;
	evtchn->port = port;
	evtchn->enabled = true; /* start enabled */

	rc = add_evtchn(u, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_resize_ring(u);
	if (rc < 0)
		goto err;

	rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
					       u->name, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_make_refcounted(port);
	return rc;

err:
	/* bind failed, should close the port now */
	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
	del_evtchn(u, evtchn);
	return rc;
}
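
/*
 * Summary of the bind path above: allocate the tracking structure,
 * insert it into the per-user rbtree, grow the notification ring so a
 * slot exists for every bound port, then request the (lateeoi) irq.
 * On any failure the Xen port is closed, since the caller obtained it
 * solely for this binding and it would otherwise leak.
 */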
static void evtchn_unbind_from_user(struct per_user_data *u,
				    struct user_evtchn *evtchn)
{
	int irq = irq_from_evtchn(evtchn->port);

	BUG_ON(irq < 0);

	unbind_from_irqhandler(irq, evtchn);

	del_evtchn(u, evtchn);
}
static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	/* Prevent bind from racing with unbind */
	mutex_lock(&u->bind_mutex);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = xen_vcpu_nr(0);
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_virq.port);
		if (rc == 0)
			rc = bind_virq.port;
		break;
	}
	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID &&
		    u->restrict_domid != bind.remote_domain)
			break;

		bind_interdomain.remote_dom  = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
		if (rc == 0)
			rc = bind_interdomain.local_port;
		break;
	}
	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom        = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, alloc_unbound.port);
		if (rc == 0)
			rc = alloc_unbound.port;
		break;
	}
	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= xen_evtchn_nr_channels())
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, unbind.port);
		if (!evtchn)
			break;

		disable_irq(irq_from_evtchn(unbind.port));
		evtchn_unbind_from_user(u, evtchn);
		rc = 0;
		break;
	}
	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, notify.port);
		if (evtchn) {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}
	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&u->ring_prod_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&u->ring_prod_lock);
		mutex_unlock(&u->ring_cons_mutex);

		rc = 0;
		break;
	}
	case IOCTL_EVTCHN_RESTRICT_DOMID: {
		struct ioctl_evtchn_restrict_domid ierd;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&ierd, uarg, sizeof(ierd)))
			break;

		rc = -EINVAL;
		if (ierd.domid == 0 || ierd.domid >= DOMID_FIRST_RESERVED)
			break;

		u->restrict_domid = ierd.domid;
		rc = 0;

		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}
	mutex_unlock(&u->bind_mutex);

	return rc;
}
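
/*
 * Illustrative userspace use of the ioctls above (a sketch; error
 * handling omitted). Binds to a remote domain's unbound port, kicks
 * it, then unbinds. remote_domid and remote_port are assumed to have
 * been obtained out of band, e.g. via xenstore.
 *
 *	struct ioctl_evtchn_bind_interdomain bind = {
 *		.remote_domain = remote_domid,
 *		.remote_port   = remote_port,
 *	};
 *	int local = ioctl(fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
 *
 *	struct ioctl_evtchn_notify notify = { .port = local };
 *	ioctl(fd, IOCTL_EVTCHN_NOTIFY, &notify);
 *
 *	struct ioctl_evtchn_unbind unbind = { .port = local };
 *	ioctl(fd, IOCTL_EVTCHN_UNBIND, &unbind);
 */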
static unsigned int evtchn_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = POLLOUT | POLLWRNORM;
	struct per_user_data *u = file->private_data;

	poll_wait(file, &u->evtchn_wait, wait);
	if (u->ring_cons != u->ring_prod)
		mask |= POLLIN | POLLRDNORM;
	if (u->ring_overflow)
		mask = POLLERR;
	return mask;
}
static int evtchn_fasync(int fd, struct file *filp, int on)
{
	struct per_user_data *u = filp->private_data;

	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}
static int evtchn_open(struct inode *inode, struct file *filp)
{
	struct per_user_data *u;

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
	if (u->name == NULL) {
		kfree(u);
		return -ENOMEM;
	}

	init_waitqueue_head(&u->evtchn_wait);

	mutex_init(&u->bind_mutex);
	mutex_init(&u->ring_cons_mutex);
	spin_lock_init(&u->ring_prod_lock);

	u->restrict_domid = UNRESTRICTED_DOMID;

	filp->private_data = u;

	return nonseekable_open(inode, filp);
}
static int evtchn_release(struct inode *inode, struct file *filp)
{
	struct per_user_data *u = filp->private_data;
	struct rb_node *node;

	while ((node = u->evtchns.rb_node)) {
		struct user_evtchn *evtchn;

		evtchn = rb_entry(node, struct user_evtchn, node);
		disable_irq(irq_from_evtchn(evtchn->port));
		evtchn_unbind_from_user(u, evtchn);
	}

	evtchn_free_ring(u->ring);
	kfree(u->name);
	kfree(u);

	return 0;
}
static const struct file_operations evtchn_fops = {
	.owner   = THIS_MODULE,
	.read    = evtchn_read,
	.write   = evtchn_write,
	.unlocked_ioctl = evtchn_ioctl,
	.poll    = evtchn_poll,
	.fasync  = evtchn_fasync,
	.open    = evtchn_open,
	.release = evtchn_release,
	.llseek  = no_llseek,
};
static struct miscdevice evtchn_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "xen/evtchn",
	.fops  = &evtchn_fops,
};
static int __init evtchn_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	/* Create '/dev/xen/evtchn'. */
	err = misc_register(&evtchn_miscdev);
	if (err != 0) {
		pr_err("Could not register /dev/xen/evtchn\n");
		return err;
	}

	pr_info("Event-channel device installed\n");

	return 0;
}
static void __exit evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");