/*
 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
 *
 * Licensed under the GPLv2 only.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>
#define UIO_MAX_DEVICES		(1U << MINORBITS)

static int uio_major;
static struct cdev *uio_cdev;
static DEFINE_IDR(uio_idr);
static const struct file_operations uio_fops;

/* Protect idr accesses */
static DEFINE_MUTEX(minor_lock);
struct uio_map {
	struct kobject kobj;
	struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)

static ssize_t map_name_show(struct uio_mem *mem, char *buf)
{
	if (unlikely(!mem->name))
		mem->name = "";

	return sprintf(buf, "%s\n", mem->name);
}
static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->addr);
}
static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "%pa\n", &mem->size);
}
static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr & ~PAGE_MASK);
}
struct map_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_mem *, char *);
	ssize_t (*store)(struct uio_mem *, const char *, size_t);
};
static struct map_sysfs_entry name_attribute =
	__ATTR(name, S_IRUGO, map_name_show, NULL);
static struct map_sysfs_entry addr_attribute =
	__ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct map_sysfs_entry size_attribute =
	__ATTR(size, S_IRUGO, map_size_show, NULL);
static struct map_sysfs_entry offset_attribute =
	__ATTR(offset, S_IRUGO, map_offset_show, NULL);
static struct attribute *attrs[] = {
	&name_attribute.attr,
	&addr_attribute.attr,
	&size_attribute.attr,
	&offset_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};
static void map_release(struct kobject *kobj)
{
	struct uio_map *map = to_map(kobj);

	kfree(map);
}
static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct uio_map *map = to_map(kobj);
	struct uio_mem *mem = map->mem;
	struct map_sysfs_entry *entry;

	entry = container_of(attr, struct map_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(mem, buf);
}
static const struct sysfs_ops map_sysfs_ops = {
	.show = map_type_show,
};
static struct kobj_type map_attr_type = {
	.release = map_release,
	.sysfs_ops = &map_sysfs_ops,
	.default_attrs = attrs,
};
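/*
 * Together with uio_dev_add_attributes() below, the kobjects built with
 * map_attr_type appear in sysfs as
 * /sys/class/uio/uioX/maps/mapN/{name,addr,size,offset}, one directory per
 * populated entry in uio_info->mem[].
 */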
struct uio_portio {
	struct kobject kobj;
	struct uio_port *port;
};
#define to_portio(portio) container_of(portio, struct uio_portio, kobj)
static ssize_t portio_name_show(struct uio_port *port, char *buf)
{
	if (unlikely(!port->name))
		port->name = "";

	return sprintf(buf, "%s\n", port->name);
}
static ssize_t portio_start_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->start);
}
static ssize_t portio_size_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->size);
}
static ssize_t portio_porttype_show(struct uio_port *port, char *buf)
{
	const char *porttypes[] = {"none", "x86", "gpio", "other"};

	if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER))
		return -EINVAL;

	return sprintf(buf, "port_%s\n", porttypes[port->porttype]);
}
struct portio_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_port *, char *);
	ssize_t (*store)(struct uio_port *, const char *, size_t);
};
static struct portio_sysfs_entry portio_name_attribute =
	__ATTR(name, S_IRUGO, portio_name_show, NULL);
static struct portio_sysfs_entry portio_start_attribute =
	__ATTR(start, S_IRUGO, portio_start_show, NULL);
static struct portio_sysfs_entry portio_size_attribute =
	__ATTR(size, S_IRUGO, portio_size_show, NULL);
static struct portio_sysfs_entry portio_porttype_attribute =
	__ATTR(porttype, S_IRUGO, portio_porttype_show, NULL);
static struct attribute *portio_attrs[] = {
	&portio_name_attribute.attr,
	&portio_start_attribute.attr,
	&portio_size_attribute.attr,
	&portio_porttype_attribute.attr,
	NULL,
};
static void portio_release(struct kobject *kobj)
{
	struct uio_portio *portio = to_portio(kobj);

	kfree(portio);
}
static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct uio_portio *portio = to_portio(kobj);
	struct uio_port *port = portio->port;
	struct portio_sysfs_entry *entry;

	entry = container_of(attr, struct portio_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(port, buf);
}
static const struct sysfs_ops portio_sysfs_ops = {
	.show = portio_type_show,
};
static struct kobj_type portio_attr_type = {
	.release = portio_release,
	.sysfs_ops = &portio_sysfs_ops,
	.default_attrs = portio_attrs,
};
static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", idev->info->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", idev->info->version);
}
static DEVICE_ATTR_RO(version);
static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
}
static DEVICE_ATTR_RO(event);
static struct attribute *uio_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_version.attr,
	&dev_attr_event.attr,
	NULL,
};
ATTRIBUTE_GROUPS(uio);
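/*
 * name_show(), version_show() and event_show() above are exported through
 * uio_groups, so every registered device gets
 * /sys/class/uio/uioX/{name,version,event} populated from its uio_info.
 */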
/* UIO class infrastructure */
static struct class uio_class = {
	.name = "uio",
	.dev_groups = uio_groups,
};

static bool uio_class_registered;
static int uio_dev_add_attributes(struct uio_device *idev)
{
	int ret;
	int mi, pi;
	int map_found = 0;
	int portio_found = 0;
	struct uio_mem *mem;
	struct uio_map *map;
	struct uio_port *port;
	struct uio_portio *portio;

	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
		mem = &idev->info->mem[mi];
		if (mem->size == 0)
			break;
		if (!map_found) {
			map_found = 1;
			idev->map_dir = kobject_create_and_add("maps",
							&idev->dev->kobj);
			if (!idev->map_dir) {
				ret = -ENOMEM;
				goto err_map;
			}
		}
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto err_map;
		}
		kobject_init(&map->kobj, &map_attr_type);
		map->mem = mem;
		mem->map = map;
		ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
		if (ret)
			goto err_map_kobj;
		ret = kobject_uevent(&map->kobj, KOBJ_ADD);
		if (ret)
			goto err_map_kobj;
	}

	for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
		port = &idev->info->port[pi];
		if (port->size == 0)
			break;
		if (!portio_found) {
			portio_found = 1;
			idev->portio_dir = kobject_create_and_add("portio",
							&idev->dev->kobj);
			if (!idev->portio_dir) {
				ret = -ENOMEM;
				goto err_portio;
			}
		}
		portio = kzalloc(sizeof(*portio), GFP_KERNEL);
		if (!portio) {
			ret = -ENOMEM;
			goto err_portio;
		}
		kobject_init(&portio->kobj, &portio_attr_type);
		portio->port = port;
		port->portio = portio;
		ret = kobject_add(&portio->kobj, idev->portio_dir,
							"port%d", pi);
		if (ret)
			goto err_portio_kobj;
		ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
		if (ret)
			goto err_portio_kobj;
	}

	return 0;

err_portio:
	pi--;
err_portio_kobj:
	for (; pi >= 0; pi--) {
		port = &idev->info->port[pi];
		portio = port->portio;
		kobject_put(&portio->kobj);
	}
	kobject_put(idev->portio_dir);
err_map:
	mi--;
err_map_kobj:
	for (; mi >= 0; mi--) {
		mem = &idev->info->mem[mi];
		map = mem->map;
		kobject_put(&map->kobj);
	}
	kobject_put(idev->map_dir);
	dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
	return ret;
}
static void uio_dev_del_attributes(struct uio_device *idev)
{
	int i;
	struct uio_mem *mem;
	struct uio_port *port;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		mem = &idev->info->mem[i];
		if (mem->size == 0)
			break;
		kobject_put(&mem->map->kobj);
	}
	kobject_put(idev->map_dir);

	for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) {
		port = &idev->info->port[i];
		if (port->size == 0)
			break;
		kobject_put(&port->portio->kobj);
	}
	kobject_put(idev->portio_dir);
}
static int uio_get_minor(struct uio_device *idev)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
	if (retval >= 0) {
		idev->minor = retval;
		retval = 0;
	} else if (retval == -ENOSPC) {
		dev_err(idev->dev, "too many uio devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval;
}
static void uio_free_minor(struct uio_device *idev)
{
	mutex_lock(&minor_lock);
	idr_remove(&uio_idr, idev->minor);
	mutex_unlock(&minor_lock);
}
/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
	struct uio_device *idev = info->uio_dev;

	atomic_inc(&idev->event);
	wake_up_interruptible(&idev->wait);
	kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);
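/*
 * Illustrative sketch (not part of this file): a driver that services its
 * interrupt itself can forward events to userspace by calling
 * uio_event_notify() from its own handler. The names my_handler, my_hw_*()
 * and my_info are hypothetical.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		if (!my_hw_irq_pending())	// hypothetical status check
 *			return IRQ_NONE;
 *		my_hw_ack_irq();		// hypothetical acknowledge
 *		uio_event_notify(&my_info);	// wakes uio_read()/uio_poll()
 *		return IRQ_HANDLED;
 *	}
 */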
/**
 * uio_interrupt - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the device's uio_device structure
 */
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
	struct uio_device *idev = (struct uio_device *)dev_id;
	irqreturn_t ret = idev->info->handler(irq, idev->info);

	if (ret == IRQ_HANDLED)
		uio_event_notify(idev->info);

	return ret;
}
struct uio_listener {
	struct uio_device *dev;
	s32 event_count;
};
static int uio_open(struct inode *inode, struct file *filep)
{
	struct uio_device *idev;
	struct uio_listener *listener;
	int ret = 0;

	mutex_lock(&minor_lock);
	idev = idr_find(&uio_idr, iminor(inode));
	mutex_unlock(&minor_lock);
	if (!idev) {
		ret = -ENODEV;
		goto out;
	}

	if (!try_module_get(idev->owner)) {
		ret = -ENODEV;
		goto out;
	}

	listener = kmalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener) {
		ret = -ENOMEM;
		goto err_alloc_listener;
	}

	listener->dev = idev;
	listener->event_count = atomic_read(&idev->event);
	filep->private_data = listener;

	if (idev->info->open) {
		ret = idev->info->open(idev->info, inode);
		if (ret)
			goto err_infoopen;
	}
	return 0;

err_infoopen:
	kfree(listener);
err_alloc_listener:
	module_put(idev->owner);
out:
	return ret;
}
static int uio_fasync(int fd, struct file *filep, int on)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	return fasync_helper(fd, filep, on, &idev->async_queue);
}
static int uio_release(struct inode *inode, struct file *filep)
{
	int ret = 0;
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	if (idev->info->release)
		ret = idev->info->release(idev->info, inode);

	module_put(idev->owner);
	kfree(listener);
	return ret;
}
static unsigned int uio_poll(struct file *filep, poll_table *wait)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	if (!idev->info->irq)
		return -EIO;

	poll_wait(filep, &idev->wait, wait);
	if (listener->event_count != atomic_read(&idev->event))
		return POLLIN | POLLRDNORM;
	return 0;
}
static ssize_t uio_read(struct file *filep, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t retval;
	s32 event_count;

	if (!idev->info->irq)
		return -EIO;

	if (count != sizeof(s32))
		return -EINVAL;

	add_wait_queue(&idev->wait, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		event_count = atomic_read(&idev->event);
		if (event_count != listener->event_count) {
			__set_current_state(TASK_RUNNING);
			if (copy_to_user(buf, &event_count, count))
				retval = -EFAULT;
			else {
				listener->event_count = event_count;
				retval = count;
			}
			break;
		}

		if (filep->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&idev->wait, &wait);

	return retval;
}
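/*
 * Illustrative userspace sketch (device node /dev/uio0 assumed): a blocking
 * read of exactly sizeof(s32) bytes returns the current event count, which
 * is how the wait loop above is meant to be consumed.
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	int32_t events;
 *	if (read(fd, &events, sizeof(events)) == sizeof(events))
 *		printf("interrupts so far: %d\n", events);
 */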
static ssize_t uio_write(struct file *filep, const char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	ssize_t retval;
	s32 irq_on;

	if (!idev->info->irq)
		return -EIO;

	if (count != sizeof(s32))
		return -EINVAL;

	if (!idev->info->irqcontrol)
		return -ENOSYS;

	if (copy_from_user(&irq_on, buf, count))
		return -EFAULT;

	retval = idev->info->irqcontrol(idev->info, irq_on);

	return retval ? retval : sizeof(s32);
}
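/*
 * Illustrative userspace sketch: writing a 32-bit value is routed to
 * info->irqcontrol(); drivers conventionally use it to re-enable (1) or
 * mask (0) their interrupt, but the exact meaning of the value is up to
 * the driver's irqcontrol implementation.
 *
 *	int32_t irq_on = 1;
 *	write(fd, &irq_on, sizeof(irq_on));	// fd as in the read example
 */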
static int uio_find_mem_index(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (idev->info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}
static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct uio_device *idev = vma->vm_private_data;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = uio_find_mem_index(vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset;
	if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(addr);
	else
		page = vmalloc_to_page(addr);
	get_page(page);
	vmf->page = page;
	return 0;
}
static const struct vm_operations_struct uio_logical_vm_ops = {
	.fault = uio_vma_fault,
};
static int uio_mmap_logical(struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &uio_logical_vm_ops;
	return 0;
}
static const struct vm_operations_struct uio_physical_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};
static int uio_mmap_physical(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	int mi = uio_find_mem_index(vma);
	struct uio_mem *mem;

	if (mi < 0)
		return -EINVAL;
	mem = idev->info->mem + mi;

	if (mem->addr & ~PAGE_MASK)
		return -ENODEV;
	if (vma->vm_end - vma->vm_start > mem->size)
		return -EINVAL;

	vma->vm_ops = &uio_physical_vm_ops;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * We cannot use the vm_iomap_memory() helper here,
	 * because vma->vm_pgoff is the map index we looked
	 * up above in uio_find_mem_index(), rather than an
	 * actual page offset into the mmap.
	 *
	 * So we just do the physical mmap without a page
	 * offset.
	 */
	return remap_pfn_range(vma,
			       vma->vm_start,
			       mem->addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	int mi;
	unsigned long requested_pages, actual_pages;
	int ret = 0;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	vma->vm_private_data = idev;

	mi = uio_find_mem_index(vma);
	if (mi < 0)
		return -EINVAL;

	requested_pages = vma_pages(vma);
	actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
			+ idev->info->mem[mi].size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (requested_pages > actual_pages)
		return -EINVAL;

	if (idev->info->mmap) {
		ret = idev->info->mmap(idev->info, vma);
		return ret;
	}

	switch (idev->info->mem[mi].memtype) {
	case UIO_MEM_PHYS:
		return uio_mmap_physical(vma);
	case UIO_MEM_LOGICAL:
	case UIO_MEM_VIRTUAL:
		return uio_mmap_logical(vma);
	default:
		return -EINVAL;
	}
}
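/*
 * Illustrative userspace sketch: as uio_vma_fault() notes, the mmap offset
 * selects the mapping, i.e. offset = N * page size maps uio_info->mem[N].
 * map_size and N are placeholders and must match the size advertised under
 * /sys/class/uio/uioX/maps/mapN/.
 *
 *	void *base = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, N * getpagesize());
 */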
static const struct file_operations uio_fops = {
	.owner = THIS_MODULE,
	.open = uio_open,
	.release = uio_release,
	.read = uio_read,
	.write = uio_write,
	.mmap = uio_mmap,
	.poll = uio_poll,
	.fasync = uio_fasync,
	.llseek = noop_llseek,
};
static int uio_major_init(void)
{
	static const char name[] = "uio";
	struct cdev *cdev = NULL;
	dev_t uio_dev = 0;
	int result;

	result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name);
	if (result)
		goto out;

	result = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev)
		goto out_unregister;

	cdev->owner = THIS_MODULE;
	cdev->ops = &uio_fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES);
	if (result)
		goto out_put;

	uio_major = MAJOR(uio_dev);
	uio_cdev = cdev;
	return 0;
out_put:
	kobject_put(&cdev->kobj);
out_unregister:
	unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES);
out:
	return result;
}
static void uio_major_cleanup(void)
{
	unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES);
	cdev_del(uio_cdev);
}
static int init_uio_class(void)
{
	int ret;

	/* This is the first time in here, set everything up properly */
	ret = uio_major_init();
	if (ret)
		goto exit;

	ret = class_register(&uio_class);
	if (ret) {
		printk(KERN_ERR "class_register failed for uio\n");
		goto err_class_register;
	}

	uio_class_registered = true;
	return 0;

err_class_register:
	uio_major_cleanup();
exit:
	return ret;
}
static void release_uio_class(void)
{
	uio_class_registered = false;
	class_unregister(&uio_class);
	uio_major_cleanup();
}
/**
 * uio_register_device - register a new userspace IO device
 * @owner:	module that creates the new device
 * @parent:	parent device
 * @info:	UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __uio_register_device(struct module *owner,
			  struct device *parent,
			  struct uio_info *info)
{
	struct uio_device *idev;
	int ret = 0;

	if (!uio_class_registered)
		return -EPROBE_DEFER;

	if (!parent || !info || !info->name || !info->version)
		return -EINVAL;

	info->uio_dev = NULL;

	idev = devm_kzalloc(parent, sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->owner = owner;
	idev->info = info;
	init_waitqueue_head(&idev->wait);
	atomic_set(&idev->event, 0);

	ret = uio_get_minor(idev);
	if (ret)
		return ret;

	idev->dev = device_create(&uio_class, parent,
				  MKDEV(uio_major, idev->minor), idev,
				  "uio%d", idev->minor);
	if (IS_ERR(idev->dev)) {
		printk(KERN_ERR "UIO: device register failed\n");
		ret = PTR_ERR(idev->dev);
		goto err_device_create;
	}

	ret = uio_dev_add_attributes(idev);
	if (ret)
		goto err_uio_dev_add_attributes;

	info->uio_dev = idev;

	if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
		/*
		 * Note that we deliberately don't use devm_request_irq
		 * here. The parent module can unregister the UIO device
		 * and call pci_disable_msi, which requires that this
		 * irq has been freed. However, the device may have open
		 * FDs at the time of unregister and therefore may not be
		 * freed until they are released.
		 */
		ret = request_irq(info->irq, uio_interrupt,
				  info->irq_flags, info->name, idev);
		if (ret) {
			info->uio_dev = NULL;
			goto err_request_irq;
		}
	}

	return 0;

err_request_irq:
	uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
	device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
err_device_create:
	uio_free_minor(idev);
	return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);
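/*
 * Illustrative sketch of the driver side (hypothetical names my_info, pdev,
 * res): fill a struct uio_info and hand it to this function, typically via
 * the uio_register_device() wrapper that supplies THIS_MODULE as @owner.
 *
 *	static struct uio_info my_info = {
 *		.name = "my_uio_device",
 *		.version = "0.1",
 *		.irq = UIO_IRQ_NONE,	// or a real IRQ plus a handler
 *	};
 *
 *	// in the parent driver's probe():
 *	my_info.mem[0].memtype = UIO_MEM_PHYS;
 *	my_info.mem[0].addr = res->start;	// page-aligned, see uio_mmap_physical()
 *	my_info.mem[0].size = resource_size(res);
 *	ret = uio_register_device(&pdev->dev, &my_info);
 */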
/**
 * uio_unregister_device - unregister a userspace IO device
 * @info:	UIO device capabilities
 */
void uio_unregister_device(struct uio_info *info)
{
	struct uio_device *idev;

	if (!info || !info->uio_dev)
		return;

	idev = info->uio_dev;

	uio_free_minor(idev);

	uio_dev_del_attributes(idev);

	if (info->irq && info->irq != UIO_IRQ_CUSTOM)
		free_irq(info->irq, idev);

	device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
}
EXPORT_SYMBOL_GPL(uio_unregister_device);
static int __init uio_init(void)
{
	return init_uio_class();
}

static void __exit uio_exit(void)
{
	release_uio_class();
	idr_destroy(&uio_idr);
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");