/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 * Andre van Herk <andre.van.herk@prodrive-technologies.com>
 * Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Aurelien Jacquiot <a-jacquiot@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>
#define DRV_NAME	"rio_mport"
#define DRV_PREFIX	DRV_NAME ": "
#define DEV_NAME	"rio_mport"
#define DRV_VERSION	"1.0.0"
/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_DMA		= BIT(4), /* DMA transfer messages */
	DBG_MMAP	= BIT(5), /* mapping messages */
	DBG_IBW		= BIT(6), /* inbound window */
	DBG_EVENT	= BIT(7), /* event handling messages */
	DBG_OBW		= BIT(8), /* outbound window messages */
	DBG_DBELL	= BIT(9), /* doorbell messages */
};
#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)		\
	do {					\
		if (DBG_##level & dbg_level)	\
			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
	} while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)
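/*
 * Usage sketch (illustrative, not part of the driver): the DBG_* bits are
 * OR'ed into the dbg_level module parameter to select trace categories,
 * e.g. loading the module with "dbg_level=0x30" enables DBG_DMA | DBG_MMAP.
 * A call site (the identifiers below are hypothetical) then looks like:
 *
 *	rmcd_debug(DMA, "chan=%d len=0x%llx", chan_id, len);
 *
 * which expands to pr_debug() only when DBG_DMA is set in dbg_level.
 */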
MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
	void		*ib_base;
	dma_addr_t	ib_phys;
	u32		ib_size;
	u64		ib_rio_base;
	bool		ib_map;
	struct file	*filp;
};

/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};

struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	enum rio_mport_map_dir dir;
	u16 rioid;
	u64 rio_addr;
	dma_addr_t phys_addr; /* for mmap */
	void *virt_addr; /* kernel address, for dma_free_coherent */
	u64 size;
	struct kref ref; /* refcount of vmas sharing the mapping */
	struct file *filp;
};

struct rio_mport_dma_map {
	int valid;
	u64 length;
	void *vaddr;
	dma_addr_t paddr;
};

#define MPORT_MAX_DMA_BUFS	16
#define MPORT_EVENT_DEPTH	10
/*
 * mport_dev - driver-specific structure that represents mport device
 * @active: mport device status flag
 * @node: list node to maintain list of registered mports
 * @cdev: character device
 * @dev: associated device object
 * @mport: associated subsystem's master port device object
 * @buf_mutex: lock for buffer handling
 * @file_mutex: lock for open files list
 * @file_list: list of open files on given mport
 * @properties: properties of this mport
 * @portwrites: queue of inbound portwrites
 * @pw_lock: lock for port write queue
 * @mappings: queue for memory mappings
 * @dma_chan: DMA channels associated with this device
 */
struct mport_dev {
	atomic_t		active;
	struct list_head	node;
	struct cdev		cdev;
	struct device		dev;
	struct rio_mport	*mport;
	struct mutex		buf_mutex;
	struct mutex		file_mutex;
	struct list_head	file_list;
	struct rio_mport_properties	properties;
	struct list_head	doorbells;
	spinlock_t		db_lock;
	struct list_head	portwrites;
	spinlock_t		pw_lock;
	struct list_head	mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dma_chan;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};
/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md: master port character device object
 * @async_queue: asynchronous notification queue
 * @list: file objects tracking list
 * @db_filters: inbound doorbell filters for this descriptor
 * @pw_filters: portwrite filters for this descriptor
 * @event_fifo: event fifo for this descriptor
 * @event_rx_wait: wait queue for this descriptor
 * @fifo_lock: lock for event_fifo
 * @event_mask: event mask for this descriptor
 * @dmach: DMA engine channel allocated for specific file object
 */
struct mport_cdev_priv {
	struct mport_dev	*md;
	struct fasync_struct	*async_queue;
	struct list_head	list;
	struct list_head	db_filters;
	struct list_head	pw_filters;
	struct kfifo		event_fifo;
	wait_queue_head_t	event_rx_wait;
	spinlock_t		fifo_lock;
	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dmach;
	struct list_head	async_list;
	spinlock_t		req_lock;
	struct mutex		dma_lock;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};
/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node: node in mport device's list
 * @priv_node: node in private file object's list
 * @priv: reference to private data
 * @filter: actual portwrite filter
 */
struct rio_mport_pw_filter {
	struct list_head md_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_pw_filter filter;
};
/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node: node in mport device's doorbell list
 * @priv_node: node in private file object's list
 * @priv: reference to private data
 * @filter: actual doorbell filter
 */
struct rio_mport_db_filter {
	struct list_head data_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_doorbell_filter filter;
};
static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;

static void mport_release_mapping(struct kref *ref);
static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length/sizeof(u32);
	offset = maint_io.offset;

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_read_config_32(mport,
				offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
				   buffer, maint_io.length)))
		ret = -EFAULT;
out:
	vfree(buffer);
	return ret;
}
static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret = -EINVAL, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length;

	if (unlikely(copy_from_user(buffer,
			(void __user *)(uintptr_t)maint_io.buffer, length))) {
		ret = -EFAULT;
		goto out;
	}

	offset = maint_io.offset;
	length /= sizeof(u32);

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

out:
	vfree(buffer);
	return ret;
}
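/*
 * User-space usage sketch (illustrative; the "fd" and values are
 * hypothetical, field names follow struct rio_mport_maint_io from
 * <linux/rio_mport_cdev.h>). Reading 16 bytes of maintenance space from a
 * remote device could look like:
 *
 *	struct rio_mport_maint_io io = {
 *		.rioid    = 5,
 *		.hopcount = 0,
 *		.offset   = 0,              // must be 4-byte aligned
 *		.length   = 16,             // multiple of 4, within RIO_MAINT_SPACE_SZ
 *		.buffer   = (uintptr_t)buf,
 *	};
 *	ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io);
 */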
/*
 * Inbound/outbound memory mapping functions
 */
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
				  u16 rioid, u64 raddr, u32 size,
				  dma_addr_t *paddr)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
	if (ret < 0)
		goto err_map_outb;

	map->dir = MAP_OUTBOUND;
	map->rioid = rioid;
	map->rio_addr = raddr;
	map->size = size;
	map->phys_addr = *paddr;
	map->md = md;
	map->filp = filp;
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;
err_map_outb:
	kfree(map);
	return ret;
}
static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
			       u16 rioid, u64 raddr, u32 size,
			       dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err = 0;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_OUTBOUND)
			continue;
		if (rioid == map->rioid &&
		    raddr == map->rio_addr && size == map->size) {
			*paddr = map->phys_addr;
			err = 0;
			break;
		} else if (rioid == map->rioid &&
			   raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}

	/* If not found, create new */
	if (&map->node == &md->mappings)
		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
							size, paddr);
	mutex_unlock(&md->buf_mutex);
	return err;
}
static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *data = priv->md;
	struct rio_mmap map;
	dma_addr_t paddr;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
		   map.rioid, map.rio_addr, map.length);

	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err= %d", ret);
		return ret;
	}

	map.handle = paddr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
		return -EFAULT;

	return 0;
}
/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file pointer
 * @arg: buffer handle returned by allocation routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	if (!md->mport->ops->unmap_outb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	rmcd_debug(OBW, "h=0x%llx", handle);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}
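/*
 * User-space usage sketch (illustrative; "fd" and the address values are
 * hypothetical). An outbound window is requested with RIO_MAP_OUTBOUND and
 * released by passing the returned handle back through RIO_UNMAP_OUTBOUND:
 *
 *	struct rio_mmap map = {
 *		.rioid    = 5,
 *		.rio_addr = 0x10000000,
 *		.length   = 0x10000,
 *	};
 *	ioctl(fd, RIO_MAP_OUTBOUND, &map);   // map.handle now holds phys_addr
 *	...
 *	ioctl(fd, RIO_UNMAP_OUTBOUND, &map.handle);
 */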
/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg: Device Id
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u16 hdid;

	if (copy_from_user(&hdid, arg, sizeof(hdid)))
		return -EFAULT;

	md->mport->host_deviceid = hdid;
	md->properties.hdid = hdid;
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);

	return 0;
}
/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg: Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 comptag;

	if (copy_from_user(&comptag, arg, sizeof(comptag)))
		return -EFAULT;

	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

	return 0;
}
#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
	struct kref refcount;
	struct list_head node;
	struct file *filp;
	struct mport_cdev_priv *priv;
	enum rio_transfer_sync sync;
	struct sg_table sgt;
	struct page **page_list;
	unsigned int nr_pages;
	struct rio_mport_mapping *map;
	struct dma_chan *dmach;
	enum dma_data_direction dir;
	dma_cookie_t cookie;
	enum dma_status status;
	struct completion req_comp;
};
static void mport_release_def_dma(struct kref *dma_ref)
{
	struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
	struct mport_cdev_priv *priv =
			container_of(dma_ref, struct mport_cdev_priv, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
	complete(&priv->comp);
}
static void dma_req_free(struct kref *ref)
{
	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
			refcount);
	struct mport_cdev_priv *priv = req->priv;
	unsigned int i;

	dma_unmap_sg(req->dmach->device->dev,
		     req->sgt.sgl, req->sgt.nents, req->dir);
	sg_free_table(&req->sgt);
	if (req->page_list) {
		for (i = 0; i < req->nr_pages; i++)
			put_page(req->page_list[i]);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
}
static void dma_xfer_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_cdev_priv *priv = req->priv;

	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
	kref_put(&req->refcount, dma_req_free);
}
/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer object.
 * Returns a pointer to the DMA transaction descriptor allocated by the DMA
 * driver on success, or ERR_PTR/NULL on failure. The caller must check the
 * returned pointer for NULL and with the IS_ERR macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
	enum dma_ctrl_flags flags)
{
	struct rio_dma_data tx_data;

	tx_data.sg = sgt->sgl;
	tx_data.sg_len = nents;
	tx_data.rio_addr_u = 0;
	tx_data.rio_addr = transfer->rio_addr;
	if (dir == DMA_MEM_TO_DEV) {
		switch (transfer->method) {
		case RIO_EXCHANGE_NWRITE:
			tx_data.wr_type = RDW_ALL_NWRITE;
			break;
		case RIO_EXCHANGE_NWRITE_R_ALL:
			tx_data.wr_type = RDW_ALL_NWRITE_R;
			break;
		case RIO_EXCHANGE_NWRITE_R:
			tx_data.wr_type = RDW_LAST_NWRITE_R;
			break;
		case RIO_EXCHANGE_DEFAULT:
			tx_data.wr_type = RDW_DEFAULT;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}
/* Request DMA channel associated with this mport device.
 * Try to request a DMA channel for each new process that opened the given
 * mport. If a new DMA channel is not available, use the default channel,
 * which is the first DMA channel opened on the mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
	mutex_lock(&priv->dma_lock);
	if (!priv->dmach) {
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
	return 0;
}
static void put_dma_channel(struct mport_cdev_priv *priv)
{
	kref_put(&priv->dma_ref, mport_release_dma);
}
/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
			  struct rio_transfer_io *xfer,
			  enum rio_transfer_sync sync, int nents)
{
	struct mport_cdev_priv *priv;
	struct sg_table *sgt;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	enum dma_transfer_direction dir;
	long wret;
	int ret = 0;

	priv = req->priv;
	sgt = &req->sgt;

	chan = priv->dmach;
	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
		   current->comm, task_pid_nr(current),
		   dev_name(&chan->dev->device),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	/* Initialize DMA transaction request */
	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

	if (!tx) {
		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	} else if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			xfer->rio_addr, xfer->length);
		goto err_out;
	}

	tx->callback = dma_xfer_callback;
	tx->callback_param = req;

	req->status = DMA_IN_PROGRESS;
	kref_get(&req->refcount);

	cookie = dmaengine_submit(tx);
	req->cookie = cookie;

	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);

	if (dma_submit_error(cookie)) {
		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
			   cookie, xfer->rio_addr, xfer->length);
		kref_put(&req->refcount, dma_req_free);
		ret = -EIO;
		goto err_out;
	}

	dma_async_issue_pending(chan);

	if (sync == RIO_TRANSFER_ASYNC) {
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->async_list);
		spin_unlock(&priv->req_lock);
		return cookie;
	} else if (sync == RIO_TRANSFER_FAF)
		return 0;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* wait_for_completion was interrupted by a signal, but the
		 * DMA may still be in progress
		 */
		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -EINTR;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion was signaled with error */
		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			   cookie, req->status, ret);
		ret = -EIO;
	}

err_out:
	return ret;
}
/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *       DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
		 enum rio_transfer_sync sync, enum dma_data_direction dir,
		 struct rio_transfer_io *xfer)
{
	struct mport_cdev_priv *priv = filp->private_data;
	unsigned long nr_pages = 0;
	struct page **page_list = NULL;
	struct mport_dma_req *req;
	struct mport_dev *md = priv->md;
	struct dma_chan *chan;
	int nents;
	int i, ret;

	if (xfer->length == 0)
		return -EINVAL;
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = get_dma_channel(priv);
	if (ret) {
		kfree(req);
		return ret;
	}
	chan = priv->dmach;

	kref_init(&req->refcount);
	init_completion(&req->req_comp);
	req->dir = dir;
	req->filp = filp;
	req->priv = priv;
	req->dmach = chan;
	req->sync = sync;

	/*
	 * If parameter loc_addr != NULL, we are transferring data from/to
	 * data buffer allocated in user-space: lock in memory user-space
	 * buffer pages and build an SG table for DMA transfer request
	 *
	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
	 * used for DMA data transfers: build single entry SG table using
	 * offset within the internal buffer specified by handle parameter.
	 */
	if (xfer->loc_addr) {
		unsigned int offset;
		long pinned;

		offset = lower_32_bits(offset_in_page(xfer->loc_addr));
		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

		page_list = kmalloc_array(nr_pages,
					  sizeof(*page_list), GFP_KERNEL);
		if (page_list == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		pinned = get_user_pages_fast(
				(unsigned long)xfer->loc_addr & PAGE_MASK,
				nr_pages, dir == DMA_FROM_DEVICE, page_list);

		if (pinned != nr_pages) {
			if (pinned < 0) {
				rmcd_error("get_user_pages_unlocked err=%ld",
					   pinned);
				nr_pages = 0;
			} else {
				rmcd_error("pinned %ld out of %ld pages",
					   pinned, nr_pages);
				/*
				 * Set nr_pages up to mean "how many pages to
				 * unpin, in the error handler":
				 */
				nr_pages = pinned;
			}
			ret = -EFAULT;
			goto err_pg;
		}

		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
				offset, xfer->length, GFP_KERNEL);
		if (ret) {
			rmcd_error("sg_alloc_table failed with err=%d", ret);
			goto err_pg;
		}

		req->page_list = page_list;
		req->nr_pages = nr_pages;
	} else {
		dma_addr_t baddr;
		struct rio_mport_mapping *map;

		baddr = (dma_addr_t)xfer->handle;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (baddr >= map->phys_addr &&
			    baddr < (map->phys_addr + map->size)) {
				kref_get(&map->ref);
				req->map = map;
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);

		if (req->map == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		if (xfer->length + xfer->offset > map->size) {
			ret = -EINVAL;
			goto err_req;
		}

		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
		if (unlikely(ret)) {
			rmcd_error("sg_alloc_table failed for internal buf");
			goto err_req;
		}

		sg_set_buf(req->sgt.sgl,
			   map->virt_addr + (baddr - map->phys_addr) +
				xfer->offset, xfer->length);
	}

	nents = dma_map_sg(chan->device->dev,
			   req->sgt.sgl, req->sgt.nents, dir);
	if (nents == 0) {
		rmcd_error("Failed to map SG list");
		ret = -EFAULT;
		goto err_pg;
	}

	ret = do_dma_request(req, xfer, sync, nents);

	if (ret >= 0) {
		if (sync == RIO_TRANSFER_ASYNC)
			return ret; /* return ASYNC cookie */
	} else
		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);

err_pg:
	if (!req->page_list) {
		for (i = 0; i < nr_pages; i++)
			put_page(page_list[i]);
		kfree(page_list);
	}
err_req:
	kref_put(&req->refcount, dma_req_free);
	return ret;
}
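/*
 * User-space usage sketch for the two buffer modes handled above
 * (illustrative; "fd" and all values are hypothetical). With loc_addr set,
 * the driver pins the user buffer; with loc_addr == 0, it uses an offset
 * into a kernel buffer previously obtained via RIO_ALLOC_DMA:
 *
 *	struct rio_transfer_io xfer = {
 *		.rioid    = 5,
 *		.rio_addr = 0x10000000,
 *		.loc_addr = (uintptr_t)user_buf,  // or 0 to use .handle/.offset
 *		.length   = 0x1000,
 *	};
 *	struct rio_transaction tx = {
 *		.transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
 *		.sync  = RIO_TRANSFER_SYNC,
 *		.dir   = RIO_TRANSFER_DIR_WRITE,
 *		.count = 1,
 *		.block = (uintptr_t)&xfer,
 *	};
 *	ioctl(fd, RIO_TRANSFER, &tx);
 */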
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_transaction transaction;
	struct rio_transfer_io *transfer;
	enum dma_data_direction dir;
	int i, ret = 0;

	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
		return -EFAULT;

	if (transaction.count != 1) /* only single transfer for now */
		return -EINVAL;

	if ((transaction.transfer_mode &
	     priv->md->properties.transfer_mode) == 0)
		return -ENODEV;

	transfer = vmalloc(array_size(sizeof(*transfer), transaction.count));
	if (!transfer)
		return -ENOMEM;

	if (unlikely(copy_from_user(transfer,
				    (void __user *)(uintptr_t)transaction.block,
				    transaction.count * sizeof(*transfer)))) {
		ret = -EFAULT;
		goto out_free;
	}

	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE;
	for (i = 0; i < transaction.count && ret == 0; i++)
		ret = rio_dma_transfer(filp, transaction.transfer_mode,
				       transaction.sync, dir, &transfer[i]);

	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
				  transfer,
				  transaction.count * sizeof(*transfer))))
		ret = -EFAULT;

out_free:
	vfree(transfer);

	return ret;
}
static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv;
	struct rio_async_tx_wait w_param;
	struct mport_dma_req *req;
	dma_cookie_t cookie;
	unsigned long tmo;
	long wret;
	int found = 0;
	int ret;

	priv = (struct mport_cdev_priv *)filp->private_data;

	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
		return -EFAULT;

	cookie = w_param.token;
	if (w_param.timeout)
		tmo = msecs_to_jiffies(w_param.timeout);
	else /* Use default DMA timeout */
		tmo = msecs_to_jiffies(dma_timeout);

	spin_lock(&priv->req_lock);
	list_for_each_entry(req, &priv->async_list, node) {
		if (req->cookie == cookie) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	spin_unlock(&priv->req_lock);

	if (!found)
		return -EAGAIN;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -ETIMEDOUT;
		goto err_tmo;
	} else if (wret == -ERESTARTSYS) {
		/* wait_for_completion was interrupted by a signal, but the
		 * DMA may still be in progress
		 */
		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -EINTR;
		goto err_tmo;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion signaled with transfer error */
		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
			   req->status);
		ret = -EIO;
	} else
		ret = 0;

	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
		kref_put(&req->refcount, dma_req_free);

	return ret;

err_tmo:
	/* Return request back into async queue */
	spin_lock(&priv->req_lock);
	list_add_tail(&req->node, &priv->async_list);
	spin_unlock(&priv->req_lock);
	return ret;
}
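/*
 * User-space usage sketch (illustrative): an asynchronous transfer started
 * with .sync = RIO_TRANSFER_ASYNC returns a cookie from the RIO_TRANSFER
 * ioctl, which is later passed back here as the token:
 *
 *	struct rio_async_tx_wait w = {
 *		.token   = cookie,   // value returned by RIO_TRANSFER
 *		.timeout = 0,        // 0 = use the dma_timeout module default
 *	};
 *	ioctl(fd, RIO_WAIT_FOR_ASYNC, &w);
 */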
static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
			u64 size, struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		kfree(map);
		return -ENOMEM;
	}

	map->dir = MAP_DMA;
	map->size = size;
	map->md = md;
	map->filp = filp;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;

	return 0;
}
static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_dma_mem map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
	if (ret)
		return ret;

	map.dma_handle = mapping->phys_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		mutex_lock(&md->buf_mutex);
		kref_put(&mapping->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
		return -EFAULT;
	}

	return 0;
}
static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	int ret = -EFAULT;
	struct rio_mport_mapping *map, *_map;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;
	rmcd_debug(EXIT, "filp=%p", filp);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_DMA && map->phys_addr == handle &&
		    map->filp == filp) {
			kref_put(&map->ref, mport_release_mapping);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (ret == -EFAULT) {
		rmcd_debug(DMA, "ERR no matching mapping");
		return ret;
	}

	return 0;
}
#else
static int rio_mport_transfer_ioctl(struct file *filp, void *arg)
{
	return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
				 u64 raddr, u64 size,
				 struct rio_mport_mapping **mapping)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	/* rio_map_inb_region() accepts u32 size */
	if (size > 0xffffffff)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	if (raddr == RIO_MAP_ANY_ADDR)
		raddr = map->phys_addr;
	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
	if (ret < 0)
		goto err_map_inb;

	map->dir = MAP_INBOUND;
	map->rio_addr = raddr;
	map->size = size;
	map->md = md;
	map->filp = filp;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;
	return 0;

err_map_inb:
	dma_free_coherent(mport->dev.parent, size,
			  map->virt_addr, map->phys_addr);
err_dma_alloc:
	kfree(map);
	return ret;
}
static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
			      u64 raddr, u64 size,
			      struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	if (raddr == RIO_MAP_ANY_ADDR)
		goto get_new;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_INBOUND)
			continue;
		if (raddr == map->rio_addr && size == map->size) {
			/* allow exact match only */
			*mapping = map;
			err = 0;
			break;
		} else if (raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (err != -ENOMEM)
		return err;

get_new:
	/* not found, create new */
	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}
static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_mmap map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (!md->mport->ops->map_inb)
		return -EPROTONOSUPPORT;
	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
					    map.length, &mapping);
	if (ret)
		return ret;

	map.handle = mapping->phys_addr;
	map.rio_addr = mapping->rio_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		/* Delete mapping if it was created by this request */
		if (ret == 0 && mapping->filp == filp) {
			mutex_lock(&md->buf_mutex);
			kref_put(&mapping->ref, mport_release_mapping);
			mutex_unlock(&md->buf_mutex);
		}
		return -EFAULT;
	}

	return 0;
}
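/*
 * User-space usage sketch (illustrative; "fd" is hypothetical). An inbound
 * window is requested with RIO_MAP_INBOUND; RIO_MAP_ANY_ADDR lets the driver
 * pick the RapidIO base address, and the returned handle can be mmap()ed:
 *
 *	struct rio_mmap map = {
 *		.rio_addr = RIO_MAP_ANY_ADDR,
 *		.length   = 0x10000,
 *	};
 *	ioctl(fd, RIO_MAP_INBOUND, &map);
 *	void *p = mmap(NULL, map.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, map.handle);
 */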
/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                            previously allocated inbound DMA coherent buffer
 * @filp: file pointer
 * @arg: buffer handle returned by allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	if (!md->mport->ops->unmap_inb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}
/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg: buffer to return the port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 port_idx = md->mport->index;

	rmcd_debug(MPORT, "port_index=%d", port_idx);

	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
		return -EFAULT;

	return 0;
}
static int rio_mport_add_event(struct mport_cdev_priv *priv,
			       struct rio_event *event)
{
	int overflow;

	if (!(priv->event_mask & event->header))
		return -EACCES;

	spin_lock(&priv->fifo_lock);
	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
			sizeof(*event)) != sizeof(*event);
	spin_unlock(&priv->fifo_lock);

	wake_up_interruptible(&priv->event_rx_wait);

	if (overflow) {
		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
		return -EBUSY;
	}

	return 0;
}
static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
				       u16 src, u16 dst, u16 info)
{
	struct mport_dev *data = dev_id;
	struct mport_cdev_priv *priv;
	struct rio_mport_db_filter *db_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_DOORBELL;
	event.u.doorbell.rioid = src;
	event.u.doorbell.payload = info;

	handled = 0;
	spin_lock(&data->db_lock);
	list_for_each_entry(db_filter, &data->doorbells, data_node) {
		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
		      db_filter->filter.rioid == src)) &&
		      info >= db_filter->filter.low &&
		      info <= db_filter->filter.high) {
			priv = db_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&data->db_lock);

	if (!handled)
		dev_warn(&data->dev,
			"%s: spurious DB received from 0x%x, info=0x%04x\n",
			__func__, src, info);
}
static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
				    rio_mport_doorbell_handler);
	if (ret) {
		rmcd_error("%s failed to register IBDB, err=%d",
			   dev_name(&md->dev), ret);
		return ret;
	}

	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
	if (db_filter == NULL) {
		rio_release_inb_dbell(md->mport, filter.low, filter.high);
		return -ENOMEM;
	}

	db_filter->filter = filter;
	db_filter->priv = priv;
	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&db_filter->priv_node, &priv->db_filters);
	list_add_tail(&db_filter->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);

	return 0;
}
static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
	list_del(&db_filter->data_node);
	list_del(&db_filter->priv_node);
	kfree(db_filter);
}
static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret = -EINVAL;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	spin_lock_irqsave(&priv->md->db_lock, flags);
	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
		if (db_filter->filter.rioid == filter.rioid &&
		    db_filter->filter.low == filter.low &&
		    db_filter->filter.high == filter.high) {
			rio_mport_delete_db_filter(db_filter);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->md->db_lock, flags);

	if (!ret)
		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

	return ret;
}
static int rio_mport_match_pw(union rio_pw_msg *msg,
			      struct rio_pw_filter *filter)
{
	if ((msg->em.comptag & filter->mask) < filter->low ||
	    (msg->em.comptag & filter->mask) > filter->high)
		return 0;
	return 1;
}
static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
				union rio_pw_msg *msg, int step)
{
	struct mport_dev *md = context;
	struct mport_cdev_priv *priv;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_PORTWRITE;
	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);

	handled = 0;
	spin_lock(&md->pw_lock);
	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
			priv = pw_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&md->pw_lock);

	if (!handled) {
		printk_ratelimited(KERN_WARNING DRV_NAME
			": mport%d received spurious PW from 0x%08x\n",
			mport->id, msg->em.comptag);
	}

	return 0;
}
static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int hadd = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
	if (pw_filter == NULL)
		return -ENOMEM;

	pw_filter->filter = filter;
	pw_filter->priv = priv;
	spin_lock_irqsave(&md->pw_lock, flags);
	if (list_empty(&md->portwrites))
		hadd = 1;
	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hadd) {
		int ret;

		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (ret) {
			dev_err(&md->dev,
				"%s: failed to add IB_PW handler, err=%d\n",
				__func__, ret);
			return ret;
		}
		rio_pw_enable(md->mport, 1);
	}

	return 0;
}
static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
{
	list_del(&pw_filter->md_node);
	list_del(&pw_filter->priv_node);
	kfree(pw_filter);
}
static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
				     struct rio_pw_filter *b)
{
	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
		return 1;
	return 0;
}
static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int ret = -EINVAL;
	int hdel = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	spin_lock_irqsave(&md->pw_lock, flags);
	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
			rio_mport_delete_pw_filter(pw_filter);
			ret = 0;
			break;
		}
	}

	if (list_empty(&md->portwrites))
		hdel = 1;
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hdel) {
		rio_del_mport_pw_handler(md->mport, priv->md,
					 rio_mport_pw_handler);
		rio_pw_enable(md->mport, 0);
	}

	return ret;
}
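/*
 * User-space usage sketch (illustrative; "fd" and "comp_tag" are
 * hypothetical): port-write reception is enabled by installing a filter on
 * the component tag; matching messages are then delivered as RIO_PORTWRITE
 * events through read(2), once the event mask includes RIO_PORTWRITE:
 *
 *	struct rio_pw_filter f = {
 *		.mask = 0xffffffff,
 *		.low  = comp_tag,
 *		.high = comp_tag,
 *	};
 *	ioctl(fd, RIO_ENABLE_PORTWRITE_RANGE, &f);
 */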
/*
 * rio_release_dev - release routine for kernel RIO device object
 * @dev: kernel device object associated with a RIO device structure
 *
 * Frees the RIO device struct associated with the given kernel device object.
 */
static void rio_release_dev(struct device *dev)
{
	struct rio_dev *rdev;

	rdev = to_rio_dev(dev);
	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
	kfree(rdev);
}


static void rio_release_net(struct device *dev)
{
	struct rio_net *net;

	net = to_rio_net(dev);
	rmcd_debug(RDEV, "net_%d", net->id);
	kfree(net);
}
/*
 * rio_mport_add_riodev - creates a kernel RIO device object
 *
 * Allocates a RIO device data structure and initializes required fields based
 * on device's configuration space contents.
 * If the device has switch capabilities, then a switch specific portion is
 * allocated and configured.
 */
static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
				void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev;
	struct rio_switch *rswitch = NULL;
	struct rio_mport *mport;
	struct device *dev;
	size_t size;
	u32 rval;
	u32 swpinfo = 0;
	u16 destid;
	u8 hopcount;
	int err;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
		   dev_info.comptag, dev_info.destid, dev_info.hopcount);

	dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
	if (dev) {
		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
		put_device(dev);
		return -EEXIST;
	}

	size = sizeof(*rdev);
	mport = md->mport;
	destid = dev_info.destid;
	hopcount = dev_info.hopcount;

	if (rio_mport_read_config_32(mport, destid, hopcount,
				     RIO_PEF_CAR, &rval))
		return -EIO;

	if (rval & RIO_PEF_SWITCH) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_SWP_INFO_CAR, &swpinfo);
		size += (RIO_GET_TOTAL_PORTS(swpinfo) *
			 sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
	}

	rdev = kzalloc(size, GFP_KERNEL);
	if (rdev == NULL)
		return -ENOMEM;

	if (mport->net == NULL) {
		struct rio_net *net;

		net = rio_alloc_net(mport);
		if (!net) {
			err = -ENOMEM;
			rmcd_debug(RDEV, "failed to allocate net object");
			goto cleanup;
		}

		net->id = mport->id;
		net->hport = mport;
		dev_set_name(&net->dev, "rnet_%d", net->id);
		net->dev.parent = &mport->dev;
		net->dev.release = rio_release_net;
		err = rio_add_net(net);
		if (err) {
			rmcd_debug(RDEV, "failed to register net, err=%d", err);
			kfree(net);
			goto cleanup;
		}
	}

	rdev->net = mport->net;
	rdev->pef = rval;
	rdev->swpinfo = swpinfo;
	rio_mport_read_config_32(mport, destid, hopcount,
				 RIO_DEV_ID_CAR, &rval);
	rdev->did = rval >> 16;
	rdev->vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
				 &rdev->device_rev);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
				 &rval);
	rdev->asm_did = rval >> 16;
	rdev->asm_vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
				 &rval);
	rdev->asm_rev = rval >> 16;

	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
		rdev->efptr = rval & 0xffff;
		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
						hopcount, &rdev->phys_rmap);

		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT);
	}

	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
				 &rdev->src_ops);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
				 &rdev->dst_ops);

	rdev->comp_tag = dev_info.comptag;
	rdev->destid = destid;
	/* hopcount is stored as specified by a caller, regardless of EP or SW */
	rdev->hopcount = hopcount;

	if (rdev->pef & RIO_PEF_SWITCH) {
		rswitch = rdev->rswitch;
		rswitch->route_table = NULL;
	}

	if (strlen(dev_info.name))
		dev_set_name(&rdev->dev, "%s", dev_info.name);
	else if (rdev->pef & RIO_PEF_SWITCH)
		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
	else
		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);

	INIT_LIST_HEAD(&rdev->net_list);
	rdev->dev.parent = &mport->net->dev;
	rio_attach_device(rdev);
	rdev->dev.release = rio_release_dev;

	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
				   0, 0xffff);
	err = rio_add_device(rdev);
	if (err) {
		put_device(&rdev->dev);
		return err;
	}

	rio_dev_get(rdev);

	return 0;
cleanup:
	kfree(rdev);
	return err;
}
static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
{
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev = NULL;
	struct device *dev;
	struct rio_mport *mport;
	struct rio_net *net;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	mport = priv->md->mport;

	/* If device name is specified, removal by name has priority */
	if (strlen(dev_info.name)) {
		dev = bus_find_device_by_name(&rio_bus_type, NULL,
					      dev_info.name);
		if (dev)
			rdev = to_rio_dev(dev);
	} else {
		do {
			rdev = rio_get_comptag(dev_info.comptag, rdev);
			if (rdev && rdev->dev.parent == &mport->net->dev &&
			    rdev->destid == dev_info.destid &&
			    rdev->hopcount == dev_info.hopcount)
				break;
		} while (rdev);
	}

	if (!rdev) {
		rmcd_debug(RDEV,
			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
			dev_info.name, dev_info.comptag, dev_info.destid,
			dev_info.hopcount);
		return -ENODEV;
	}

	net = rdev->net;
	rio_dev_put(rdev);
	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);

	if (list_empty(&net->devices)) {
		rio_free_net(net);
		mport->net = NULL;
	}

	return 0;
}
/*
 * Mport cdev management
 */

/*
 * mport_cdev_open() - Open character device (mport)
 */
static int mport_cdev_open(struct inode *inode, struct file *filp)
{
	int ret;
	int minor = iminor(inode);
	struct mport_dev *chdev;
	struct mport_cdev_priv *priv;

	/* Test for valid device */
	if (minor >= RIO_MAX_MPORTS) {
		rmcd_error("Invalid minor device number");
		return -EINVAL;
	}

	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);

	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);

	if (atomic_read(&chdev->active) == 0)
		return -ENODEV;

	get_device(&chdev->dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		put_device(&chdev->dev);
		return -ENOMEM;
	}

	priv->md = chdev;

	INIT_LIST_HEAD(&priv->db_filters);
	INIT_LIST_HEAD(&priv->pw_filters);
	spin_lock_init(&priv->fifo_lock);
	init_waitqueue_head(&priv->event_rx_wait);
	ret = kfifo_alloc(&priv->event_fifo,
			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
			  GFP_KERNEL);
	if (ret < 0) {
		put_device(&chdev->dev);
		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
		ret = -ENOMEM;
		goto err_fifo;
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	INIT_LIST_HEAD(&priv->async_list);
	spin_lock_init(&priv->req_lock);
	mutex_init(&priv->dma_lock);
#endif
	mutex_lock(&chdev->file_mutex);
	list_add_tail(&priv->list, &chdev->file_list);
	mutex_unlock(&chdev->file_mutex);

	filp->private_data = priv;
	goto out;
err_fifo:
	kfree(priv);
out:
	return ret;
}
static int mport_cdev_fasync(int fd, struct file *filp, int mode)
{
	struct mport_cdev_priv *priv = filp->private_data;

	return fasync_helper(fd, filp, mode, &priv->async_queue);
}
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
static void mport_cdev_release_dma(struct file *filp)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	struct mport_dma_req *req, *req_next;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	long wret;
	LIST_HEAD(list);

	rmcd_debug(EXIT, "from filp=%p %s(%d)",
		   filp, current->comm, task_pid_nr(current));

	if (!priv->dmach) {
		rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
		return;
	}

	md = priv->md;

	spin_lock(&priv->req_lock);
	if (!list_empty(&priv->async_list)) {
		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		list_splice_init(&priv->async_list, &list);
	}
	spin_unlock(&priv->req_lock);

	if (!list_empty(&list)) {
		rmcd_debug(EXIT, "temp list not empty");
		list_for_each_entry_safe(req, req_next, &list, node) {
			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
				   req->filp, req->cookie,
				   completion_done(&req->req_comp)?"yes":"no");
			list_del(&req->node);
			kref_put(&req->refcount, dma_req_free);
		}
	}

	put_dma_channel(priv);
	wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);

	if (wret <= 0) {
		rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
			   current->comm, task_pid_nr(current), wret);
	}

	if (priv->dmach != priv->md->dma_chan) {
		rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		rio_release_dma(priv->dmach);
	} else {
		rmcd_debug(EXIT, "Adjust default DMA channel refcount");
		kref_put(&md->dma_ref, mport_release_def_dma);
	}

	priv->dmach = NULL;
}
#else
#define mport_cdev_release_dma(priv) do {} while (0)
#endif
/*
 * mport_cdev_release() - Release character device
 */
static int mport_cdev_release(struct inode *inode, struct file *filp)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *chdev;
	struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
	struct rio_mport_db_filter *db_filter, *db_filter_next;
	struct rio_mport_mapping *map, *_map;
	unsigned long flags;

	rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);

	chdev = priv->md;
	mport_cdev_release_dma(filp);

	priv->event_mask = 0;

	spin_lock_irqsave(&chdev->pw_lock, flags);
	if (!list_empty(&priv->pw_filters)) {
		list_for_each_entry_safe(pw_filter, pw_filter_next,
					 &priv->pw_filters, priv_node)
			rio_mport_delete_pw_filter(pw_filter);
	}
	spin_unlock_irqrestore(&chdev->pw_lock, flags);

	spin_lock_irqsave(&chdev->db_lock, flags);
	list_for_each_entry_safe(db_filter, db_filter_next,
				 &priv->db_filters, priv_node) {
		rio_mport_delete_db_filter(db_filter);
	}
	spin_unlock_irqrestore(&chdev->db_lock, flags);

	kfifo_free(&priv->event_fifo);

	mutex_lock(&chdev->buf_mutex);
	list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
		if (map->filp == filp) {
			rmcd_debug(EXIT, "release mapping %p filp=%p",
				   map->virt_addr, filp);
			kref_put(&map->ref, mport_release_mapping);
		}
	}
	mutex_unlock(&chdev->buf_mutex);

	mport_cdev_fasync(-1, filp, 0);
	filp->private_data = NULL;
	mutex_lock(&chdev->file_mutex);
	list_del(&priv->list);
	mutex_unlock(&chdev->file_mutex);
	put_device(&chdev->dev);
	kfree(priv);
	return 0;
}
/*
 * mport_cdev_ioctl() - IOCTLs for character device
 */
static long mport_cdev_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int err = -EINVAL;
	struct mport_cdev_priv *data = filp->private_data;
	struct mport_dev *md = data->md;

	if (atomic_read(&md->active) == 0)
		return -ENODEV;

	switch (cmd) {
	case RIO_MPORT_MAINT_READ_LOCAL:
		return rio_mport_maint_rd(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_WRITE_LOCAL:
		return rio_mport_maint_wr(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_READ_REMOTE:
		return rio_mport_maint_rd(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_WRITE_REMOTE:
		return rio_mport_maint_wr(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_HDID_SET:
		return maint_hdid_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_COMPTAG_SET:
		return maint_comptag_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_PORT_IDX_GET:
		return maint_port_idx_get(data, (void __user *)arg);
	case RIO_MPORT_GET_PROPERTIES:
		md->properties.hdid = md->mport->host_deviceid;
		if (copy_to_user((void __user *)arg, &(md->properties),
				 sizeof(md->properties)))
			return -EFAULT;
		return 0;
	case RIO_ENABLE_DOORBELL_RANGE:
		return rio_mport_add_db_filter(data, (void __user *)arg);
	case RIO_DISABLE_DOORBELL_RANGE:
		return rio_mport_remove_db_filter(data, (void __user *)arg);
	case RIO_ENABLE_PORTWRITE_RANGE:
		return rio_mport_add_pw_filter(data, (void __user *)arg);
	case RIO_DISABLE_PORTWRITE_RANGE:
		return rio_mport_remove_pw_filter(data, (void __user *)arg);
	case RIO_SET_EVENT_MASK:
		data->event_mask = (u32)arg;
		return 0;
	case RIO_GET_EVENT_MASK:
		if (copy_to_user((void __user *)arg, &data->event_mask,
				 sizeof(u32)))
			return -EFAULT;
		return 0;
	case RIO_MAP_OUTBOUND:
		return rio_mport_obw_map(filp, (void __user *)arg);
	case RIO_MAP_INBOUND:
		return rio_mport_map_inbound(filp, (void __user *)arg);
	case RIO_UNMAP_OUTBOUND:
		return rio_mport_obw_free(filp, (void __user *)arg);
	case RIO_UNMAP_INBOUND:
		return rio_mport_inbound_free(filp, (void __user *)arg);
	case RIO_ALLOC_DMA:
		return rio_mport_alloc_dma(filp, (void __user *)arg);
	case RIO_FREE_DMA:
		return rio_mport_free_dma(filp, (void __user *)arg);
	case RIO_WAIT_FOR_ASYNC:
		return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
	case RIO_TRANSFER:
		return rio_mport_transfer_ioctl(filp, (void __user *)arg);
	case RIO_DEV_ADD:
		return rio_mport_add_riodev(data, (void __user *)arg);
	case RIO_DEV_DEL:
		return rio_mport_del_riodev(data, (void __user *)arg);
	default:
		break;
	}

	return err;
}
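/*
 * User-space usage sketch (illustrative; the device path and "fd" are
 * hypothetical): each mport is exposed as a character device node named
 * DEV_NAME plus the mport id, and RIO_MPORT_GET_PROPERTIES is typically
 * the first call an application makes:
 *
 *	int fd = open("/dev/rio_mport0", O_RDWR);
 *	struct rio_mport_properties props;
 *	ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props);
 */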
/*
 * mport_release_mapping - free mapping resources and info structure
 * @ref: a pointer to the kref within struct rio_mport_mapping
 *
 * NOTE: Shall be called while holding buf_mutex.
 */
static void mport_release_mapping(struct kref *ref)
{
	struct rio_mport_mapping *map =
			container_of(ref, struct rio_mport_mapping, ref);
	struct rio_mport *mport = map->md->mport;

	rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
		   map->dir, map->virt_addr,
		   &map->phys_addr, mport->name);

	list_del(&map->node);

	switch (map->dir) {
	case MAP_INBOUND:
		rio_unmap_inb_region(mport, map->phys_addr);
	case MAP_DMA:
		dma_free_coherent(mport->dev.parent, map->size,
				  map->virt_addr, map->phys_addr);
		break;
	case MAP_OUTBOUND:
		rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
		break;
	}
	kfree(map);
}
static void mport_mm_open(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "%pad", &map->phys_addr);
	kref_get(&map->ref);
}

static void mport_mm_close(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "%pad", &map->phys_addr);
	mutex_lock(&map->md->buf_mutex);
	kref_put(&map->ref, mport_release_mapping);
	mutex_unlock(&map->md->buf_mutex);
}

static const struct vm_operations_struct vm_ops = {
	.open =	mport_mm_open,
	.close = mport_mm_close,
};
static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	size_t size = vma->vm_end - vma->vm_start;
	dma_addr_t baddr;
	unsigned long offset;
	int found = 0, ret;
	struct rio_mport_mapping *map;

	rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
		   (unsigned int)size, vma->vm_pgoff);

	md = priv->md;
	baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (baddr >= map->phys_addr &&
		    baddr < (map->phys_addr + map->size)) {
			found = 1;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (!found)
		return -ENOMEM;

	offset = baddr - map->phys_addr;

	if (size + offset > map->size)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);

	if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
				map->virt_addr, map->phys_addr, map->size);
	else if (map->dir == MAP_OUTBOUND) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
	} else {
		rmcd_error("Attempt to mmap unsupported mapping type");
		ret = -EIO;
	}

	if (ret == 0) {
		vma->vm_private_data = map;
		vma->vm_ops = &vm_ops;
		mport_mm_open(vma);
	} else {
		rmcd_error("MMAP exit with err=%d", ret);
	}

	return ret;
}
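/*
 * User-space usage sketch (illustrative): a coherent DMA buffer allocated
 * with RIO_ALLOC_DMA is mapped by passing the returned dma_handle as the
 * mmap() offset, which this handler resolves against md->mappings:
 *
 *	struct rio_dma_mem dmem = { .length = 0x10000 };
 *	ioctl(fd, RIO_ALLOC_DMA, &dmem);
 *	void *p = mmap(NULL, dmem.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, dmem.dma_handle);
 */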
static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
{
	struct mport_cdev_priv *priv = filp->private_data;

	poll_wait(filp, &priv->event_rx_wait, wait);
	if (kfifo_len(&priv->event_fifo))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	int copied;
	ssize_t ret;

	if (!count)
		return 0;

	if (kfifo_is_empty(&priv->event_fifo) &&
	    (filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	if (count % sizeof(struct rio_event))
		return -EINVAL;

	ret = wait_event_interruptible(priv->event_rx_wait,
					kfifo_len(&priv->event_fifo) != 0);
	if (ret)
		return ret;

	while (ret < count) {
		if (kfifo_to_user(&priv->event_fifo, buf,
		      sizeof(struct rio_event), &copied))
			return -EFAULT;
		ret += copied;
		buf += copied;
	}

	return ret;
}
static ssize_t mport_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_mport *mport = priv->md->mport;
	struct rio_event event;
	int len, ret;

	if (!count)
		return 0;

	if (count % sizeof(event))
		return -EINVAL;

	len = 0;
	while ((count - len) >= (int)sizeof(event)) {
		if (copy_from_user(&event, buf, sizeof(event)))
			return -EFAULT;

		if (event.header != RIO_DOORBELL)
			return -EINVAL;

		ret = rio_mport_send_doorbell(mport,
					      event.u.doorbell.rioid,
					      event.u.doorbell.payload);
		if (ret < 0)
			return ret;

		len += sizeof(event);
		buf += sizeof(event);
	}

	return len;
}
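/*
 * User-space usage sketch (illustrative): doorbells are sent by writing
 * RIO_DOORBELL events and received by enabling a doorbell range via
 * RIO_ENABLE_DOORBELL_RANGE, setting the event mask, and reading rio_event
 * records back:
 *
 *	struct rio_event ev = {
 *		.header = RIO_DOORBELL,
 *		.u.doorbell = { .rioid = 5, .payload = 0xbeef },
 *	};
 *	write(fd, &ev, sizeof(ev));                // send
 *
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_DOORBELL);
 *	read(fd, &ev, sizeof(ev));                 // blocking receive
 */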
static const struct file_operations mport_fops = {
	.owner		= THIS_MODULE,
	.open		= mport_cdev_open,
	.release	= mport_cdev_release,
	.poll		= mport_cdev_poll,
	.read		= mport_read,
	.write		= mport_write,
	.mmap		= mport_cdev_mmap,
	.fasync		= mport_cdev_fasync,
	.unlocked_ioctl = mport_cdev_ioctl
};
/*
 * Character device management
 */

static void mport_device_release(struct device *dev)
{
	struct mport_dev *md;

	rmcd_debug(EXIT, "%s", dev_name(dev));
	md = container_of(dev, struct mport_dev, dev);
	kfree(md);
}
/*
 * mport_cdev_add() - Create mport_dev from rio_mport
 * @mport:	RapidIO master port
 */
static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
{
	int ret = 0;
	struct mport_dev *md;
	struct rio_mport_attr attr;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md) {
		rmcd_error("Unable to allocate a device object");
		return NULL;
	}

	md->mport = mport;
	mutex_init(&md->buf_mutex);
	mutex_init(&md->file_mutex);
	INIT_LIST_HEAD(&md->file_list);

	device_initialize(&md->dev);
	md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
	md->dev.class = dev_class;
	md->dev.parent = &mport->dev;
	md->dev.release = mport_device_release;
	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
	atomic_set(&md->active, 1);

	cdev_init(&md->cdev, &mport_fops);
	md->cdev.owner = THIS_MODULE;

	INIT_LIST_HEAD(&md->doorbells);
	spin_lock_init(&md->db_lock);
	INIT_LIST_HEAD(&md->portwrites);
	spin_lock_init(&md->pw_lock);
	INIT_LIST_HEAD(&md->mappings);

	md->properties.id = mport->id;
	md->properties.sys_size = mport->sys_size;
	md->properties.hdid = mport->host_deviceid;
	md->properties.index = mport->index;

	/* The transfer_mode property will be returned through mport query
	 * interface
	 */
#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
#else
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
#endif

	ret = cdev_device_add(&md->cdev, &md->dev);
	if (ret) {
		rmcd_error("Failed to register mport %d (err=%d)",
			   mport->id, ret);
		goto err_cdev;
	}
	ret = rio_query_mport(mport, &attr);
	if (!ret) {
		md->properties.flags = attr.flags;
		md->properties.link_speed = attr.link_speed;
		md->properties.link_width = attr.link_width;
		md->properties.dma_max_sge = attr.dma_max_sge;
		md->properties.dma_max_size = attr.dma_max_size;
		md->properties.dma_align = attr.dma_align;
		md->properties.cap_sys_size = 0;
		md->properties.cap_transfer_mode = 0;
		md->properties.cap_addr_size = 0;
	} else
		pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
			mport->name, MAJOR(dev_number), mport->id);

	mutex_lock(&mport_devs_lock);
	list_add_tail(&md->node, &mport_devs);
	mutex_unlock(&mport_devs_lock);

	pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
		mport->name, MAJOR(dev_number), mport->id);

	return md;

err_cdev:
	put_device(&md->dev);
	return NULL;
}
/*
 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
 *                              associated DMA channels.
 */
static void mport_cdev_terminate_dma(struct mport_dev *md)
{
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct mport_cdev_priv *client;

	rmcd_debug(DMA, "%s", dev_name(&md->dev));

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->dmach) {
			dmaengine_terminate_all(client->dmach);
			rio_release_dma(client->dmach);
		}
	}
	mutex_unlock(&md->file_mutex);

	if (md->dma_chan) {
		dmaengine_terminate_all(md->dma_chan);
		rio_release_dma(md->dma_chan);
		md->dma_chan = NULL;
	}
#endif
}
/*
 * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
 *                            device files.
 */
static int mport_cdev_kill_fasync(struct mport_dev *md)
{
	unsigned int files = 0;
	struct mport_cdev_priv *client;

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->async_queue)
			kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
		files++;
	}
	mutex_unlock(&md->file_mutex);
	return files;
}
/*
 * mport_cdev_remove() - Remove mport character device
 * @dev:	Mport device to remove
 */
static void mport_cdev_remove(struct mport_dev *md)
{
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
	atomic_set(&md->active, 0);
	mport_cdev_terminate_dma(md);
	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
	cdev_device_del(&md->cdev, &md->dev);
	mport_cdev_kill_fasync(md);

	/* TODO: do we need to give clients some time to close file
	 * descriptors? Simple wait for XX, or kref?
	 */

	/*
	 * Release DMA buffers allocated for the mport device.
	 * Disable associated inbound RapidIO requests mapping if applicable.
	 */
	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		kref_put(&map->ref, mport_release_mapping);
	}
	mutex_unlock(&md->buf_mutex);

	if (!list_empty(&md->mappings))
		rmcd_warn("WARNING: %s pending mappings on removal",
			  md->mport->name);

	rio_release_inb_dbell(md->mport, 0, 0x0fff);

	put_device(&md->dev);
}
/*
 * RIO rio_mport_interface driver
 */

/*
 * mport_add_mport() - Add rio_mport from LDM device struct
 * @dev:		Linux device model struct
 * @class_intf:	Linux class_interface
 */
static int mport_add_mport(struct device *dev,
		struct class_interface *class_intf)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev = NULL;

	mport = to_rio_mport(dev);
	if (!mport)
		return -ENODEV;

	chdev = mport_cdev_add(mport);
	if (!chdev)
		return -ENODEV;

	return 0;
}

/*
 * mport_remove_mport() - Remove rio_mport from global list
 * TODO remove device from global mport_dev list
 */
static void mport_remove_mport(struct device *dev,
		struct class_interface *class_intf)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev;
	int found = 0;

	mport = to_rio_mport(dev);
	rmcd_debug(EXIT, "Remove %s", mport->name);

	mutex_lock(&mport_devs_lock);
	list_for_each_entry(chdev, &mport_devs, node) {
		if (chdev->mport->id == mport->id) {
			atomic_set(&chdev->active, 0);
			list_del(&chdev->node);
			found = 1;
			break;
		}
	}
	mutex_unlock(&mport_devs_lock);

	if (found)
		mport_cdev_remove(chdev);
}
/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class		= &rio_mport_class,
	.add_dev	= mport_add_mport,
	.remove_dev	= mport_remove_mport,
};
/*
 * Linux kernel module
 */

/*
 * mport_init - Driver module loading
 */
static int __init mport_init(void)
{
	int ret;

	/* Create device class needed by udev */
	dev_class = class_create(THIS_MODULE, DRV_NAME);
	if (IS_ERR(dev_class)) {
		rmcd_error("Unable to create " DRV_NAME " class");
		return PTR_ERR(dev_class);
	}

	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
	if (ret < 0)
		goto err_chr;

	rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));

	/* Register to rio_mport_interface */
	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		rmcd_error("class_interface_register() failed, err=%d", ret);
		goto err_cli;
	}

	return 0;

err_cli:
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
err_chr:
	class_destroy(dev_class);
	return ret;
}
/*
 * mport_exit - Driver module unloading
 */
static void __exit mport_exit(void)
{
	class_interface_unregister(&rio_mport_interface);
	class_destroy(dev_class);
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
}

module_init(mport_init);
module_exit(mport_exit);