// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 *    Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
 *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 *    Aurelien Jacquiot <a-jacquiot@ti.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#include "../rio.h"

#define DRV_NAME	"rio_mport"
#define DRV_PREFIX	DRV_NAME ": "
#define DEV_NAME	"rio_mport"
#define DRV_VERSION	"1.0.0"

/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_DMA		= BIT(4), /* DMA transfer messages */
	DBG_MMAP	= BIT(5), /* mapping messages */
	DBG_IBW		= BIT(6), /* inbound window */
	DBG_EVENT	= BIT(7), /* event handling messages */
	DBG_OBW		= BIT(8), /* outbound window messages */
	DBG_DBELL	= BIT(9), /* doorbell messages */
	DBG_ALL		= ~0,
};

#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)		\
	do {					\
		if (DBG_##level & dbg_level)	\
			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
	} while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)
82 MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
83 MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
84 MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
85 MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
86 MODULE_DESCRIPTION("RapidIO mport character device driver");
87 MODULE_LICENSE("GPL");
88 MODULE_VERSION(DRV_VERSION);

static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
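
/*
 * Illustrative note (editor-added, not from the original source): dbg_level
 * is a bitmask of the DBG_* values above. Assuming the module builds as
 * rio_mport_cdev.ko, it could be set at load time or at runtime:
 *
 *	modprobe rio_mport_cdev dbg_level=0x3ff
 *	echo 0x10 > /sys/module/rio_mport_cdev/parameters/dbg_level
 */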

/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
	void		*ib_base;
	dma_addr_t	ib_phys;
	u32		ib_size;
	u64		ib_rio_base;
	bool		ib_map;
	struct file	*filp;
};

/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};

struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	enum rio_mport_map_dir dir;
	u16 rioid;
	u64 rio_addr;
	dma_addr_t phys_addr; /* for mmap */
	void *virt_addr; /* kernel address, for dma_free_coherent */
	u64 size;
	struct kref ref; /* refcount of vmas sharing the mapping */
	struct file *filp;
};

struct rio_mport_dma_map {
	int valid;
	u64 length;
	void *vaddr;
	dma_addr_t paddr;
};

#define MPORT_MAX_DMA_BUFS	16
#define MPORT_EVENT_DEPTH	10

/*
 * mport_dev - driver-specific structure that represents mport device
 * @active     mport device status flag
 * @node       list node to maintain list of registered mports
 * @cdev       character device
 * @dev        associated device object
 * @mport      associated subsystem's master port device object
 * @buf_mutex  lock for buffer handling
 * @file_mutex lock for open files list
 * @file_list  list of open files on given mport
 * @properties properties of this mport
 * @portwrites queue of inbound portwrites
 * @pw_lock    lock for port write queue
 * @mappings   queue for memory mappings
 * @dma_chan   DMA channels associated with this device
 */
struct mport_dev {
	atomic_t		active;
	struct list_head	node;
	struct cdev		cdev;
	struct device		dev;
	struct rio_mport	*mport;
	struct mutex		buf_mutex;
	struct mutex		file_mutex;
	struct list_head	file_list;
	struct rio_mport_properties	properties;
	struct list_head	doorbells;
	spinlock_t		db_lock;
	struct list_head	portwrites;
	spinlock_t		pw_lock;
	struct list_head	mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dma_chan;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};

/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md            master port character device object
 * @async_queue   asynchronous notification queue
 * @list          file objects tracking list
 * @db_filters    inbound doorbell filters for this descriptor
 * @pw_filters    portwrite filters for this descriptor
 * @event_fifo    event fifo for this descriptor
 * @event_rx_wait wait queue for this descriptor
 * @fifo_lock     lock for event_fifo
 * @event_mask    event mask for this descriptor
 * @dmach         DMA engine channel allocated for specific file object
 */
struct mport_cdev_priv {
	struct mport_dev	*md;
	struct fasync_struct	*async_queue;
	struct list_head	list;
	struct list_head	db_filters;
	struct list_head	pw_filters;
	struct kfifo		event_fifo;
	wait_queue_head_t	event_rx_wait;
	spinlock_t		fifo_lock;
	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dmach;
	struct list_head	async_list;
	spinlock_t		req_lock;
	struct mutex		dma_lock;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};

/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node   node in mport device's list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual portwrite filter
 */
struct rio_mport_pw_filter {
	struct list_head md_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_pw_filter filter;
};

/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node node in mport device's doorbell list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual doorbell filter
 */
struct rio_mport_db_filter {
	struct list_head data_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_doorbell_filter filter;
};

static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;

static void mport_release_mapping(struct kref *ref);

static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length/sizeof(u32);
	offset = maint_io.offset;

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_read_config_32(mport,
				offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
				   buffer, maint_io.length)))
		ret = -EFAULT;
out:
	vfree(buffer);
	return ret;
}

static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret = -EINVAL, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length;

	if (unlikely(copy_from_user(buffer,
			(void __user *)(uintptr_t)maint_io.buffer, length))) {
		ret = -EFAULT;
		goto out;
	}

	offset = maint_io.offset;
	length /= sizeof(u32);

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

out:
	vfree(buffer);
	return ret;
}
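
/*
 * Illustrative sketch (editor-added, not part of the driver): driving the
 * maintenance read path above from user space through the ioctl interface
 * declared in <linux/rio_mport_cdev.h>. "fd", "destid" and "hc" are
 * hypothetical.
 *
 *	uint32_t buf[16];
 *	struct rio_mport_maint_io io = {
 *		.rioid    = destid,		// target destination ID
 *		.hopcount = hc,			// hops to the target
 *		.offset   = 0,			// config offset, 4-byte aligned
 *		.length   = sizeof(buf),	// multiple of 4
 *		.buffer   = (uintptr_t)buf,
 *	};
 *	if (ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io) < 0)
 *		perror("maint read");
 */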

/*
 * Inbound/outbound memory mapping functions
 */
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
				  u16 rioid, u64 raddr, u32 size,
				  dma_addr_t *paddr)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
	if (ret < 0)
		goto err_map_outb;

	map->dir = MAP_OUTBOUND;
	map->rioid = rioid;
	map->rio_addr = raddr;
	map->size = size;
	map->phys_addr = *paddr;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;
err_map_outb:
	kfree(map);
	return ret;
}

static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
			       u16 rioid, u64 raddr, u32 size,
			       dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_OUTBOUND)
			continue;
		if (rioid == map->rioid &&
		    raddr == map->rio_addr && size == map->size) {
			*paddr = map->phys_addr;
			err = 0;
			break;
		} else if (rioid == map->rioid &&
			   raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}

	/* If not found, create new */
	if (err == -ENOMEM) {
		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
							size, paddr);
	}
	mutex_unlock(&md->buf_mutex);
	return err;
}

static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *data = priv->md;
	struct rio_mmap map;
	dma_addr_t paddr;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
		   map.rioid, map.rio_addr, map.length);

	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err= %d", ret);
		return ret;
	}

	map.handle = paddr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
		return -EFAULT;
	return 0;
}

/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file pointer associated with the call
 * @arg:  buffer handle returned by allocation routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	if (!md->mport->ops->unmap_outb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	rmcd_debug(OBW, "h=0x%llx", handle);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}
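
/*
 * Illustrative sketch (editor-added, not part of the driver): mapping an
 * outbound window from user space and mmap()ing it, assuming the UAPI types
 * from <linux/rio_mport_cdev.h>. "fd", "destid" and the sizes are
 * hypothetical.
 *
 *	struct rio_mmap ob = {
 *		.rioid    = destid,
 *		.rio_addr = 0x10000000,		// remote RapidIO address
 *		.length   = 0x10000,
 *	};
 *	ioctl(fd, RIO_MAP_OUTBOUND, &ob);	// fills ob.handle (phys addr)
 *	void *p = mmap(NULL, ob.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, ob.handle);
 *	...
 *	munmap(p, ob.length);
 *	ioctl(fd, RIO_UNMAP_OUTBOUND, &ob.handle);
 */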

/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg:  new host device id
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u16 hdid;

	if (copy_from_user(&hdid, arg, sizeof(hdid)))
		return -EFAULT;

	md->mport->host_deviceid = hdid;
	md->properties.hdid = hdid;
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);

	return 0;
}

/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg:  new Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 comptag;

	if (copy_from_user(&comptag, arg, sizeof(comptag)))
		return -EFAULT;

	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

	return 0;
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
	struct kref refcount;
	struct list_head node;
	struct file *filp;
	struct mport_cdev_priv *priv;
	enum rio_transfer_sync sync;
	struct sg_table sgt;
	struct page **page_list;
	unsigned int nr_pages;
	struct rio_mport_mapping *map;
	struct dma_chan *dmach;
	enum dma_data_direction dir;
	dma_cookie_t cookie;
	enum dma_status	status;
	struct completion req_comp;
};

static void mport_release_def_dma(struct kref *dma_ref)
{
	struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
	struct mport_cdev_priv *priv =
			container_of(dma_ref, struct mport_cdev_priv, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
	complete(&priv->comp);
}

static void dma_req_free(struct kref *ref)
{
	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
			refcount);
	struct mport_cdev_priv *priv = req->priv;

	dma_unmap_sg(req->dmach->device->dev,
		     req->sgt.sgl, req->sgt.nents, req->dir);
	sg_free_table(&req->sgt);
	if (req->page_list) {
		unpin_user_pages(req->page_list, req->nr_pages);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
}

static void dma_xfer_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_cdev_priv *priv = req->priv;

	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
	kref_put(&req->refcount, dma_req_free);
}

/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer object.
 * Returns pointer to DMA transaction descriptor allocated by DMA driver on
 * success or ERR_PTR (and/or NULL) if failed. Caller must check the returned
 * pointer using the IS_ERR macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
	enum dma_ctrl_flags flags)
{
	struct rio_dma_data tx_data;

	tx_data.sg = sgt->sgl;
	tx_data.sg_len = nents;
	tx_data.rio_addr_u = 0;
	tx_data.rio_addr = transfer->rio_addr;
	if (dir == DMA_MEM_TO_DEV) {
		switch (transfer->method) {
		case RIO_EXCHANGE_NWRITE:
			tx_data.wr_type = RDW_ALL_NWRITE;
			break;
		case RIO_EXCHANGE_NWRITE_R_ALL:
			tx_data.wr_type = RDW_ALL_NWRITE_R;
			break;
		case RIO_EXCHANGE_NWRITE_R:
			tx_data.wr_type = RDW_LAST_NWRITE_R;
			break;
		case RIO_EXCHANGE_DEFAULT:
			tx_data.wr_type = RDW_DEFAULT;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}

/* Request DMA channel associated with this mport device.
 * Try to request DMA channel for every new process that opened given
 * mport. If a new DMA channel is not available use default channel
 * which is the first DMA channel opened on mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
	mutex_lock(&priv->dma_lock);
	if (!priv->dmach) {
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
	return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
	kref_put(&priv->dma_ref, mport_release_dma);
}

/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
			  struct rio_transfer_io *xfer,
			  enum rio_transfer_sync sync, int nents)
{
	struct mport_cdev_priv *priv;
	struct sg_table *sgt;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	enum dma_transfer_direction dir;
	long wret;
	int ret = 0;

	priv = req->priv;
	sgt = &req->sgt;

	chan = priv->dmach;
	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
		   current->comm, task_pid_nr(current),
		   dev_name(&chan->dev->device),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	/* Initialize DMA transaction request */
	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

	if (!tx) {
		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	} else if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			xfer->rio_addr, xfer->length);
		goto err_out;
	}

	tx->callback = dma_xfer_callback;
	tx->callback_param = req;

	req->status = DMA_IN_PROGRESS;
	kref_get(&req->refcount);

	cookie = dmaengine_submit(tx);
	req->cookie = cookie;

	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);

	if (dma_submit_error(cookie)) {
		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
			   cookie, xfer->rio_addr, xfer->length);
		kref_put(&req->refcount, dma_req_free);
		ret = -EIO;
		goto err_out;
	}

	dma_async_issue_pending(chan);

	if (sync == RIO_TRANSFER_ASYNC) {
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->async_list);
		spin_unlock(&priv->req_lock);
		return cookie;
	} else if (sync == RIO_TRANSFER_FAF)
		return 0;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
		       current->comm, task_pid_nr(current),
		       (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal but DMA may
		 * be in progress
		 */
		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
			current->comm, task_pid_nr(current),
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -EINTR;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion was signaled with error */
		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
			current->comm, task_pid_nr(current),
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			cookie, req->status, ret);
		ret = -EIO;
	}

err_out:
	return ret;
}

/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *                               DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
		 enum rio_transfer_sync sync, enum dma_data_direction dir,
		 struct rio_transfer_io *xfer)
{
	struct mport_cdev_priv *priv = filp->private_data;
	unsigned long nr_pages = 0;
	struct page **page_list = NULL;
	struct mport_dma_req *req;
	struct mport_dev *md = priv->md;
	struct dma_chan *chan;
	int ret;
	int nents;

	if (xfer->length == 0)
		return -EINVAL;
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = get_dma_channel(priv);
	if (ret) {
		kfree(req);
		return ret;
	}
	chan = priv->dmach;

	kref_init(&req->refcount);
	init_completion(&req->req_comp);
	req->dir = dir;
	req->filp = filp;
	req->priv = priv;
	req->dmach = chan;
	req->sync = sync;

	/*
	 * If parameter loc_addr != NULL, we are transferring data from/to
	 * data buffer allocated in user-space: lock in memory user-space
	 * buffer pages and build an SG table for DMA transfer request
	 *
	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
	 * used for DMA data transfers: build single entry SG table using
	 * offset within the internal buffer specified by handle parameter.
	 */
	if (xfer->loc_addr) {
		unsigned int offset;
		long pinned;

		offset = lower_32_bits(offset_in_page(xfer->loc_addr));
		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

		page_list = kmalloc_array(nr_pages,
					  sizeof(*page_list), GFP_KERNEL);
		if (page_list == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		pinned = pin_user_pages_fast(
				(unsigned long)xfer->loc_addr & PAGE_MASK,
				nr_pages,
				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
				page_list);

		if (pinned != nr_pages) {
			if (pinned < 0) {
				rmcd_error("pin_user_pages_fast err=%ld",
					   pinned);
				nr_pages = 0;
			} else {
				rmcd_error("pinned %ld out of %ld pages",
					   pinned, nr_pages);
				/*
				 * Set nr_pages up to mean "how many pages to unpin, in
				 * the error handler:
				 */
				nr_pages = pinned;
			}
			ret = -EFAULT;
			goto err_pg;
		}

		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
				offset, xfer->length, GFP_KERNEL);
		if (ret) {
			rmcd_error("sg_alloc_table failed with err=%d", ret);
			goto err_pg;
		}

		req->page_list = page_list;
		req->nr_pages = nr_pages;
	} else {
		dma_addr_t baddr;
		struct rio_mport_mapping *map;

		baddr = (dma_addr_t)xfer->handle;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (baddr >= map->phys_addr &&
			    baddr < (map->phys_addr + map->size)) {
				kref_get(&map->ref);
				req->map = map;
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);

		if (req->map == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		if (xfer->length + xfer->offset > map->size) {
			ret = -EINVAL;
			goto err_req;
		}

		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
		if (unlikely(ret)) {
			rmcd_error("sg_alloc_table failed for internal buf");
			goto err_req;
		}

		sg_set_buf(req->sgt.sgl,
			   map->virt_addr + (baddr - map->phys_addr) +
				xfer->offset, xfer->length);
	}

	nents = dma_map_sg(chan->device->dev,
			   req->sgt.sgl, req->sgt.nents, dir);
	if (nents == 0) {
		rmcd_error("Failed to map SG list");
		ret = -EFAULT;
		goto err_pg;
	}

	ret = do_dma_request(req, xfer, sync, nents);

	if (ret >= 0) {
		if (sync == RIO_TRANSFER_ASYNC)
			return ret; /* return ASYNC cookie */
	} else {
		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
	}

err_pg:
	if (!req->page_list) {
		unpin_user_pages(page_list, nr_pages);
		kfree(page_list);
	}
err_req:
	kref_put(&req->refcount, dma_req_free);
	return ret;
}

static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_transaction transaction;
	struct rio_transfer_io *transfer;
	enum dma_data_direction dir;
	int i, ret = 0;

	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
		return -EFAULT;

	if (transaction.count != 1) /* only single transfer for now */
		return -EINVAL;

	if ((transaction.transfer_mode &
	     priv->md->properties.transfer_mode) == 0)
		return -ENODEV;

	transfer = vmalloc(array_size(sizeof(*transfer), transaction.count));
	if (!transfer)
		return -ENOMEM;

	if (unlikely(copy_from_user(transfer,
				    (void __user *)(uintptr_t)transaction.block,
				    array_size(sizeof(*transfer), transaction.count)))) {
		ret = -EFAULT;
		goto out_free;
	}

	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE;
	for (i = 0; i < transaction.count && ret == 0; i++)
		ret = rio_dma_transfer(filp, transaction.transfer_mode,
				       transaction.sync, dir, &transfer[i]);

	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
				  transfer,
				  array_size(sizeof(*transfer), transaction.count))))
		ret = -EFAULT;

out_free:
	vfree(transfer);

	return ret;
}
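
/*
 * Illustrative sketch (editor-added, not part of the driver): submitting one
 * synchronous write through RIO_TRANSFER, assuming the UAPI types from
 * <linux/rio_mport_cdev.h>. "fd", "destid", "buf" and "len" are
 * hypothetical.
 *
 *	struct rio_transfer_io xfer = {
 *		.rioid    = destid,
 *		.rio_addr = 0x10000000,		// remote address
 *		.loc_addr = (uintptr_t)buf,	// user buffer (or 0 + .handle)
 *		.length   = len,
 *		.method   = RIO_EXCHANGE_DEFAULT,
 *	};
 *	struct rio_transaction tr = {
 *		.block         = (uintptr_t)&xfer,
 *		.count         = 1,		// driver accepts only 1
 *		.transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
 *		.sync          = RIO_TRANSFER_SYNC,
 *		.dir           = RIO_TRANSFER_DIR_WRITE,
 *	};
 *	ioctl(fd, RIO_TRANSFER, &tr);
 */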

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv;
	struct rio_async_tx_wait w_param;
	struct mport_dma_req *req;
	dma_cookie_t cookie;
	unsigned long tmo;
	long wret;
	int found = 0;
	int ret;

	priv = (struct mport_cdev_priv *)filp->private_data;

	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
		return -EFAULT;

	cookie = w_param.token;
	if (w_param.timeout)
		tmo = msecs_to_jiffies(w_param.timeout);
	else /* Use default DMA timeout */
		tmo = msecs_to_jiffies(dma_timeout);

	spin_lock(&priv->req_lock);
	list_for_each_entry(req, &priv->async_list, node) {
		if (req->cookie == cookie) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	spin_unlock(&priv->req_lock);

	if (!found)
		return -EAGAIN;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
		       current->comm, task_pid_nr(current),
		       (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -ETIMEDOUT;
		goto err_tmo;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal but DMA may
		 * be still in progress
		 */
		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
			current->comm, task_pid_nr(current),
			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -EINTR;
		goto err_tmo;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion signaled with transfer error */
		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
			current->comm, task_pid_nr(current),
			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
			req->status);
		ret = -EIO;
	} else
		ret = 0;

	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
		kref_put(&req->refcount, dma_req_free);

	return ret;

err_tmo:
	/* Return request back into async queue */
	spin_lock(&priv->req_lock);
	list_add_tail(&req->node, &priv->async_list);
	spin_unlock(&priv->req_lock);
	return ret;
}
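
/*
 * Illustrative sketch (editor-added, not part of the driver): reaping an
 * asynchronous transfer; the cookie returned by RIO_TRANSFER with
 * .sync == RIO_TRANSFER_ASYNC is passed back as the wait token. "fd" and
 * "cookie" are hypothetical.
 *
 *	struct rio_async_tx_wait w = {
 *		.token   = cookie,
 *		.timeout = 0,		// 0 = use default dma_timeout
 *	};
 *	ioctl(fd, RIO_WAIT_FOR_ASYNC, &w);
 */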

static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
			u64 size, struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		kfree(map);
		return -ENOMEM;
	}

	map->dir = MAP_DMA;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;

	return 0;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_dma_mem map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
	if (ret)
		return ret;

	map.dma_handle = mapping->phys_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		mutex_lock(&md->buf_mutex);
		kref_put(&mapping->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
		return -EFAULT;
	}

	return 0;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	int ret = -EFAULT;
	struct rio_mport_mapping *map, *_map;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;
	rmcd_debug(EXIT, "filp=%p", filp);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_DMA && map->phys_addr == handle &&
		    map->filp == filp) {
			kref_put(&map->ref, mport_release_mapping);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (ret == -EFAULT) {
		rmcd_debug(DMA, "ERR no matching mapping");
		return ret;
	}

	return 0;
}
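
/*
 * Illustrative sketch (editor-added, not part of the driver): allocating a
 * DMA-coherent kernel buffer and mapping it into user space; the returned
 * dma_handle serves both as the mmap offset and as the .handle used by
 * RIO_TRANSFER. "fd" is hypothetical.
 *
 *	struct rio_dma_mem dbuf = { .length = 0x10000 };
 *	ioctl(fd, RIO_ALLOC_DMA, &dbuf);
 *	void *p = mmap(NULL, dbuf.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, dbuf.dma_handle);
 *	...
 *	munmap(p, dbuf.length);
 *	ioctl(fd, RIO_FREE_DMA, &dbuf.dma_handle);
 */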

#else
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
				u64 raddr, u64 size,
				struct rio_mport_mapping **mapping)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	/* rio_map_inb_region() accepts u32 size */
	if (size > 0xffffffff)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	if (raddr == RIO_MAP_ANY_ADDR)
		raddr = map->phys_addr;
	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
	if (ret < 0)
		goto err_map_inb;

	map->dir = MAP_INBOUND;
	map->rio_addr = raddr;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;
	return 0;

err_map_inb:
	dma_free_coherent(mport->dev.parent, size,
			  map->virt_addr, map->phys_addr);
err_dma_alloc:
	kfree(map);
	return ret;
}

static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
			      u64 raddr, u64 size,
			      struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	if (raddr == RIO_MAP_ANY_ADDR)
		goto get_new;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_INBOUND)
			continue;
		if (raddr == map->rio_addr && size == map->size) {
			/* allow exact match only */
			*mapping = map;
			err = 0;
			break;
		} else if (raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (err != -ENOMEM)
		return err;
get_new:
	/* not found, create new */
	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}

static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_mmap map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (!md->mport->ops->map_inb)
		return -EPROTONOSUPPORT;
	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
					    map.length, &mapping);
	if (ret)
		return ret;

	map.handle = mapping->phys_addr;
	map.rio_addr = mapping->rio_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		/* Delete mapping if it was created by this request */
		if (ret == 0 && mapping->filp == filp) {
			mutex_lock(&md->buf_mutex);
			kref_put(&mapping->ref, mport_release_mapping);
			mutex_unlock(&md->buf_mutex);
		}
		return -EFAULT;
	}

	return 0;
}
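
/*
 * Illustrative sketch (editor-added, not part of the driver): exposing a
 * local buffer as an inbound RapidIO window. RIO_MAP_ANY_ADDR lets the
 * driver pick the RapidIO address; results come back in .rio_addr and
 * .handle. "fd" is hypothetical.
 *
 *	struct rio_mmap ib = {
 *		.rio_addr = RIO_MAP_ANY_ADDR,
 *		.length   = 0x10000,
 *	};
 *	ioctl(fd, RIO_MAP_INBOUND, &ib);
 *	void *p = mmap(NULL, ib.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, ib.handle);
 *	...
 *	ioctl(fd, RIO_UNMAP_INBOUND, &ib.handle);
 */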

/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                            previously allocated inbound DMA coherent buffer
 * @filp: file pointer associated with the call
 * @arg:  buffer handle returned by allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	if (!md->mport->ops->unmap_inb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}

/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg:  port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 port_idx = md->mport->index;

	rmcd_debug(MPORT, "port_index=%d", port_idx);

	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
		return -EFAULT;

	return 0;
}

static int rio_mport_add_event(struct mport_cdev_priv *priv,
			       struct rio_event *event)
{
	int overflow;

	if (!(priv->event_mask & event->header))
		return -EACCES;

	spin_lock(&priv->fifo_lock);
	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
			sizeof(*event)) != sizeof(*event);
	spin_unlock(&priv->fifo_lock);

	wake_up_interruptible(&priv->event_rx_wait);

	if (overflow) {
		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
		return -EBUSY;
	}

	return 0;
}

static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
				       u16 src, u16 dst, u16 info)
{
	struct mport_dev *data = dev_id;
	struct mport_cdev_priv *priv;
	struct rio_mport_db_filter *db_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_DOORBELL;
	event.u.doorbell.rioid = src;
	event.u.doorbell.payload = info;

	handled = 0;
	spin_lock(&data->db_lock);
	list_for_each_entry(db_filter, &data->doorbells, data_node) {
		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
		      db_filter->filter.rioid == src)) &&
		      info >= db_filter->filter.low &&
		      info <= db_filter->filter.high) {
			priv = db_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&data->db_lock);

	if (!handled)
		dev_warn(&data->dev,
			"%s: spurious DB received from 0x%x, info=0x%04x\n",
			__func__, src, info);
}

static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
				    rio_mport_doorbell_handler);
	if (ret) {
		rmcd_error("%s failed to register IBDB, err=%d",
			   dev_name(&md->dev), ret);
		return ret;
	}

	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
	if (db_filter == NULL) {
		rio_release_inb_dbell(md->mport, filter.low, filter.high);
		return -ENOMEM;
	}

	db_filter->filter = filter;
	db_filter->priv = priv;
	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&db_filter->priv_node, &priv->db_filters);
	list_add_tail(&db_filter->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);

	return 0;
}
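
/*
 * Illustrative sketch (editor-added, not part of the driver): enabling a
 * doorbell range and receiving matching doorbells as struct rio_event
 * records via read(). Note that RIO_SET_EVENT_MASK takes the mask by value.
 * "fd" is hypothetical.
 *
 *	struct rio_doorbell_filter f = {
 *		.rioid = RIO_INVALID_DESTID,	// match any sender
 *		.low   = 0,
 *		.high  = 0xffff,
 *	};
 *	ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &f);
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_DOORBELL);
 *	struct rio_event ev;
 *	read(fd, &ev, sizeof(ev));		// blocks until a doorbell
 */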

static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
	list_del(&db_filter->data_node);
	list_del(&db_filter->priv_node);
	kfree(db_filter);
}

static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret = -EINVAL;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	spin_lock_irqsave(&priv->md->db_lock, flags);
	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
		if (db_filter->filter.rioid == filter.rioid &&
		    db_filter->filter.low == filter.low &&
		    db_filter->filter.high == filter.high) {
			rio_mport_delete_db_filter(db_filter);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->md->db_lock, flags);

	if (!ret)
		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

	return ret;
}

static int rio_mport_match_pw(union rio_pw_msg *msg,
			      struct rio_pw_filter *filter)
{
	if ((msg->em.comptag & filter->mask) < filter->low ||
		(msg->em.comptag & filter->mask) > filter->high)
		return 0;
	return 1;
}

static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
				union rio_pw_msg *msg, int step)
{
	struct mport_dev *md = context;
	struct mport_cdev_priv *priv;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_PORTWRITE;
	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);

	handled = 0;
	spin_lock(&md->pw_lock);
	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
			priv = pw_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&md->pw_lock);

	if (!handled) {
		printk_ratelimited(KERN_WARNING DRV_NAME
			": mport%d received spurious PW from 0x%08x\n",
			mport->id, msg->em.comptag);
	}

	return 0;
}

static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int hadd = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
	if (pw_filter == NULL)
		return -ENOMEM;

	pw_filter->filter = filter;
	pw_filter->priv = priv;
	spin_lock_irqsave(&md->pw_lock, flags);
	if (list_empty(&md->portwrites))
		hadd = 1;
	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hadd) {
		int ret;

		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (ret) {
			dev_err(&md->dev,
				"%s: failed to add IB_PW handler, err=%d\n",
				__func__, ret);
			return ret;
		}
		rio_pw_enable(md->mport, 1);
	}

	return 0;
}
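
/*
 * Illustrative sketch (editor-added, not part of the driver): enabling
 * port-write capture. With .mask == 0 the comparison value is always 0, so
 * low = high = 0 matches every component tag. "fd" is hypothetical.
 *
 *	struct rio_pw_filter f = { .mask = 0, .low = 0, .high = 0 };
 *	ioctl(fd, RIO_ENABLE_PORTWRITE_RANGE, &f);
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_PORTWRITE);
 */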

static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
{
	list_del(&pw_filter->md_node);
	list_del(&pw_filter->priv_node);
	kfree(pw_filter);
}

static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
				     struct rio_pw_filter *b)
{
	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
		return 1;
	return 0;
}

static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int ret = -EINVAL;
	int hdel = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	spin_lock_irqsave(&md->pw_lock, flags);
	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
			rio_mport_delete_pw_filter(pw_filter);
			ret = 0;
			break;
		}
	}

	if (list_empty(&md->portwrites))
		hdel = 1;
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hdel) {
		rio_del_mport_pw_handler(md->mport, priv->md,
					 rio_mport_pw_handler);
		rio_pw_enable(md->mport, 0);
	}

	return ret;
}

/*
 * rio_release_dev - release routine for kernel RIO device object
 * @dev: kernel device object associated with a RIO device structure
 *
 * Frees the RIO device struct associated with the kernel device object.
 */
static void rio_release_dev(struct device *dev)
{
	struct rio_dev *rdev;

	rdev = to_rio_dev(dev);
	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
	kfree(rdev);
}

static void rio_release_net(struct device *dev)
{
	struct rio_net *net;

	net = to_rio_net(dev);
	rmcd_debug(RDEV, "net_%d", net->id);
	kfree(net);
}

/*
 * rio_mport_add_riodev - creates a kernel RIO device object
 *
 * Allocates a RIO device data structure and initializes required fields based
 * on device's configuration space contents.
 * If the device has switch capabilities, then a switch specific portion is
 * allocated and configured.
 */
static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
				void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev;
	struct rio_switch *rswitch = NULL;
	struct rio_mport *mport;
	struct device *dev;
	size_t size;
	u32 rval;
	u32 swpinfo = 0;
	u16 destid;
	u8 hopcount;
	int err;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
		   dev_info.comptag, dev_info.destid, dev_info.hopcount);

	dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
	if (dev) {
		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
		put_device(dev);
		return -EEXIST;
	}

	size = sizeof(*rdev);
	mport = md->mport;
	destid = dev_info.destid;
	hopcount = dev_info.hopcount;

	if (rio_mport_read_config_32(mport, destid, hopcount,
				     RIO_PEF_CAR, &rval))
		return -EIO;

	if (rval & RIO_PEF_SWITCH) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_SWP_INFO_CAR, &swpinfo);
		size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo));
	}

	rdev = kzalloc(size, GFP_KERNEL);
	if (rdev == NULL)
		return -ENOMEM;

	if (mport->net == NULL) {
		struct rio_net *net;

		net = rio_alloc_net(mport);
		if (!net) {
			err = -ENOMEM;
			rmcd_debug(RDEV, "failed to allocate net object");
			goto cleanup;
		}

		net->id = mport->id;
		net->hport = mport;
		dev_set_name(&net->dev, "rnet_%d", net->id);
		net->dev.parent = &mport->dev;
		net->dev.release = rio_release_net;
		err = rio_add_net(net);
		if (err) {
			rmcd_debug(RDEV, "failed to register net, err=%d", err);
			kfree(net);
			goto cleanup;
		}
	}

	rdev->net = mport->net;
	rdev->pef = rval;
	rdev->swpinfo = swpinfo;
	rio_mport_read_config_32(mport, destid, hopcount,
				 RIO_DEV_ID_CAR, &rval);
	rdev->did = rval >> 16;
	rdev->vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
				 &rdev->device_rev);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
				 &rval);
	rdev->asm_did = rval >> 16;
	rdev->asm_vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
				 &rval);
	rdev->asm_rev = rval >> 16;

	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
		rdev->efptr = rval & 0xffff;
		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
						hopcount, &rdev->phys_rmap);

		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT);
	}

	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
				 &rdev->src_ops);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
				 &rdev->dst_ops);

	rdev->comp_tag = dev_info.comptag;
	rdev->destid = destid;
	/* hopcount is stored as specified by a caller, regardless of EP or SW */
	rdev->hopcount = hopcount;

	if (rdev->pef & RIO_PEF_SWITCH) {
		rswitch = rdev->rswitch;
		rswitch->route_table = NULL;
	}

	if (strlen(dev_info.name))
		dev_set_name(&rdev->dev, "%s", dev_info.name);
	else if (rdev->pef & RIO_PEF_SWITCH)
		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
	else
		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);

	INIT_LIST_HEAD(&rdev->net_list);
	rdev->dev.parent = &mport->net->dev;
	rio_attach_device(rdev);
	rdev->dev.release = rio_release_dev;

	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
				   0, 0xffff);
	err = rio_add_device(rdev);
	if (err) {
		put_device(&rdev->dev);
		return err;
	}

	rio_dev_get(rdev);

	return 0;
cleanup:
	kfree(rdev);
	return err;
}

static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
{
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev = NULL;
	struct device *dev;
	struct rio_mport *mport;
	struct rio_net *net;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	mport = priv->md->mport;

	/* If device name is specified, removal by name has priority */
	if (strlen(dev_info.name)) {
		dev = bus_find_device_by_name(&rio_bus_type, NULL,
					      dev_info.name);
		if (dev)
			rdev = to_rio_dev(dev);
	} else {
		do {
			rdev = rio_get_comptag(dev_info.comptag, rdev);
			if (rdev && rdev->dev.parent == &mport->net->dev &&
			    rdev->destid == dev_info.destid &&
			    rdev->hopcount == dev_info.hopcount)
				break;
		} while (rdev);
	}

	if (!rdev) {
		rmcd_debug(RDEV,
			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
			dev_info.name, dev_info.comptag, dev_info.destid,
			dev_info.hopcount);
		return -ENODEV;
	}

	net = rdev->net;
	rio_dev_put(rdev);
	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);

	if (list_empty(&net->devices)) {
		rio_free_net(net);
		mport->net = NULL;
	}

	return 0;
}

/*
 * Mport cdev management
 */

/*
 * mport_cdev_open() - Open character device (mport)
 */
static int mport_cdev_open(struct inode *inode, struct file *filp)
{
	int ret;
	int minor = iminor(inode);
	struct mport_dev *chdev;
	struct mport_cdev_priv *priv;

	/* Test for valid device */
	if (minor >= RIO_MAX_MPORTS) {
		rmcd_error("Invalid minor device number");
		return -EINVAL;
	}

	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);

	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);

	if (atomic_read(&chdev->active) == 0)
		return -ENODEV;

	get_device(&chdev->dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		put_device(&chdev->dev);
		return -ENOMEM;
	}

	priv->md = chdev;

	INIT_LIST_HEAD(&priv->db_filters);
	INIT_LIST_HEAD(&priv->pw_filters);
	spin_lock_init(&priv->fifo_lock);
	init_waitqueue_head(&priv->event_rx_wait);
	ret = kfifo_alloc(&priv->event_fifo,
			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
			  GFP_KERNEL);
	if (ret < 0) {
		put_device(&chdev->dev);
		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
		ret = -ENOMEM;
		goto err_fifo;
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	INIT_LIST_HEAD(&priv->async_list);
	spin_lock_init(&priv->req_lock);
	mutex_init(&priv->dma_lock);
#endif
	mutex_lock(&chdev->file_mutex);
	list_add_tail(&priv->list, &chdev->file_list);
	mutex_unlock(&chdev->file_mutex);

	filp->private_data = priv;
	goto out;
err_fifo:
	kfree(priv);
out:
	return ret;
}

static int mport_cdev_fasync(int fd, struct file *filp, int mode)
{
	struct mport_cdev_priv *priv = filp->private_data;

	return fasync_helper(fd, filp, mode, &priv->async_queue);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
static void mport_cdev_release_dma(struct file *filp)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	struct mport_dma_req *req, *req_next;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	long wret;
	LIST_HEAD(list);

	rmcd_debug(EXIT, "from filp=%p %s(%d)",
		   filp, current->comm, task_pid_nr(current));

	if (!priv->dmach) {
		rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
		return;
	}

	md = priv->md;

	spin_lock(&priv->req_lock);
	if (!list_empty(&priv->async_list)) {
		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		list_splice_init(&priv->async_list, &list);
	}
	spin_unlock(&priv->req_lock);

	if (!list_empty(&list)) {
		rmcd_debug(EXIT, "temp list not empty");
		list_for_each_entry_safe(req, req_next, &list, node) {
			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
				   req->filp, req->cookie,
				   completion_done(&req->req_comp)?"yes":"no");
			list_del(&req->node);
			kref_put(&req->refcount, dma_req_free);
		}
	}

	put_dma_channel(priv);
	wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);

	if (wret <= 0) {
		rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
			current->comm, task_pid_nr(current), wret);
	}

	if (priv->dmach != priv->md->dma_chan) {
		rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		rio_release_dma(priv->dmach);
	} else {
		rmcd_debug(EXIT, "Adjust default DMA channel refcount");
		kref_put(&md->dma_ref, mport_release_def_dma);
	}

	priv->dmach = NULL;
}
#else
#define mport_cdev_release_dma(priv) do {} while (0)
#endif

/*
 * mport_cdev_release() - Release character device
 */
static int mport_cdev_release(struct inode *inode, struct file *filp)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *chdev;
	struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
	struct rio_mport_db_filter *db_filter, *db_filter_next;
	struct rio_mport_mapping *map, *_map;
	unsigned long flags;

	rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);

	chdev = priv->md;
	mport_cdev_release_dma(filp);

	priv->event_mask = 0;

	spin_lock_irqsave(&chdev->pw_lock, flags);
	if (!list_empty(&priv->pw_filters)) {
		list_for_each_entry_safe(pw_filter, pw_filter_next,
					 &priv->pw_filters, priv_node)
			rio_mport_delete_pw_filter(pw_filter);
	}
	spin_unlock_irqrestore(&chdev->pw_lock, flags);

	spin_lock_irqsave(&chdev->db_lock, flags);
	list_for_each_entry_safe(db_filter, db_filter_next,
				 &priv->db_filters, priv_node) {
		rio_mport_delete_db_filter(db_filter);
	}
	spin_unlock_irqrestore(&chdev->db_lock, flags);

	kfifo_free(&priv->event_fifo);

	mutex_lock(&chdev->buf_mutex);
	list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
		if (map->filp == filp) {
			rmcd_debug(EXIT, "release mapping %p filp=%p",
				   map->virt_addr, filp);
			kref_put(&map->ref, mport_release_mapping);
		}
	}
	mutex_unlock(&chdev->buf_mutex);

	mport_cdev_fasync(-1, filp, 0);
	filp->private_data = NULL;
	mutex_lock(&chdev->file_mutex);
	list_del(&priv->list);
	mutex_unlock(&chdev->file_mutex);
	put_device(&chdev->dev);
	kfree(priv);
	return 0;
}

/*
 * mport_cdev_ioctl() - IOCTLs for character device
 */
static long mport_cdev_ioctl(struct file *filp,
			     unsigned int cmd, unsigned long arg)
{
	int err = -EINVAL;
	struct mport_cdev_priv *data = filp->private_data;
	struct mport_dev *md = data->md;

	if (atomic_read(&md->active) == 0)
		return -ENODEV;

	switch (cmd) {
	case RIO_MPORT_MAINT_READ_LOCAL:
		return rio_mport_maint_rd(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_WRITE_LOCAL:
		return rio_mport_maint_wr(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_READ_REMOTE:
		return rio_mport_maint_rd(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_WRITE_REMOTE:
		return rio_mport_maint_wr(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_HDID_SET:
		return maint_hdid_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_COMPTAG_SET:
		return maint_comptag_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_PORT_IDX_GET:
		return maint_port_idx_get(data, (void __user *)arg);
	case RIO_MPORT_GET_PROPERTIES:
		md->properties.hdid = md->mport->host_deviceid;
		if (copy_to_user((void __user *)arg, &(md->properties),
				 sizeof(md->properties)))
			return -EFAULT;
		return 0;
	case RIO_ENABLE_DOORBELL_RANGE:
		return rio_mport_add_db_filter(data, (void __user *)arg);
	case RIO_DISABLE_DOORBELL_RANGE:
		return rio_mport_remove_db_filter(data, (void __user *)arg);
	case RIO_ENABLE_PORTWRITE_RANGE:
		return rio_mport_add_pw_filter(data, (void __user *)arg);
	case RIO_DISABLE_PORTWRITE_RANGE:
		return rio_mport_remove_pw_filter(data, (void __user *)arg);
	case RIO_SET_EVENT_MASK:
		data->event_mask = (u32)arg;
		return 0;
	case RIO_GET_EVENT_MASK:
		if (copy_to_user((void __user *)arg, &data->event_mask,
				 sizeof(u32)))
			return -EFAULT;
		return 0;
	case RIO_MAP_OUTBOUND:
		return rio_mport_obw_map(filp, (void __user *)arg);
	case RIO_MAP_INBOUND:
		return rio_mport_map_inbound(filp, (void __user *)arg);
	case RIO_UNMAP_OUTBOUND:
		return rio_mport_obw_free(filp, (void __user *)arg);
	case RIO_UNMAP_INBOUND:
		return rio_mport_inbound_free(filp, (void __user *)arg);
	case RIO_ALLOC_DMA:
		return rio_mport_alloc_dma(filp, (void __user *)arg);
	case RIO_FREE_DMA:
		return rio_mport_free_dma(filp, (void __user *)arg);
	case RIO_WAIT_FOR_ASYNC:
		return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
	case RIO_TRANSFER:
		return rio_mport_transfer_ioctl(filp, (void __user *)arg);
	case RIO_DEV_ADD:
		return rio_mport_add_riodev(data, (void __user *)arg);
	case RIO_DEV_DEL:
		return rio_mport_del_riodev(data, (void __user *)arg);
	default:
		break;
	}

	return err;
}

/*
 * mport_release_mapping - free mapping resources and info structure
 * @ref: a pointer to the kref within struct rio_mport_mapping
 *
 * NOTE: Shall be called while holding buf_mutex.
 */
static void mport_release_mapping(struct kref *ref)
{
	struct rio_mport_mapping *map =
			container_of(ref, struct rio_mport_mapping, ref);
	struct rio_mport *mport = map->md->mport;

	rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
		   map->dir, map->virt_addr,
		   &map->phys_addr, mport->name);

	list_del(&map->node);

	switch (map->dir) {
	case MAP_INBOUND:
		rio_unmap_inb_region(mport, map->phys_addr);
		fallthrough;
	case MAP_DMA:
		dma_free_coherent(mport->dev.parent, map->size,
				  map->virt_addr, map->phys_addr);
		break;
	case MAP_OUTBOUND:
		rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
		break;
	}
	kfree(map);
}

static void mport_mm_open(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "%pad", &map->phys_addr);
	kref_get(&map->ref);
}

static void mport_mm_close(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "%pad", &map->phys_addr);
	mutex_lock(&map->md->buf_mutex);
	kref_put(&map->ref, mport_release_mapping);
	mutex_unlock(&map->md->buf_mutex);
}

static const struct vm_operations_struct vm_ops = {
	.open =	mport_mm_open,
	.close = mport_mm_close,
};

static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	size_t size = vma->vm_end - vma->vm_start;
	dma_addr_t baddr;
	unsigned long offset;
	int found = 0, ret;
	struct rio_mport_mapping *map;

	rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
		   (unsigned int)size, vma->vm_pgoff);

	md = priv->md;
	baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (baddr >= map->phys_addr &&
		    baddr < (map->phys_addr + map->size)) {
			found = 1;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (!found)
		return -ENOMEM;

	offset = baddr - map->phys_addr;

	if (size + offset > map->size)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);

	if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
				map->virt_addr, map->phys_addr, map->size);
	else if (map->dir == MAP_OUTBOUND) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
	} else {
		rmcd_error("Attempt to mmap unsupported mapping type");
		ret = -EIO;
	}

	if (!ret) {
		vma->vm_private_data = map;
		vma->vm_ops = &vm_ops;
		mport_mm_open(vma);
	} else {
		rmcd_error("MMAP exit with err=%d", ret);
	}

	return ret;
}

static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
{
	struct mport_cdev_priv *priv = filp->private_data;

	poll_wait(filp, &priv->event_rx_wait, wait);
	if (kfifo_len(&priv->event_fifo))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	int copied;
	ssize_t ret;

	if (!count)
		return 0;

	if (kfifo_is_empty(&priv->event_fifo) &&
	    (filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	if (count % sizeof(struct rio_event))
		return -EINVAL;

	ret = wait_event_interruptible(priv->event_rx_wait,
					kfifo_len(&priv->event_fifo) != 0);
	if (ret)
		return ret;

	while (ret < count) {
		if (kfifo_to_user(&priv->event_fifo, buf,
		      sizeof(struct rio_event), &copied))
			return -EFAULT;
		ret += copied;
		buf += copied;
	}

	return ret;
}

static ssize_t mport_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_mport *mport = priv->md->mport;
	struct rio_event event;
	int len, ret;

	if (!count)
		return 0;

	if (count % sizeof(event))
		return -EINVAL;

	len = 0;
	while ((count - len) >= (int)sizeof(event)) {
		if (copy_from_user(&event, buf, sizeof(event)))
			return -EFAULT;

		if (event.header != RIO_DOORBELL)
			return -EINVAL;

		ret = rio_mport_send_doorbell(mport,
					      event.u.doorbell.rioid,
					      event.u.doorbell.payload);
		if (ret < 0)
			return ret;

		len += sizeof(event);
		buf += sizeof(event);
	}

	return len;
}
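
/*
 * Illustrative sketch (editor-added, not part of the driver): sending a
 * doorbell by writing a RIO_DOORBELL event to the device file, using the
 * fields handled above. "fd" and "destid" are hypothetical.
 *
 *	struct rio_event ev = {
 *		.header             = RIO_DOORBELL,
 *		.u.doorbell.rioid   = destid,
 *		.u.doorbell.payload = 0x5a5a,
 *	};
 *	write(fd, &ev, sizeof(ev));
 */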

static const struct file_operations mport_fops = {
	.owner		= THIS_MODULE,
	.open		= mport_cdev_open,
	.release	= mport_cdev_release,
	.poll		= mport_cdev_poll,
	.read		= mport_read,
	.write		= mport_write,
	.mmap		= mport_cdev_mmap,
	.fasync		= mport_cdev_fasync,
	.unlocked_ioctl = mport_cdev_ioctl
};

/*
 * Character device management
 */

static void mport_device_release(struct device *dev)
{
	struct mport_dev *md;

	rmcd_debug(EXIT, "%s", dev_name(dev));
	md = container_of(dev, struct mport_dev, dev);
	kfree(md);
}

/*
 * mport_cdev_add() - Create mport_dev from rio_mport
 * @mport:	RapidIO master port
 */
static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
{
	int ret = 0;
	struct mport_dev *md;
	struct rio_mport_attr attr;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md) {
		rmcd_error("Unable to allocate a device object");
		return NULL;
	}

	md->mport = mport;
	mutex_init(&md->buf_mutex);
	mutex_init(&md->file_mutex);
	INIT_LIST_HEAD(&md->file_list);

	device_initialize(&md->dev);
	md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
	md->dev.class = dev_class;
	md->dev.parent = &mport->dev;
	md->dev.release = mport_device_release;
	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
	atomic_set(&md->active, 1);

	cdev_init(&md->cdev, &mport_fops);
	md->cdev.owner = THIS_MODULE;

	INIT_LIST_HEAD(&md->doorbells);
	spin_lock_init(&md->db_lock);
	INIT_LIST_HEAD(&md->portwrites);
	spin_lock_init(&md->pw_lock);
	INIT_LIST_HEAD(&md->mappings);

	md->properties.id = mport->id;
	md->properties.sys_size = mport->sys_size;
	md->properties.hdid = mport->host_deviceid;
	md->properties.index = mport->index;

	/* The transfer_mode property will be returned through mport query
	 * interface
	 */
#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
#else
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
#endif

	ret = cdev_device_add(&md->cdev, &md->dev);
	if (ret) {
		rmcd_error("Failed to register mport %d (err=%d)",
			   mport->id, ret);
		goto err_cdev;
	}
	ret = rio_query_mport(mport, &attr);
	if (!ret) {
		md->properties.flags = attr.flags;
		md->properties.link_speed = attr.link_speed;
		md->properties.link_width = attr.link_width;
		md->properties.dma_max_sge = attr.dma_max_sge;
		md->properties.dma_max_size = attr.dma_max_size;
		md->properties.dma_align = attr.dma_align;
		md->properties.cap_sys_size = 0;
		md->properties.cap_transfer_mode = 0;
		md->properties.cap_addr_size = 0;
	} else
		pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
			mport->name, MAJOR(dev_number), mport->id);

	mutex_lock(&mport_devs_lock);
	list_add_tail(&md->node, &mport_devs);
	mutex_unlock(&mport_devs_lock);

	pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
		mport->name, MAJOR(dev_number), mport->id);

	return md;

err_cdev:
	put_device(&md->dev);
	return NULL;
}

/*
 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
 *                              associated DMA channels.
 */
static void mport_cdev_terminate_dma(struct mport_dev *md)
{
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct mport_cdev_priv *client;

	rmcd_debug(DMA, "%s", dev_name(&md->dev));

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->dmach) {
			dmaengine_terminate_all(client->dmach);
			rio_release_dma(client->dmach);
		}
	}
	mutex_unlock(&md->file_mutex);

	if (md->dma_chan) {
		dmaengine_terminate_all(md->dma_chan);
		rio_release_dma(md->dma_chan);
		md->dma_chan = NULL;
	}
#endif
}

/*
 * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
 *                            device files.
 */
static int mport_cdev_kill_fasync(struct mport_dev *md)
{
	unsigned int files = 0;
	struct mport_cdev_priv *client;

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->async_queue)
			kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
		files++;
	}
	mutex_unlock(&md->file_mutex);
	return files;
}

/*
 * mport_cdev_remove() - Remove mport character device
 * @md:	Mport device to remove
 */
static void mport_cdev_remove(struct mport_dev *md)
{
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
	atomic_set(&md->active, 0);
	mport_cdev_terminate_dma(md);
	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
	cdev_device_del(&md->cdev, &md->dev);
	mport_cdev_kill_fasync(md);

	/* TODO: do we need to give clients some time to close file
	 * descriptors? Simple wait for XX, or kref?
	 */

	/*
	 * Release DMA buffers allocated for the mport device.
	 * Disable associated inbound Rapidio requests mapping if applicable.
	 */
	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		kref_put(&map->ref, mport_release_mapping);
	}
	mutex_unlock(&md->buf_mutex);

	if (!list_empty(&md->mappings))
		rmcd_warn("WARNING: %s pending mappings on removal",
			  md->mport->name);

	rio_release_inb_dbell(md->mport, 0, 0x0fff);

	put_device(&md->dev);
}

/*
 * RIO rio_mport_interface driver
 */

/*
 * mport_add_mport() - Add rio_mport from LDM device struct
 * @dev:		Linux device model struct
 * @class_intf:	Linux class_interface
 */
static int mport_add_mport(struct device *dev,
		struct class_interface *class_intf)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev = NULL;

	mport = to_rio_mport(dev);
	if (!mport)
		return -ENODEV;

	chdev = mport_cdev_add(mport);
	if (!chdev)
		return -ENODEV;

	return 0;
}

/*
 * mport_remove_mport() - Remove rio_mport from global list
 * TODO remove device from global mport_dev list
 */
static void mport_remove_mport(struct device *dev,
		struct class_interface *class_intf)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev;
	int found = 0;

	mport = to_rio_mport(dev);
	rmcd_debug(EXIT, "Remove %s", mport->name);

	mutex_lock(&mport_devs_lock);
	list_for_each_entry(chdev, &mport_devs, node) {
		if (chdev->mport->id == mport->id) {
			atomic_set(&chdev->active, 0);
			list_del(&chdev->node);
			found = 1;
			break;
		}
	}
	mutex_unlock(&mport_devs_lock);

	if (found)
		mport_cdev_remove(chdev);
}

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class		= &rio_mport_class,
	.add_dev	= mport_add_mport,
	.remove_dev	= mport_remove_mport,
};

/*
 * Linux kernel module
 */

/*
 * mport_init - Driver module loading
 */
static int __init mport_init(void)
{
	int ret;

	/* Create device class needed by udev */
	dev_class = class_create(THIS_MODULE, DRV_NAME);
	if (IS_ERR(dev_class)) {
		rmcd_error("Unable to create " DRV_NAME " class");
		return PTR_ERR(dev_class);
	}

	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
	if (ret < 0)
		goto err_chr;

	rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));

	/* Register to rio_mport_interface */
	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		rmcd_error("class_interface_register() failed, err=%d", ret);
		goto err_cli;
	}

	return 0;

err_cli:
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
err_chr:
	class_destroy(dev_class);
	return ret;
}

/*
 * mport_exit - Driver module unloading
 */
static void __exit mport_exit(void)
{
	class_interface_unregister(&rio_mport_interface);
	class_destroy(dev_class);
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
}

module_init(mport_init);
module_exit(mport_exit);