/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 * Andre van Herk <andre.van.herk@prodrive-technologies.com>
 * Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Aurelien Jacquiot <a-jacquiot@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>

#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#define DRV_NAME	"rio_mport"
#define DRV_PREFIX	DRV_NAME ": "
#define DEV_NAME	"rio_mport"
#define DRV_VERSION	"1.0.0"

/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_DMA		= BIT(4), /* DMA transfer messages */
	DBG_MMAP	= BIT(5), /* mapping messages */
	DBG_IBW		= BIT(6), /* inbound window */
	DBG_EVENT	= BIT(7), /* event handling messages */
	DBG_OBW		= BIT(8), /* outbound window messages */
	DBG_DBELL	= BIT(9), /* doorbell messages */
	DBG_ALL		= ~0,
};

#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)		\
	do {					\
		if (DBG_##level & dbg_level)	\
			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
	} while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)

MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
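
/*
 * Illustrative note (not from the original source): with a DEBUG build,
 * selected message classes can be enabled at load time, e.g.
 * "modprobe rio_mport dbg_level=0x210" (DBG_DBELL | DBG_DMA), or at run
 * time via /sys/module/rio_mport/parameters/dbg_level.
 */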

/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
	void		*ib_base;
	dma_addr_t	ib_phys;
	u32		ib_size;
	u64		ib_rio_base;
	bool		ib_map;
	struct file	*filp;
};

/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};

struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	enum rio_mport_map_dir dir;
	u16 rioid;
	u64 rio_addr;
	dma_addr_t phys_addr; /* for mmap */
	void *virt_addr; /* kernel address, for dma_free_coherent */
	u64 size;
	struct kref ref; /* refcount of vmas sharing the mapping */
	struct file *filp;
};

struct rio_mport_dma_map {
	int valid;
	u64 length;
	void *vaddr;
	dma_addr_t paddr;
};

#define MPORT_MAX_DMA_BUFS	16
#define MPORT_EVENT_DEPTH	10

/*
 * mport_dev - driver-specific structure that represents mport device
 * @active     mport device status flag
 * @node       list node to maintain list of registered mports
 * @cdev       character device
 * @dev        associated device object
 * @mport      associated subsystem's master port device object
 * @buf_mutex  lock for buffer handling
 * @file_mutex lock for open files list
 * @file_list  list of open files on given mport
 * @properties properties of this mport
 * @portwrites queue of inbound portwrites
 * @pw_lock    lock for port write queue
 * @mappings   queue for memory mappings
 * @dma_chan   DMA channel associated with this device
 */
struct mport_dev {
	atomic_t		active;
	struct list_head	node;
	struct cdev		cdev;
	struct device		dev;
	struct rio_mport	*mport;
	struct mutex		buf_mutex;
	struct mutex		file_mutex;
	struct list_head	file_list;
	struct rio_mport_properties properties;
	struct list_head	doorbells;
	spinlock_t		db_lock;
	struct list_head	portwrites;
	spinlock_t		pw_lock;
	struct list_head	mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dma_chan;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};

/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md            master port character device object
 * @async_queue   asynchronous notification queue
 * @list          file objects tracking list
 * @db_filters    inbound doorbell filters for this descriptor
 * @pw_filters    portwrite filters for this descriptor
 * @event_fifo    event fifo for this descriptor
 * @event_rx_wait wait queue for this descriptor
 * @fifo_lock     lock for event_fifo
 * @event_mask    event mask for this descriptor
 * @dmach         DMA engine channel allocated for specific file object
 */
struct mport_cdev_priv {
	struct mport_dev	*md;
	struct fasync_struct	*async_queue;
	struct list_head	list;
	struct list_head	db_filters;
	struct list_head	pw_filters;
	struct kfifo		event_fifo;
	wait_queue_head_t	event_rx_wait;
	spinlock_t		fifo_lock;
	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dmach;
	struct list_head	async_list;
	struct list_head	pend_list;
	spinlock_t		req_lock;
	struct mutex		dma_lock;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};

/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node   node in mport device's list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual portwrite filter
 */
struct rio_mport_pw_filter {
	struct list_head md_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_pw_filter filter;
};

/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node reference to device node
 * @priv_node node in private data
 * @priv      reference to private data
 * @filter    actual doorbell filter
 */
struct rio_mport_db_filter {
	struct list_head data_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_doorbell_filter filter;
};

static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;

static struct workqueue_struct *dma_wq;

static void mport_release_mapping(struct kref *ref);

static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset, length;
	int ret, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length/sizeof(u32);
	offset = maint_io.offset;

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_read_config_32(mport,
				offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
				  buffer, maint_io.length)))
		ret = -EFAULT;
out:
	vfree(buffer);
	return ret;
}
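
/*
 * Illustrative user-space sketch (an assumption, not part of the driver):
 * issuing a remote maintenance read serviced by the handler above. Fields
 * follow struct rio_mport_maint_io from <linux/rio_mport_cdev.h>; device
 * path, destination ID and offset are example values.
 *
 *	u32 val;
 *	struct rio_mport_maint_io io = {
 *		.rioid    = 0x01,            // destination device ID
 *		.hopcount = 0,               // hops to reach the device
 *		.offset   = 0x60,            // 4-byte aligned config offset
 *		.length   = sizeof(val),     // multiple of 4 bytes
 *		.buffer   = (uintptr_t)&val, // user buffer for read data
 *	};
 *	int fd = open("/dev/rio_mport0", O_RDWR);
 *	if (ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io) < 0)
 *		perror("maint read");
 */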

static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset, length;
	int ret = -EINVAL, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length;

	if (unlikely(copy_from_user(buffer,
			(void __user *)(uintptr_t)maint_io.buffer, length))) {
		ret = -EFAULT;
		goto out;
	}

	offset = maint_io.offset;
	length /= sizeof(u32);

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

out:
	vfree(buffer);
	return ret;
}

/*
 * Inbound/outbound memory mapping functions
 */
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
				  u16 rioid, u64 raddr, u32 size,
				  dma_addr_t *paddr)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
	if (ret < 0)
		goto err_map_outb;

	map->dir = MAP_OUTBOUND;
	map->rioid = rioid;
	map->rio_addr = raddr;
	map->size = size;
	map->phys_addr = *paddr;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;

err_map_outb:
	kfree(map);
	return ret;
}

static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
			       u16 rioid, u64 raddr, u32 size,
			       dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_OUTBOUND)
			continue;
		if (rioid == map->rioid &&
		    raddr == map->rio_addr && size == map->size) {
			*paddr = map->phys_addr;
			err = 0;
			break;
		} else if (rioid == map->rioid &&
			   raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}

	/* If not found, create new */
	if (err == -ENOMEM)
		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
							size, paddr);
	mutex_unlock(&md->buf_mutex);
	return err;
}

static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *data = priv->md;
	struct rio_mmap map;
	dma_addr_t paddr;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
		   map.rioid, map.rio_addr, map.length);

	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err=%d", ret);
		return ret;
	}

	map.handle = paddr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
		return -EFAULT;

	return 0;
}

/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file pointer associated with the call
 * @arg:  buffer handle returned by allocation routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	if (!md->mport->ops->unmap_outb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	rmcd_debug(OBW, "h=0x%llx", handle);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}
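
/*
 * Illustrative user-space sketch (assumed usage, not part of the driver):
 * requesting an outbound window with RIO_MAP_OUTBOUND and accessing it via
 * mmap(). The returned map.handle is the physical address used as the mmap
 * offset; IDs, addresses and sizes are example values.
 *
 *	struct rio_mmap map = {
 *		.rioid    = 0x01,        // target device ID
 *		.rio_addr = 0x10000000,  // RapidIO address of the window
 *		.length   = 0x10000,     // window size in bytes
 *	};
 *	ioctl(fd, RIO_MAP_OUTBOUND, &map);
 *	void *p = mmap(NULL, map.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, map.handle);
 *	...
 *	munmap(p, map.length);
 *	ioctl(fd, RIO_UNMAP_OUTBOUND, &map.handle);
 */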

/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u16 hdid;

	if (copy_from_user(&hdid, arg, sizeof(hdid)))
		return -EFAULT;

	md->mport->host_deviceid = hdid;
	md->properties.hdid = hdid;
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);

	return 0;
}

/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg:  Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 comptag;

	if (copy_from_user(&comptag, arg, sizeof(comptag)))
		return -EFAULT;

	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

	return 0;
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
	struct list_head node;
	struct file *filp;
	struct mport_cdev_priv *priv;
	enum rio_transfer_sync sync;
	struct sg_table sgt;
	struct page **page_list;
	unsigned int nr_pages;
	struct rio_mport_mapping *map;
	struct dma_chan *dmach;
	enum dma_data_direction dir;
	dma_cookie_t cookie;
	enum dma_status	status;
	struct completion req_comp;
};

struct mport_faf_work {
	struct work_struct work;
	struct mport_dma_req *req;
};

static void mport_release_def_dma(struct kref *dma_ref)
{
	struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
	struct mport_cdev_priv *priv =
			container_of(dma_ref, struct mport_cdev_priv, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
	complete(&priv->comp);
}

static void dma_req_free(struct mport_dma_req *req)
{
	struct mport_cdev_priv *priv = req->priv;
	unsigned int i;

	dma_unmap_sg(req->dmach->device->dev,
		     req->sgt.sgl, req->sgt.nents, req->dir);
	sg_free_table(&req->sgt);
	if (req->page_list) {
		for (i = 0; i < req->nr_pages; i++)
			put_page(req->page_list[i]);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
}

static void dma_xfer_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_cdev_priv *priv = req->priv;

	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
}

static void dma_faf_cleanup(struct work_struct *_work)
{
	struct mport_faf_work *work = container_of(_work,
						struct mport_faf_work, work);
	struct mport_dma_req *req = work->req;

	dma_req_free(req);
	kfree(work);
}

static void dma_faf_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_faf_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	INIT_WORK(&work->work, dma_faf_cleanup);
	work->req = req;
	queue_work(dma_wq, &work->work);
}

/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer
 *
 * Returns a pointer to the DMA transaction descriptor allocated by the DMA
 * driver on success, or ERR_PTR/NULL if the request failed. The caller must
 * check a non-NULL return value with the IS_ERR macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
	enum dma_ctrl_flags flags)
{
	struct rio_dma_data tx_data;

	tx_data.sg = sgt->sgl;
	tx_data.sg_len = nents;
	tx_data.rio_addr_u = 0;
	tx_data.rio_addr = transfer->rio_addr;
	if (dir == DMA_MEM_TO_DEV) {
		switch (transfer->method) {
		case RIO_EXCHANGE_NWRITE:
			tx_data.wr_type = RDW_ALL_NWRITE;
			break;
		case RIO_EXCHANGE_NWRITE_R_ALL:
			tx_data.wr_type = RDW_ALL_NWRITE_R;
			break;
		case RIO_EXCHANGE_NWRITE_R:
			tx_data.wr_type = RDW_LAST_NWRITE_R;
			break;
		case RIO_EXCHANGE_DEFAULT:
			tx_data.wr_type = RDW_DEFAULT;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}

/* Request DMA channel associated with this mport device.
 * Try to request a DMA channel for every new process that opens the given
 * mport. If a new DMA channel is not available, use the default channel,
 * which is the first DMA channel opened on the mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
	mutex_lock(&priv->dma_lock);
	if (!priv->dmach) {
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
	return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
	kref_put(&priv->dma_ref, mport_release_dma);
}

/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
			  struct rio_transfer_io *xfer,
			  enum rio_transfer_sync sync, int nents)
{
	struct mport_cdev_priv *priv;
	struct sg_table *sgt;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	enum dma_transfer_direction dir;
	long wret;
	int ret = 0;

	priv = req->priv;
	sgt = &req->sgt;

	chan = priv->dmach;
	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
		   current->comm, task_pid_nr(current),
		   dev_name(&chan->dev->device),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	/* Initialize DMA transaction request */
	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

	if (!tx) {
		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	} else if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			xfer->rio_addr, xfer->length);
		goto err_out;
	}

	if (sync == RIO_TRANSFER_FAF)
		tx->callback = dma_faf_callback;
	else
		tx->callback = dma_xfer_callback;
	tx->callback_param = req;

	req->dmach = chan;
	req->sync = sync;
	req->status = DMA_IN_PROGRESS;
	init_completion(&req->req_comp);

	cookie = dmaengine_submit(tx);
	req->cookie = cookie;

	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);

	if (dma_submit_error(cookie)) {
		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
			   cookie, xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	}

	dma_async_issue_pending(chan);

	if (sync == RIO_TRANSFER_ASYNC) {
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->async_list);
		spin_unlock(&priv->req_lock);
		return cookie;
	} else if (sync == RIO_TRANSFER_FAF)
		return 0;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* wait_for_completion was interrupted by a signal but DMA may
		 * still be in progress
		 */
		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -EINTR;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion was signaled with error */
		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			   cookie, req->status, ret);
		ret = -EIO;
	}

err_out:
	return ret;
}

/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *       DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
		 enum rio_transfer_sync sync, enum dma_data_direction dir,
		 struct rio_transfer_io *xfer)
{
	struct mport_cdev_priv *priv = filp->private_data;
	unsigned long nr_pages = 0;
	struct page **page_list = NULL;
	struct mport_dma_req *req;
	struct mport_dev *md = priv->md;
	struct dma_chan *chan;
	int i, ret;
	int nents;

	if (xfer->length == 0)
		return -EINVAL;
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = get_dma_channel(priv);
	if (ret) {
		kfree(req);
		return ret;
	}

	/*
	 * If parameter loc_addr != NULL, we are transferring data from/to
	 * data buffer allocated in user-space: lock in memory user-space
	 * buffer pages and build an SG table for DMA transfer request
	 *
	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
	 * used for DMA data transfers: build single entry SG table using
	 * offset within the internal buffer specified by handle parameter.
	 */
	if (xfer->loc_addr) {
		unsigned long offset;
		long pinned;

		offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

		page_list = kmalloc_array(nr_pages,
					  sizeof(*page_list), GFP_KERNEL);
		if (page_list == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		pinned = get_user_pages_unlocked(
				(unsigned long)xfer->loc_addr & PAGE_MASK,
				nr_pages,
				page_list,
				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0);

		if (pinned != nr_pages) {
			if (pinned < 0) {
				rmcd_error("get_user_pages_unlocked err=%ld",
					   pinned);
				nr_pages = 0;
			} else
				rmcd_error("pinned %ld out of %ld pages",
					   pinned, nr_pages);
			ret = -EFAULT;
			/*
			 * Set nr_pages up to mean "how many pages to unpin, in
			 * the upcoming failure path".
			 */
			nr_pages = pinned;
			goto err_pg;
		}

		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
						offset, xfer->length, GFP_KERNEL);
		if (ret) {
			rmcd_error("sg_alloc_table failed with err=%d", ret);
			goto err_pg;
		}

		req->page_list = page_list;
		req->nr_pages = nr_pages;
	} else {
		dma_addr_t baddr;
		struct rio_mport_mapping *map;

		baddr = (dma_addr_t)xfer->handle;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (baddr >= map->phys_addr &&
			    baddr < (map->phys_addr + map->size)) {
				kref_get(&map->ref);
				req->map = map;
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);

		if (req->map == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		if (xfer->length + xfer->offset > map->size) {
			ret = -EINVAL;
			goto err_req;
		}

		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
		if (unlikely(ret)) {
			rmcd_error("sg_alloc_table failed for internal buf");
			goto err_req;
		}

		sg_set_buf(req->sgt.sgl,
			   map->virt_addr + (baddr - map->phys_addr) +
				xfer->offset, xfer->length);
	}

	req->dir = dir;
	req->filp = filp;
	req->priv = priv;
	chan = priv->dmach;

	nents = dma_map_sg(chan->device->dev,
			   req->sgt.sgl, req->sgt.nents, dir);
	if (nents == 0) {
		rmcd_error("Failed to map SG list");
		ret = -EFAULT;
		goto err_pg;
	}

	ret = do_dma_request(req, xfer, sync, nents);

	if (ret >= 0) {
		if (sync == RIO_TRANSFER_SYNC)
			goto sync_out;
		return ret; /* return ASYNC cookie */
	}

	if (ret == -ETIMEDOUT || ret == -EINTR) {
		/*
		 * This can happen only in case of SYNC transfer.
		 * Do not free unfinished request structure immediately.
		 * Place it into pending list and deal with it later.
		 */
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->pend_list);
		spin_unlock(&priv->req_lock);
		return ret;
	}

	rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
sync_out:
	dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
	sg_free_table(&req->sgt);
err_pg:
	if (page_list) {
		for (i = 0; i < nr_pages; i++)
			put_page(page_list[i]);
		kfree(page_list);
	}
err_req:
	if (req->map) {
		mutex_lock(&md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
	}
	put_dma_channel(priv);
	kfree(req);
	return ret;
}

static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_transaction transaction;
	struct rio_transfer_io *transfer;
	enum dma_data_direction dir;
	int i, ret = 0;

	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
		return -EFAULT;

	if (transaction.count != 1) /* only single transfer for now */
		return -EINVAL;

	if ((transaction.transfer_mode &
	     priv->md->properties.transfer_mode) == 0)
		return -ENODEV;

	transfer = vmalloc(transaction.count * sizeof(*transfer));
	if (!transfer)
		return -ENOMEM;

	if (unlikely(copy_from_user(transfer,
				    (void __user *)(uintptr_t)transaction.block,
				    transaction.count * sizeof(*transfer)))) {
		ret = -EFAULT;
		goto out_free;
	}

	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE;
	for (i = 0; i < transaction.count && ret == 0; i++)
		ret = rio_dma_transfer(filp, transaction.transfer_mode,
				       transaction.sync, dir, &transfer[i]);

	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
				  transfer,
				  transaction.count * sizeof(*transfer))))
		ret = -EFAULT;

out_free:
	vfree(transfer);

	return ret;
}
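
/*
 * Illustrative user-space sketch (assumed usage): a single synchronous
 * write through the RIO_TRANSFER ioctl serviced above. The transfer mode
 * must be one advertised in the mport properties; all values are examples.
 *
 *	struct rio_transfer_io xfer = {
 *		.rioid    = 0x01,
 *		.rio_addr = 0x20000000,
 *		.loc_addr = (uintptr_t)buf,  // user buffer to send
 *		.length   = 4096,
 *		.method   = RIO_EXCHANGE_NWRITE_R,
 *	};
 *	struct rio_transaction tr = {
 *		.block = (uintptr_t)&xfer,
 *		.count = 1,                  // only one accepted for now
 *		.transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
 *		.sync  = RIO_TRANSFER_SYNC,
 *		.dir   = RIO_TRANSFER_DIR_WRITE,
 *	};
 *	ioctl(fd, RIO_TRANSFER, &tr);
 */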

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv;
	struct mport_dev *md;
	struct rio_async_tx_wait w_param;
	struct mport_dma_req *req;
	dma_cookie_t cookie;
	unsigned long tmo;
	long wret;
	int found = 0;
	int ret;

	priv = (struct mport_cdev_priv *)filp->private_data;
	md = priv->md;

	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
		return -EFAULT;

	cookie = w_param.token;
	if (w_param.timeout)
		tmo = msecs_to_jiffies(w_param.timeout);
	else /* Use default DMA timeout */
		tmo = msecs_to_jiffies(dma_timeout);

	spin_lock(&priv->req_lock);
	list_for_each_entry(req, &priv->async_list, node) {
		if (req->cookie == cookie) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	spin_unlock(&priv->req_lock);

	if (!found)
		return -EAGAIN;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -ETIMEDOUT;
		goto err_tmo;
	} else if (wret == -ERESTARTSYS) {
		/* wait_for_completion was interrupted by a signal but DMA may
		 * still be in progress
		 */
		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -EINTR;
		goto err_tmo;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion signaled with transfer error */
		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
			   req->status);
		ret = -EIO;
	} else
		ret = 0;

	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
		dma_req_free(req);

	return ret;

err_tmo:
	/* Return request back into async queue */
	spin_lock(&priv->req_lock);
	list_add_tail(&req->node, &priv->async_list);
	spin_unlock(&priv->req_lock);
	return ret;
}
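
/*
 * Illustrative user-space sketch (assumed usage): completing an ASYNC
 * transfer. With tr.sync = RIO_TRANSFER_ASYNC the RIO_TRANSFER ioctl
 * returns a cookie, which is later handed back via RIO_WAIT_FOR_ASYNC.
 *
 *	int cookie = ioctl(fd, RIO_TRANSFER, &tr);
 *	struct rio_async_tx_wait w = {
 *		.token   = cookie,
 *		.timeout = 0,	// 0 selects the default dma_timeout
 *	};
 *	ioctl(fd, RIO_WAIT_FOR_ASYNC, &w);
 */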

static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
			u64 size, struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		kfree(map);
		return -ENOMEM;
	}

	map->dir = MAP_DMA;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;

	return 0;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_dma_mem map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
	if (ret)
		return ret;

	map.dma_handle = mapping->phys_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		mutex_lock(&md->buf_mutex);
		kref_put(&mapping->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
		return -EFAULT;
	}

	return 0;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	int ret = -EFAULT;
	struct rio_mport_mapping *map, *_map;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;
	rmcd_debug(EXIT, "filp=%p", filp);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_DMA && map->phys_addr == handle &&
		    map->filp == filp) {
			kref_put(&map->ref, mport_release_mapping);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (ret == -EFAULT) {
		rmcd_debug(DMA, "ERR no matching mapping");
		return ret;
	}

	return 0;
}
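
/*
 * Illustrative user-space sketch (assumed usage): allocating a kernel
 * DMA-coherent buffer, mapping it, and later using dbuf.dma_handle as
 * xfer.handle (with loc_addr == 0) for kernel-buffer DMA transfers.
 *
 *	struct rio_dma_mem dbuf = { .length = 0x10000 };
 *	ioctl(fd, RIO_ALLOC_DMA, &dbuf);
 *	void *p = mmap(NULL, dbuf.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, dbuf.dma_handle);
 *	...
 *	munmap(p, dbuf.length);
 *	ioctl(fd, RIO_FREE_DMA, &dbuf.dma_handle);
 */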

#else
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
				u64 raddr, u64 size,
				struct rio_mport_mapping **mapping)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	/* rio_map_inb_region() accepts u32 size */
	if (size > 0xffffffff)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	if (raddr == RIO_MAP_ANY_ADDR)
		raddr = map->phys_addr;
	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
	if (ret < 0)
		goto err_map_inb;

	map->dir = MAP_INBOUND;
	map->rio_addr = raddr;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;
	return 0;

err_map_inb:
	dma_free_coherent(mport->dev.parent, size,
			  map->virt_addr, map->phys_addr);
err_dma_alloc:
	kfree(map);
	return ret;
}

static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
			      u64 raddr, u64 size,
			      struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	if (raddr == RIO_MAP_ANY_ADDR)
		goto get_new;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_INBOUND)
			continue;
		if (raddr == map->rio_addr && size == map->size) {
			/* allow exact match only */
			*mapping = map;
			err = 0;
			break;
		} else if (raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (err != -ENOMEM)
		return err;

get_new:
	/* not found, create new */
	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}

static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_mmap map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (!md->mport->ops->map_inb)
		return -EPROTONOSUPPORT;
	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
					    map.length, &mapping);
	if (ret)
		return ret;

	map.handle = mapping->phys_addr;
	map.rio_addr = mapping->rio_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		/* Delete mapping if it was created by this request */
		if (ret == 0 && mapping->filp == filp) {
			mutex_lock(&md->buf_mutex);
			kref_put(&mapping->ref, mport_release_mapping);
			mutex_unlock(&md->buf_mutex);
		}
		return -EFAULT;
	}

	return 0;
}

/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                            previously allocated inbound DMA coherent buffer
 * @filp: file pointer associated with the call
 * @arg:  buffer handle returned by allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	if (!md->mport->ops->unmap_inb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}
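
/*
 * Illustrative user-space sketch (assumed usage): requesting an inbound
 * window so a remote endpoint can target local memory. RIO_MAP_ANY_ADDR
 * lets the driver pick the RapidIO base address.
 *
 *	struct rio_mmap ib = {
 *		.rio_addr = RIO_MAP_ANY_ADDR,
 *		.length   = 0x10000,
 *	};
 *	ioctl(fd, RIO_MAP_INBOUND, &ib);
 *	// ib.rio_addr = assigned RapidIO address, ib.handle = mmap offset
 *	void *p = mmap(NULL, ib.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, ib.handle);
 *	...
 *	ioctl(fd, RIO_UNMAP_INBOUND, &ib.handle);
 */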

/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg:  port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 port_idx = md->mport->index;

	rmcd_debug(MPORT, "port_index=%d", port_idx);

	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
		return -EFAULT;

	return 0;
}

static int rio_mport_add_event(struct mport_cdev_priv *priv,
			       struct rio_event *event)
{
	int overflow;

	if (!(priv->event_mask & event->header))
		return -EACCES;

	spin_lock(&priv->fifo_lock);
	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
			sizeof(*event)) != sizeof(*event);
	spin_unlock(&priv->fifo_lock);

	wake_up_interruptible(&priv->event_rx_wait);

	if (overflow) {
		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
		return -EBUSY;
	}

	return 0;
}

static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
				       u16 src, u16 dst, u16 info)
{
	struct mport_dev *data = dev_id;
	struct mport_cdev_priv *priv;
	struct rio_mport_db_filter *db_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_DOORBELL;
	event.u.doorbell.rioid = src;
	event.u.doorbell.payload = info;

	handled = 0;
	spin_lock(&data->db_lock);
	list_for_each_entry(db_filter, &data->doorbells, data_node) {
		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
		      db_filter->filter.rioid == src)) &&
		      info >= db_filter->filter.low &&
		      info <= db_filter->filter.high) {
			priv = db_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&data->db_lock);

	if (!handled)
		dev_warn(&data->dev,
			"%s: spurious DB received from 0x%x, info=0x%04x\n",
			__func__, src, info);
}

static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
				    rio_mport_doorbell_handler);
	if (ret) {
		rmcd_error("%s failed to register IBDB, err=%d",
			   dev_name(&md->dev), ret);
		return ret;
	}

	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
	if (db_filter == NULL) {
		rio_release_inb_dbell(md->mport, filter.low, filter.high);
		return -ENOMEM;
	}

	db_filter->filter = filter;
	db_filter->priv = priv;
	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&db_filter->priv_node, &priv->db_filters);
	list_add_tail(&db_filter->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);

	return 0;
}

static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
	list_del(&db_filter->data_node);
	list_del(&db_filter->priv_node);
	kfree(db_filter);
}

static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret = -EINVAL;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	spin_lock_irqsave(&priv->md->db_lock, flags);
	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
		if (db_filter->filter.rioid == filter.rioid &&
		    db_filter->filter.low == filter.low &&
		    db_filter->filter.high == filter.high) {
			rio_mport_delete_db_filter(db_filter);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->md->db_lock, flags);

	if (!ret)
		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

	return ret;
}
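
/*
 * Illustrative user-space sketch (assumed usage): receiving doorbells
 * through the filters above. The event mask is passed by value, matching
 * the RIO_SET_EVENT_MASK handling in mport_cdev_ioctl() below.
 *
 *	struct rio_doorbell_filter f = {
 *		.rioid = 0xffff,	// RIO_INVALID_DESTID: any sender
 *		.low   = 0,
 *		.high  = 0xffff,	// match the full payload range
 *	};
 *	ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &f);
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_DOORBELL);
 *	struct rio_event ev;
 *	read(fd, &ev, sizeof(ev));	// blocks until a doorbell arrives
 *	// ev.u.doorbell.rioid / ev.u.doorbell.payload describe the DB
 */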

static int rio_mport_match_pw(union rio_pw_msg *msg,
			      struct rio_pw_filter *filter)
{
	if ((msg->em.comptag & filter->mask) < filter->low ||
		(msg->em.comptag & filter->mask) > filter->high)
		return 0;
	return 1;
}

static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
				union rio_pw_msg *msg, int step)
{
	struct mport_dev *md = context;
	struct mport_cdev_priv *priv;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_PORTWRITE;
	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);

	handled = 0;
	spin_lock(&md->pw_lock);
	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
			priv = pw_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&md->pw_lock);

	if (!handled) {
		printk_ratelimited(KERN_WARNING DRV_NAME
			": mport%d received spurious PW from 0x%08x\n",
			mport->id, msg->em.comptag);
	}

	return 0;
}

static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int hadd = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
	if (pw_filter == NULL)
		return -ENOMEM;

	pw_filter->filter = filter;
	pw_filter->priv = priv;
	spin_lock_irqsave(&md->pw_lock, flags);
	if (list_empty(&md->portwrites))
		hadd = 1;
	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hadd) {
		int ret;

		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (ret) {
			dev_err(&md->dev,
				"%s: failed to add IB_PW handler, err=%d\n",
				__func__, ret);
			return ret;
		}
		rio_pw_enable(md->mport, 1);
	}

	return 0;
}

static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
{
	list_del(&pw_filter->md_node);
	list_del(&pw_filter->priv_node);
	kfree(pw_filter);
}

static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
				     struct rio_pw_filter *b)
{
	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
		return 1;
	return 0;
}

static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int ret = -EINVAL;
	int hdel = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	spin_lock_irqsave(&md->pw_lock, flags);
	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
			rio_mport_delete_pw_filter(pw_filter);
			ret = 0;
			break;
		}
	}

	if (list_empty(&md->portwrites))
		hdel = 1;
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hdel) {
		rio_del_mport_pw_handler(md->mport, priv->md,
					 rio_mport_pw_handler);
		rio_pw_enable(md->mport, 0);
	}

	return ret;
}
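
/*
 * Illustrative user-space sketch (assumed usage): subscribing to inbound
 * port-writes whose masked component tag falls into a range, mirroring
 * the matching done by rio_mport_match_pw() above.
 *
 *	struct rio_pw_filter pwf = {
 *		.mask = 0xffffffff,	// compare the full component tag
 *		.low  = 0x1000,
 *		.high = 0x1fff,
 *	};
 *	ioctl(fd, RIO_ENABLE_PORTWRITE_RANGE, &pwf);
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_PORTWRITE);
 *	read(fd, &ev, sizeof(ev));	// ev.u.portwrite.payload holds the PW
 */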

/*
 * rio_release_dev - release routine for kernel RIO device object
 * @dev: kernel device object associated with a RIO device structure
 *
 * Frees the RIO device struct associated with the kernel device object.
 */
static void rio_release_dev(struct device *dev)
{
	struct rio_dev *rdev;

	rdev = to_rio_dev(dev);
	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
	kfree(rdev);
}


static void rio_release_net(struct device *dev)
{
	struct rio_net *net;

	net = to_rio_net(dev);
	rmcd_debug(RDEV, "net_%d", net->id);
	kfree(net);
}

/*
 * rio_mport_add_riodev - creates a kernel RIO device object
 *
 * Allocates a RIO device data structure and initializes required fields based
 * on device's configuration space contents.
 * If the device has switch capabilities, then a switch specific portion is
 * allocated and configured.
 */
static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
				void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev;
	struct rio_switch *rswitch = NULL;
	struct rio_mport *mport;
	struct device *dev;
	size_t size;
	u32 rval;
	u32 swpinfo = 0;
	u16 destid;
	u8 hopcount;
	int err;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
		   dev_info.comptag, dev_info.destid, dev_info.hopcount);

	dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
	if (dev) {
		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
		put_device(dev);
		return -EEXIST;
	}

	size = sizeof(*rdev);
	mport = md->mport;
	destid = dev_info.destid;
	hopcount = dev_info.hopcount;

	if (rio_mport_read_config_32(mport, destid, hopcount,
				     RIO_PEF_CAR, &rval))
		return -EIO;

	if (rval & RIO_PEF_SWITCH) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_SWP_INFO_CAR, &swpinfo);
		size += (RIO_GET_TOTAL_PORTS(swpinfo) *
			 sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
	}

	rdev = kzalloc(size, GFP_KERNEL);
	if (rdev == NULL)
		return -ENOMEM;

	if (mport->net == NULL) {
		struct rio_net *net;

		net = rio_alloc_net(mport);
		if (!net) {
			err = -ENOMEM;
			rmcd_debug(RDEV, "failed to allocate net object");
			goto cleanup;
		}

		net->id = mport->id;
		net->hport = mport;
		dev_set_name(&net->dev, "rnet_%d", net->id);
		net->dev.parent = &mport->dev;
		net->dev.release = rio_release_net;
		err = rio_add_net(net);
		if (err) {
			rmcd_debug(RDEV, "failed to register net, err=%d", err);
			kfree(net);
			goto cleanup;
		}
	}

	rdev->net = mport->net;
	rdev->pef = rval;
	rdev->swpinfo = swpinfo;
	rio_mport_read_config_32(mport, destid, hopcount,
				 RIO_DEV_ID_CAR, &rval);
	rdev->did = rval >> 16;
	rdev->vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
				 &rdev->device_rev);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
				 &rval);
	rdev->asm_did = rval >> 16;
	rdev->asm_vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
				 &rval);
	rdev->asm_rev = rval >> 16;

	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
		rdev->efptr = rval & 0xffff;
		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
						hopcount, &rdev->phys_rmap);

		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT);
	}

	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
				 &rdev->src_ops);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
				 &rdev->dst_ops);

	rdev->comp_tag = dev_info.comptag;
	rdev->destid = destid;
	/* hopcount is stored as specified by a caller, regardless of EP or SW */
	rdev->hopcount = hopcount;

	if (rdev->pef & RIO_PEF_SWITCH) {
		rswitch = rdev->rswitch;
		rswitch->route_table = NULL;
	}

	if (strlen(dev_info.name))
		dev_set_name(&rdev->dev, "%s", dev_info.name);
	else if (rdev->pef & RIO_PEF_SWITCH)
		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
	else
		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);

	INIT_LIST_HEAD(&rdev->net_list);
	rdev->dev.parent = &mport->net->dev;
	rio_attach_device(rdev);
	rdev->dev.release = rio_release_dev;

	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
				   0, 0xffff);
	err = rio_add_device(rdev);
	if (err)
		goto cleanup;
	rio_dev_get(rdev);

	return 0;
cleanup:
	kfree(rdev);
	return err;
}

static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
{
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev = NULL;
	struct device *dev;
	struct rio_mport *mport;
	struct rio_net *net;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	mport = priv->md->mport;

	/* If device name is specified, removal by name has priority */
	if (strlen(dev_info.name)) {
		dev = bus_find_device_by_name(&rio_bus_type, NULL,
					      dev_info.name);
		if (dev)
			rdev = to_rio_dev(dev);
	} else {
		do {
			rdev = rio_get_comptag(dev_info.comptag, rdev);
			if (rdev && rdev->dev.parent == &mport->net->dev &&
			    rdev->destid == dev_info.destid &&
			    rdev->hopcount == dev_info.hopcount)
				break;
		} while (rdev);
	}

	if (!rdev) {
		rmcd_debug(RDEV,
			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
			dev_info.name, dev_info.comptag, dev_info.destid,
			dev_info.hopcount);
		return -ENODEV;
	}

	net = rdev->net;
	rio_dev_put(rdev);
	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);

	if (list_empty(&net->devices)) {
		rio_free_net(net);
		mport->net = NULL;
	}

	return 0;
}

/*
 * Mport cdev management
 */

/*
 * mport_cdev_open() - Open character device (mport)
 */
static int mport_cdev_open(struct inode *inode, struct file *filp)
{
	int ret;
	int minor = iminor(inode);
	struct mport_dev *chdev;
	struct mport_cdev_priv *priv;

	/* Test for valid device */
	if (minor >= RIO_MAX_MPORTS) {
		rmcd_error("Invalid minor device number");
		return -EINVAL;
	}

	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);

	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);

	if (atomic_read(&chdev->active) == 0)
		return -ENODEV;

	get_device(&chdev->dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		put_device(&chdev->dev);
		return -ENOMEM;
	}

	priv->md = chdev;

	mutex_lock(&chdev->file_mutex);
	list_add_tail(&priv->list, &chdev->file_list);
	mutex_unlock(&chdev->file_mutex);

	INIT_LIST_HEAD(&priv->db_filters);
	INIT_LIST_HEAD(&priv->pw_filters);
	spin_lock_init(&priv->fifo_lock);
	init_waitqueue_head(&priv->event_rx_wait);
	ret = kfifo_alloc(&priv->event_fifo,
			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
			  GFP_KERNEL);
	if (ret < 0) {
		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
		ret = -ENOMEM;
		goto err_fifo;
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	INIT_LIST_HEAD(&priv->async_list);
	INIT_LIST_HEAD(&priv->pend_list);
	spin_lock_init(&priv->req_lock);
	mutex_init(&priv->dma_lock);
#endif

	filp->private_data = priv;
	goto out;
err_fifo:
	kfree(priv);
out:
	return ret;
}

static int mport_cdev_fasync(int fd, struct file *filp, int mode)
{
	struct mport_cdev_priv *priv = filp->private_data;

	return fasync_helper(fd, filp, mode, &priv->async_queue);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
static void mport_cdev_release_dma(struct file *filp)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	struct mport_dma_req *req, *req_next;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	long wret;
	LIST_HEAD(list);

	rmcd_debug(EXIT, "from filp=%p %s(%d)",
		   filp, current->comm, task_pid_nr(current));

	if (!priv->dmach) {
		rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
		return;
	}

	md = priv->md;

	flush_workqueue(dma_wq);

	spin_lock(&priv->req_lock);
	if (!list_empty(&priv->async_list)) {
		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		list_splice_init(&priv->async_list, &list);
	}
	spin_unlock(&priv->req_lock);

	if (!list_empty(&list)) {
		rmcd_debug(EXIT, "temp list not empty");
		list_for_each_entry_safe(req, req_next, &list, node) {
			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
				   req->filp, req->cookie,
				   completion_done(&req->req_comp)?"yes":"no");
			list_del(&req->node);
			dma_req_free(req);
		}
	}

	if (!list_empty(&priv->pend_list)) {
		rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		list_for_each_entry_safe(req,
					 req_next, &priv->pend_list, node) {
			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
				   req->filp, req->cookie,
				   completion_done(&req->req_comp)?"yes":"no");
			list_del(&req->node);
			dma_req_free(req);
		}
	}

	put_dma_channel(priv);
	wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);

	if (wret <= 0) {
		rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
			   current->comm, task_pid_nr(current), wret);
	}

	spin_lock(&priv->req_lock);

	if (!list_empty(&priv->pend_list)) {
		rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
	}

	spin_unlock(&priv->req_lock);

	if (priv->dmach != priv->md->dma_chan) {
		rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		rio_release_dma(priv->dmach);
	} else {
		rmcd_debug(EXIT, "Adjust default DMA channel refcount");
		kref_put(&md->dma_ref, mport_release_def_dma);
	}

	priv->dmach = NULL;
}
#else
#define mport_cdev_release_dma(priv) do {} while (0)
#endif

/*
 * mport_cdev_release() - Release character device
 */
static int mport_cdev_release(struct inode *inode, struct file *filp)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *chdev;
	struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
	struct rio_mport_db_filter *db_filter, *db_filter_next;
	struct rio_mport_mapping *map, *_map;
	unsigned long flags;

	rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);

	chdev = priv->md;
	mport_cdev_release_dma(filp);

	priv->event_mask = 0;

	spin_lock_irqsave(&chdev->pw_lock, flags);
	if (!list_empty(&priv->pw_filters)) {
		list_for_each_entry_safe(pw_filter, pw_filter_next,
					 &priv->pw_filters, priv_node)
			rio_mport_delete_pw_filter(pw_filter);
	}
	spin_unlock_irqrestore(&chdev->pw_lock, flags);

	spin_lock_irqsave(&chdev->db_lock, flags);
	list_for_each_entry_safe(db_filter, db_filter_next,
				 &priv->db_filters, priv_node) {
		rio_mport_delete_db_filter(db_filter);
	}
	spin_unlock_irqrestore(&chdev->db_lock, flags);

	kfifo_free(&priv->event_fifo);

	mutex_lock(&chdev->buf_mutex);
	list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
		if (map->filp == filp) {
			rmcd_debug(EXIT, "release mapping %p filp=%p",
				   map->virt_addr, filp);
			kref_put(&map->ref, mport_release_mapping);
		}
	}
	mutex_unlock(&chdev->buf_mutex);

	mport_cdev_fasync(-1, filp, 0);
	filp->private_data = NULL;
	mutex_lock(&chdev->file_mutex);
	list_del(&priv->list);
	mutex_unlock(&chdev->file_mutex);
	put_device(&chdev->dev);
	kfree(priv);
	return 0;
}

/*
 * mport_cdev_ioctl() - IOCTLs for character device
 */
static long mport_cdev_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int err = -EINVAL;
	struct mport_cdev_priv *data = filp->private_data;
	struct mport_dev *md = data->md;

	if (atomic_read(&md->active) == 0)
		return -ENODEV;

	switch (cmd) {
	case RIO_MPORT_MAINT_READ_LOCAL:
		return rio_mport_maint_rd(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_WRITE_LOCAL:
		return rio_mport_maint_wr(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_READ_REMOTE:
		return rio_mport_maint_rd(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_WRITE_REMOTE:
		return rio_mport_maint_wr(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_HDID_SET:
		return maint_hdid_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_COMPTAG_SET:
		return maint_comptag_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_PORT_IDX_GET:
		return maint_port_idx_get(data, (void __user *)arg);
	case RIO_MPORT_GET_PROPERTIES:
		md->properties.hdid = md->mport->host_deviceid;
		if (copy_to_user((void __user *)arg, &(md->properties),
				 sizeof(md->properties)))
			return -EFAULT;
		return 0;
	case RIO_ENABLE_DOORBELL_RANGE:
		return rio_mport_add_db_filter(data, (void __user *)arg);
	case RIO_DISABLE_DOORBELL_RANGE:
		return rio_mport_remove_db_filter(data, (void __user *)arg);
	case RIO_ENABLE_PORTWRITE_RANGE:
		return rio_mport_add_pw_filter(data, (void __user *)arg);
	case RIO_DISABLE_PORTWRITE_RANGE:
		return rio_mport_remove_pw_filter(data, (void __user *)arg);
	case RIO_SET_EVENT_MASK:
		data->event_mask = (u32)arg;
		return 0;
	case RIO_GET_EVENT_MASK:
		if (copy_to_user((void __user *)arg, &data->event_mask,
				 sizeof(u32)))
			return -EFAULT;
		return 0;
	case RIO_MAP_OUTBOUND:
		return rio_mport_obw_map(filp, (void __user *)arg);
	case RIO_MAP_INBOUND:
		return rio_mport_map_inbound(filp, (void __user *)arg);
	case RIO_UNMAP_OUTBOUND:
		return rio_mport_obw_free(filp, (void __user *)arg);
	case RIO_UNMAP_INBOUND:
		return rio_mport_inbound_free(filp, (void __user *)arg);
	case RIO_ALLOC_DMA:
		return rio_mport_alloc_dma(filp, (void __user *)arg);
	case RIO_FREE_DMA:
		return rio_mport_free_dma(filp, (void __user *)arg);
	case RIO_WAIT_FOR_ASYNC:
		return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
	case RIO_TRANSFER:
		return rio_mport_transfer_ioctl(filp, (void __user *)arg);
	case RIO_DEV_ADD:
		return rio_mport_add_riodev(data, (void __user *)arg);
	case RIO_DEV_DEL:
		return rio_mport_del_riodev(data, (void __user *)arg);
	default:
		break;
	}

	return err;
}

/*
 * mport_release_mapping - free mapping resources and info structure
 * @ref: a pointer to the kref within struct rio_mport_mapping
 *
 * NOTE: Shall be called while holding buf_mutex.
 */
static void mport_release_mapping(struct kref *ref)
{
	struct rio_mport_mapping *map =
			container_of(ref, struct rio_mport_mapping, ref);
	struct rio_mport *mport = map->md->mport;

	rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
		   map->dir, map->virt_addr,
		   &map->phys_addr, mport->name);

	list_del(&map->node);

	switch (map->dir) {
	case MAP_INBOUND:
		rio_unmap_inb_region(mport, map->phys_addr);
		/* fall through */
	case MAP_DMA:
		dma_free_coherent(mport->dev.parent, map->size,
				  map->virt_addr, map->phys_addr);
		break;
	case MAP_OUTBOUND:
		rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
		break;
	}
	kfree(map);
}

static void mport_mm_open(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "%pad", &map->phys_addr);
	kref_get(&map->ref);
}

static void mport_mm_close(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "%pad", &map->phys_addr);
	mutex_lock(&map->md->buf_mutex);
	kref_put(&map->ref, mport_release_mapping);
	mutex_unlock(&map->md->buf_mutex);
}

static const struct vm_operations_struct vm_ops = {
	.open =	mport_mm_open,
	.close = mport_mm_close,
};

static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	size_t size = vma->vm_end - vma->vm_start;
	dma_addr_t baddr;
	unsigned long offset;
	int found = 0, ret;
	struct rio_mport_mapping *map;

	rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
		   (unsigned int)size, vma->vm_pgoff);

	md = priv->md;
	baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (baddr >= map->phys_addr &&
		    baddr < (map->phys_addr + map->size)) {
			found = 1;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (!found)
		return -ENOMEM;

	offset = baddr - map->phys_addr;

	if (size + offset > map->size)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);

	if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
				map->virt_addr, map->phys_addr, map->size);
	else if (map->dir == MAP_OUTBOUND) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
	} else {
		rmcd_error("Attempt to mmap unsupported mapping type");
		ret = -EIO;
	}

	if (!ret) {
		vma->vm_private_data = map;
		vma->vm_ops = &vm_ops;
		mport_mm_open(vma);
	} else {
		rmcd_error("MMAP exit with err=%d", ret);
	}

	return ret;
}

static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait)
{
	struct mport_cdev_priv *priv = filp->private_data;

	poll_wait(filp, &priv->event_rx_wait, wait);
	if (kfifo_len(&priv->event_fifo))
		return POLLIN | POLLRDNORM;

	return 0;
}

static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	int copied;
	ssize_t ret;

	if (!count)
		return 0;

	if (kfifo_is_empty(&priv->event_fifo) &&
	    (filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	if (count % sizeof(struct rio_event))
		return -EINVAL;

	ret = wait_event_interruptible(priv->event_rx_wait,
					kfifo_len(&priv->event_fifo) != 0);
	if (ret)
		return ret;

	while (ret < count) {
		if (kfifo_to_user(&priv->event_fifo, buf,
		      sizeof(struct rio_event), &copied))
			return -EFAULT;
		ret += copied;
		buf += copied;
	}

	return ret;
}

static ssize_t mport_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_mport *mport = priv->md->mport;
	struct rio_event event;
	int len, ret;

	if (!count)
		return 0;

	if (count % sizeof(event))
		return -EINVAL;

	len = 0;
	while ((count - len) >= (int)sizeof(event)) {
		if (copy_from_user(&event, buf, sizeof(event)))
			return -EFAULT;

		if (event.header != RIO_DOORBELL)
			return -EINVAL;

		ret = rio_mport_send_doorbell(mport,
					      event.u.doorbell.rioid,
					      event.u.doorbell.payload);
		if (ret < 0)
			return ret;

		len += sizeof(event);
		buf += sizeof(event);
	}

	return len;
}

static const struct file_operations mport_fops = {
	.owner		= THIS_MODULE,
	.open		= mport_cdev_open,
	.release	= mport_cdev_release,
	.poll		= mport_cdev_poll,
	.read		= mport_read,
	.write		= mport_write,
	.mmap		= mport_cdev_mmap,
	.fasync		= mport_cdev_fasync,
	.unlocked_ioctl = mport_cdev_ioctl
};
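
/*
 * Illustrative user-space sketch (assumed usage): querying mport
 * properties through the ioctl interface implemented above.
 *
 *	struct rio_mport_properties props;
 *	int fd = open("/dev/rio_mport0", O_RDWR);
 *	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props) == 0)
 *		printf("mport %u: hdid %d link width x%u\n",
 *		       props.id, props.hdid, props.link_width);
 */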

/*
 * Character device management
 */

static void mport_device_release(struct device *dev)
{
	struct mport_dev *md;

	rmcd_debug(EXIT, "%s", dev_name(dev));
	md = container_of(dev, struct mport_dev, dev);
	kfree(md);
}

/*
 * mport_cdev_add() - Create mport_dev from rio_mport
 * @mport:	RapidIO master port
 */
static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
{
	int ret = 0;
	struct mport_dev *md;
	struct rio_mport_attr attr;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md) {
		rmcd_error("Unable to allocate a device object");
		return NULL;
	}

	md->mport = mport;
	mutex_init(&md->buf_mutex);
	mutex_init(&md->file_mutex);
	INIT_LIST_HEAD(&md->file_list);

	device_initialize(&md->dev);
	md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
	md->dev.class = dev_class;
	md->dev.parent = &mport->dev;
	md->dev.release = mport_device_release;
	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
	atomic_set(&md->active, 1);

	cdev_init(&md->cdev, &mport_fops);
	md->cdev.owner = THIS_MODULE;

	INIT_LIST_HEAD(&md->doorbells);
	spin_lock_init(&md->db_lock);
	INIT_LIST_HEAD(&md->portwrites);
	spin_lock_init(&md->pw_lock);
	INIT_LIST_HEAD(&md->mappings);

	md->properties.id = mport->id;
	md->properties.sys_size = mport->sys_size;
	md->properties.hdid = mport->host_deviceid;
	md->properties.index = mport->index;

	/* The transfer_mode property will be returned through mport query
	 * interface
	 */
#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
#else
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
#endif

	ret = cdev_device_add(&md->cdev, &md->dev);
	if (ret) {
		rmcd_error("Failed to register mport %d (err=%d)",
			   mport->id, ret);
		goto err_cdev;
	}
	ret = rio_query_mport(mport, &attr);
	if (!ret) {
		md->properties.flags = attr.flags;
		md->properties.link_speed = attr.link_speed;
		md->properties.link_width = attr.link_width;
		md->properties.dma_max_sge = attr.dma_max_sge;
		md->properties.dma_max_size = attr.dma_max_size;
		md->properties.dma_align = attr.dma_align;
		md->properties.cap_sys_size = 0;
		md->properties.cap_transfer_mode = 0;
		md->properties.cap_addr_size = 0;
	} else
		pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
			mport->name, MAJOR(dev_number), mport->id);

	mutex_lock(&mport_devs_lock);
	list_add_tail(&md->node, &mport_devs);
	mutex_unlock(&mport_devs_lock);

	pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
		mport->name, MAJOR(dev_number), mport->id);

	return md;

err_cdev:
	put_device(&md->dev);
	return NULL;
}

/*
 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
 *                              associated DMA channels.
 */
static void mport_cdev_terminate_dma(struct mport_dev *md)
{
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct mport_cdev_priv *client;

	rmcd_debug(DMA, "%s", dev_name(&md->dev));

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->dmach) {
			dmaengine_terminate_all(client->dmach);
			rio_release_dma(client->dmach);
		}
	}
	mutex_unlock(&md->file_mutex);

	if (md->dma_chan) {
		dmaengine_terminate_all(md->dma_chan);
		rio_release_dma(md->dma_chan);
		md->dma_chan = NULL;
	}
#endif
}

/*
 * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
 *                            device descriptors
 */
static int mport_cdev_kill_fasync(struct mport_dev *md)
{
	unsigned int files = 0;
	struct mport_cdev_priv *client;

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->async_queue)
			kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
		files++;
	}
	mutex_unlock(&md->file_mutex);
	return files;
}

/*
 * mport_cdev_remove() - Remove mport character device
 * @dev:	Mport device to remove
 */
static void mport_cdev_remove(struct mport_dev *md)
{
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
	atomic_set(&md->active, 0);
	mport_cdev_terminate_dma(md);
	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
	cdev_device_del(&md->cdev, &md->dev);
	mport_cdev_kill_fasync(md);

	flush_workqueue(dma_wq);

	/* TODO: do we need to give clients some time to close file
	 * descriptors? Simple wait for XX, or kref?
	 */

	/*
	 * Release DMA buffers allocated for the mport device.
	 * Disable associated inbound RapidIO requests mapping if applicable.
	 */
	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		kref_put(&map->ref, mport_release_mapping);
	}
	mutex_unlock(&md->buf_mutex);

	if (!list_empty(&md->mappings))
		rmcd_warn("WARNING: %s pending mappings on removal",
			  md->mport->name);

	rio_release_inb_dbell(md->mport, 0, 0x0fff);

	put_device(&md->dev);
}

/*
 * RIO rio_mport_interface driver
 */

/*
 * mport_add_mport() - Add rio_mport from LDM device struct
 * @dev:		Linux device model struct
 * @class_intf:	Linux class_interface
 */
static int mport_add_mport(struct device *dev,
		struct class_interface *class_intf)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev = NULL;

	mport = to_rio_mport(dev);
	if (!mport)
		return -ENODEV;

	chdev = mport_cdev_add(mport);
	if (!chdev)
		return -ENODEV;

	return 0;
}

/*
 * mport_remove_mport() - Remove rio_mport from global list
 * TODO remove device from global mport_dev list
 */
static void mport_remove_mport(struct device *dev,
		struct class_interface *class_intf)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev;
	int found = 0;

	mport = to_rio_mport(dev);
	rmcd_debug(EXIT, "Remove %s", mport->name);

	mutex_lock(&mport_devs_lock);
	list_for_each_entry(chdev, &mport_devs, node) {
		if (chdev->mport->id == mport->id) {
			atomic_set(&chdev->active, 0);
			list_del(&chdev->node);
			found = 1;
			break;
		}
	}
	mutex_unlock(&mport_devs_lock);

	if (found)
		mport_cdev_remove(chdev);
}

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class		= &rio_mport_class,
	.add_dev	= mport_add_mport,
	.remove_dev	= mport_remove_mport,
};

/*
 * Linux kernel module
 */

/*
 * mport_init - Driver module loading
 */
static int __init mport_init(void)
{
	int ret;

	/* Create device class needed by udev */
	dev_class = class_create(THIS_MODULE, DRV_NAME);
	if (IS_ERR(dev_class)) {
		rmcd_error("Unable to create " DRV_NAME " class");
		return PTR_ERR(dev_class);
	}

	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
	if (ret < 0)
		goto err_chr;

	rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));

	/* Register to rio_mport_interface */
	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		rmcd_error("class_interface_register() failed, err=%d", ret);
		goto err_cli;
	}

	dma_wq = create_singlethread_workqueue("dma_wq");
	if (!dma_wq) {
		rmcd_error("failed to create DMA work queue");
		ret = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	class_interface_unregister(&rio_mport_interface);
err_cli:
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
err_chr:
	class_destroy(dev_class);
	return ret;
}

/*
 * mport_exit - Driver module unloading
 */
static void __exit mport_exit(void)
{
	class_interface_unregister(&rio_mport_interface);
	class_destroy(dev_class);
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
	destroy_workqueue(dma_wq);
}

module_init(mport_init);
module_exit(mport_exit);