// SPDX-License-Identifier: GPL-2.0+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>		/* pin_user_pages(), mmap_read_lock() */
#include <linux/kernel.h>	/* printk() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/fs.h>		/* everything... */
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/cdev.h>
#include <linux/uaccess.h>	/* copy_*_user */
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include "kpc_dma_driver.h"
#include "uapi.h"		/* KND_IOCTL_* numbers */
/**********  Helper Functions  **********/
static inline unsigned int count_pages(unsigned long iov_base, size_t iov_len)
{
	unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT;
	unsigned long last = ((iov_base + iov_len - 1) & PAGE_MASK) >> PAGE_SHIFT;

	return last - first + 1;
}

// Each descriptor can move at most 0x80000 bytes, so one sg entry may span several descriptors.
static inline unsigned int count_parts_for_sge(struct scatterlist *sg)
{
	return DIV_ROUND_UP(sg_dma_len(sg), 0x80000);
}
/**********  Transfer Helpers  **********/
static int kpc_dma_transfer(struct dev_private_data *priv,
			    unsigned long iov_base, size_t iov_len)
{
	unsigned int i = 0;
	int rv = 0, nr_pages = 0;
	struct kpc_dma_device *ldev;
	struct aio_cb_data *acd;
	DECLARE_COMPLETION_ONSTACK(done);
	u32 desc_needed = 0;
	struct scatterlist *sg;
	u32 num_descrs_avail;
	struct kpc_dma_descriptor *desc;
	unsigned int pcnt;
	unsigned int p;
	u64 card_addr;
	u64 dma_addr;
	u64 user_ctl;

	ldev = priv->ldev;

	acd = kzalloc(sizeof(*acd), GFP_KERNEL);
	if (!acd) {
		dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the aio data\n");
		return -ENOMEM;
	}
	memset(acd, 0x66, sizeof(struct aio_cb_data));

	acd->priv = priv;
	acd->ldev = priv->ldev;
	acd->cpl = &done;
	acd->flags = 0;
	acd->len = iov_len;
	acd->page_count = count_pages(iov_base, iov_len);

	// Allocate an array of page pointers
	acd->user_pages = kcalloc(acd->page_count, sizeof(struct page *),
				  GFP_KERNEL);
	if (!acd->user_pages) {
		dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the page pointers\n");
		rv = -ENOMEM;
		goto err_alloc_userpages;
	}

	// Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
	mmap_read_lock(current->mm);	/* get memory map semaphore */
	rv = pin_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE, acd->user_pages, NULL);
	mmap_read_unlock(current->mm);	/* release the semaphore */
	if (rv != acd->page_count) {
		nr_pages = rv;
		if (rv > 0)
			rv = -EFAULT;
		dev_err(&priv->ldev->pldev->dev, "Couldn't pin_user_pages (%d)\n", rv);
		goto err_unpin_pages;
	}
	nr_pages = acd->page_count;

	// Allocate and setup the sg_table (scatterlist entries)
	rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE - 1), iov_len, GFP_KERNEL);
	if (rv) {
		dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%d)\n", rv);
		goto err_unpin_pages;
	}

	// Setup the DMA mapping for all the sg entries
	acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
	if (acd->mapped_entry_count <= 0) {
		dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n", acd->mapped_entry_count);
		rv = -ENOMEM;
		goto err_free_sg_table;
	}

	// Calculate how many descriptors are actually needed for this transfer.
	for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i) {
		desc_needed += count_parts_for_sge(sg);
	}

	lock_engine(ldev);

	// Figure out how many descriptors are available and return an error if there aren't enough
	num_descrs_avail = count_descriptors_available(ldev);
	dev_dbg(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
	if (desc_needed >= ldev->desc_pool_cnt) {
		dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d TOO MANY to ever complete!\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
		rv = -EMSGSIZE;
		goto err_descr_too_many;
	}
	if (desc_needed > num_descrs_avail) {
		dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d Too many to complete right now.\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
		rv = -EAGAIN;
		goto err_descr_too_many;
	}

	// Loop through all the sg table entries and fill out a descriptor for each one.
	desc = ldev->desc_next;
	card_addr = acd->priv->card_addr;
	for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i) {
		pcnt = count_parts_for_sge(sg);
		for (p = 0 ; p < pcnt ; p++) {
			// Fill out the descriptor
			if (p != pcnt - 1)
				desc->DescByteCount = 0x80000;
			else
				desc->DescByteCount = sg_dma_len(sg) - (p * 0x80000);
			desc->DescBufferByteCount = desc->DescByteCount;

			desc->DescControlFlags |= DMA_DESC_CTL_IRQONERR;
			if (i == 0 && p == 0)
				desc->DescControlFlags |= DMA_DESC_CTL_SOP;
			if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
				desc->DescControlFlags |= DMA_DESC_CTL_EOP | DMA_DESC_CTL_IRQONDONE;

			desc->DescCardAddrLS = (card_addr & 0xFFFFFFFF);
			desc->DescCardAddrMS = (card_addr >> 32) & 0xF;
			card_addr += desc->DescByteCount;

			dma_addr = sg_dma_address(sg) + (p * 0x80000);
			desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFFUL) >> 0;
			desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000UL) >> 32;

			user_ctl = acd->priv->user_ctl;
			if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
				user_ctl = acd->priv->user_ctl_last;
			desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFFUL) >> 0;
			desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000UL) >> 32;

			// Only the final descriptor carries the acd, so the completion
			// callback fires once for the whole transfer.
			if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
				desc->acd = acd;

			dev_dbg(&priv->ldev->pldev->dev, " Filled descriptor %p (acd = %p)\n", desc, desc->acd);

			ldev->desc_next = desc->Next;
			desc = desc->Next;
		}
	}

	// Send the filled descriptors off to the hardware to process!
	SetEngineSWPtr(ldev, ldev->desc_next);

	unlock_engine(ldev);

	rv = wait_for_completion_interruptible(&done);
	/*
	 * If the user aborted (rv == -ERESTARTSYS), we're no longer responsible
	 * for cleaning up the acd
	 */
	if (rv == -ERESTARTSYS)
		acd->cpl = NULL;
	if (rv == 0) {
		rv = acd->len;
		kfree(acd);
	}

	return rv;

err_descr_too_many:
	unlock_engine(ldev);
	dma_unmap_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
err_free_sg_table:
	sg_free_table(&acd->sgt);
err_unpin_pages:
	if (nr_pages > 0)
		unpin_user_pages(acd->user_pages, nr_pages);
	kfree(acd->user_pages);
err_alloc_userpages:
	kfree(acd);
	dev_dbg(&priv->ldev->pldev->dev, "%s returning with error %d\n", __func__, rv);
	return rv;
}
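
/*
 * Completion callback for a transfer. Called when the hardware finishes the
 * descriptors for an acd, or with ACD_FLAG_ABORT from kpc_dma_close() when
 * outstanding descriptors are torn down. It unmaps and frees the scatterlist,
 * releases the pinned user pages, and either wakes the waiting reader/writer
 * or, if nobody is waiting anymore, frees the acd itself.
 */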
void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags)
{
	unsigned int i;

	BUG_ON(!acd->user_pages);
	BUG_ON(!acd->sgt.sgl);
	BUG_ON(!acd->ldev->pldev);

	dma_unmap_sg(&acd->ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, acd->ldev->dir);

	// Mark the pages dirty so the DMA'd data is written back, then release everything
	for (i = 0 ; i < acd->page_count ; i++) {
		if (!PageReserved(acd->user_pages[i]))
			set_page_dirty_lock(acd->user_pages[i]);
	}
	unpin_user_pages(acd->user_pages, acd->page_count);
	sg_free_table(&acd->sgt);
	kfree(acd->user_pages);

	acd->flags = flags;

	if (acd->cpl) {
		complete(acd->cpl);
	} else {
		/*
		 * There's no completion, so we're responsible for cleaning up
		 * the acd ourselves.
		 */
		kfree(acd);
	}
}
/**********  Fileops  **********/
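/*
 * Only one userspace client may hold the device open at a time: open succeeds
 * only when the atomic decrement of open_count reaches zero, and close
 * re-increments it to hand the device back.
 */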
int kpc_dma_open(struct inode *inode, struct file *filp)
{
	struct dev_private_data *priv;
	struct kpc_dma_device *ldev = kpc_dma_lookup_device(iminor(inode));

	if (!ldev)
		return -ENODEV;
	if (!atomic_dec_and_test(&ldev->open_count)) {
		atomic_inc(&ldev->open_count);
		return -EBUSY; /* already open */
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		atomic_inc(&ldev->open_count); /* give the device back on failure */
		return -ENOMEM;
	}

	priv->ldev = ldev;
	filp->private_data = priv;
	return 0;
}
int kpc_dma_close(struct inode *inode, struct file *filp)
{
	struct kpc_dma_descriptor *cur;
	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
	struct kpc_dma_device *eng = priv->ldev;

	lock_engine(eng);
	stop_dma_engine(eng);

	// Walk the descriptor ring and abort anything the hardware hasn't completed yet.
	cur = eng->desc_completed->Next;
	while (cur != eng->desc_next) {
		dev_dbg(&eng->pldev->dev, "Aborting descriptor %p (acd = %p)\n", cur, cur->acd);
		if (cur->DescControlFlags & DMA_DESC_CTL_EOP) {
			if (cur->acd)
				transfer_complete_cb(cur->acd, 0, ACD_FLAG_ABORT);
		}
		clear_desc(cur);
		eng->desc_completed = cur;
		cur = cur->Next;
	}

	start_dma_engine(eng);
	unlock_engine(eng);

	atomic_inc(&priv->ldev->open_count); /* release the device */
	kfree(priv);
	return 0;
}
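
/*
 * read()/write() map directly onto one blocking DMA transfer: the whole user
 * buffer is pinned, described to the engine, and the call sleeps until the
 * completion callback fires. The request direction must match the engine's
 * configured DMA direction.
 */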
ssize_t kpc_dma_read(struct file *filp, char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;

	if (priv->ldev->dir != DMA_FROM_DEVICE)
		return -EMEDIUMTYPE;

	return kpc_dma_transfer(priv, (unsigned long)user_buf, count);
}

ssize_t kpc_dma_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;

	if (priv->ldev->dir != DMA_TO_DEVICE)
		return -EMEDIUMTYPE;

	return kpc_dma_transfer(priv, (unsigned long)user_buf, count);
}
long kpc_dma_ioctl(struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param)
{
	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;

	switch (ioctl_num) {
	case KND_IOCTL_SET_CARD_ADDR:
		priv->card_addr = ioctl_param; return priv->card_addr;
	case KND_IOCTL_SET_USER_CTL:
		priv->user_ctl = ioctl_param; return priv->user_ctl;
	case KND_IOCTL_SET_USER_CTL_LAST:
		priv->user_ctl_last = ioctl_param; return priv->user_ctl_last;
	case KND_IOCTL_GET_USER_STS:
		return priv->user_sts;
	}

	return -ENOTTY;
}
const struct file_operations kpc_dma_fops = {
	.owner          = THIS_MODULE,
	.open           = kpc_dma_open,
	.release        = kpc_dma_close,
	.read           = kpc_dma_read,
	.write          = kpc_dma_write,
	.unlocked_ioctl = kpc_dma_ioctl,
};