/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			12

#define DMA_MASK_CTL0_MODE	0x33333333
#define DMA_MASK_CTL2_MODE	0x00003333

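/*
 * Each channel owns a 4-bit field in CTL0 (channels 0-7) or CTL3
 * (channels 8-11): bits [1:0] select the transfer mode (disable,
 * scatter-gather or one-shot) and bit [2] selects the direction.
 * DMA_MASK_CTL0_MODE/DMA_MASK_CTL2_MODE cover the mode bits of every
 * channel in the respective register; clearing one channel's mode bits
 * out of the mask yields the "preserve the other channels" mask used by
 * pdc_set_dir() and pdc_set_mode() below.
 */
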
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};

#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

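/*
 * Per-channel descriptor registers: each channel has a DEV_ADDR/MEM_ADDR/
 * SIZE/NEXT window at pd_chan->membase, which points into the desc[] array
 * of the controller's register block (set up in pch_dma_probe()).
 * struct pch_dma_desc_regs mirrors this layout, so a descriptor can be
 * handed to the hardware either by programming the window directly
 * (one-shot) or by its physical address via NEXT (scatter-gather).
 */
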
struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct pci_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

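/*
 * CTL2 holds the per-channel interrupt enables: channels 0-7 use bits
 * 0-7, channels 8-11 use bits 16-19 (chan_id + 8).
 */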
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

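/*
 * Program the direction bit of the channel's 4-bit control field while
 * leaving the other channels' fields untouched; channels 0-7 live in
 * CTL0, channels 8-11 in CTL3.
 */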
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

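/*
 * Channel status is reported as a 2-bit field per channel starting at
 * bit 16: STS0 covers channels 0-7, STS2 covers channels 8-11.
 */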
static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	if (sts == DMA_STATUS_IDLE)
		return true;
	else
		return false;
}

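/*
 * Hand a descriptor to an idle channel.  A stand-alone descriptor (empty
 * tx_list) is programmed directly into the channel registers in one-shot
 * mode; a chained descriptor list is started in scatter-gather mode by
 * writing the physical address of the first descriptor to NEXT.
 */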
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	if (callback)
		callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

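/*
 * tx_submit callback: assign a cookie and either start the descriptor
 * immediately (active list empty) or park it on the queue until
 * pdc_advance_work() picks it up.
 */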
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock(&pd_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);

	return cookie;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = pci_pool_alloc(pd->pool, flags, &addr);
	if (desc) {
		memset(desc, 0, sizeof(struct pch_dma_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				 "Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		pci_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

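/*
 * Build a descriptor chain for a slave transfer.  Each scatterlist entry
 * becomes one hardware descriptor: the device FIFO address comes from the
 * pch_dma_slave private data, the size field is limited by and tagged with
 * the configured register width, and descriptors are linked through the
 * NEXT field using the FOLLOW/END encodings.  Only the last descriptor
 * raises an interrupt, and only if DMA_PREP_INTERRUPT is set.
 */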
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction,
			unsigned long flags, void *context)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_dma_address(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_terminate_all(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}

static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

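/*
 * Shared interrupt handler: read STS0/STS2, record an error flag per
 * channel in err_status, defer the real work to the channel tasklet and
 * write the status values back to clear the serviced interrupt bits.
 */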
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}

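/*
 * Suspend/resume support: the CTL registers and each channel's descriptor
 * registers are saved into struct pch_dma before the device is powered
 * down and written back on resume.
 */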
static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}

static int pch_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);
	pd->dma.dev = &pdev->dev;

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_terminate_all = pd_device_terminate_all;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	pci_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}

static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		pci_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_VENDOR_ID_ROHM		0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815

static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4 }, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4 }, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4 }, /* SPI */
	{ 0, },
};

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
	.suspend	= pch_dma_suspend,
	.resume		= pch_dma_resume,
};

module_pci_driver(pch_dma_driver);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);