2 * Renesas R-Car Audio DMAC support
4 * Copyright (C) 2015 Renesas Electronics Corp.
5 * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #include <linux/delay.h>
12 #include <linux/of_dma.h>
/*
 * Register bits, struct fragments and accessor macros for the Audio DMAC
 * (DMAEngine-driven) and Audio DMAC peri peri paths.
 * NOTE(review): this view of the file is truncated; struct bodies below
 * are partial, comments describe only the visible fields.
 */
16 * Audio DMAC peri peri register
23 #define PDMACHCR_DE (1 << 0) /* PDMACHCR DMA-enable bit, see rsnd_dmapp_start/stop */
27 struct dma_chan *chan; /* DMAEngine channel owned by rsnd_dmaen */
31 unsigned int dma_period; /* bytes per PCM period (set in rsnd_dmaen_start) */
42 struct rsnd_mod *mod_from; /* transfer-source module (rsnd_dma) */
43 struct rsnd_mod *mod_to; /* transfer-destination module (rsnd_dma) */
52 struct rsnd_dma_ctrl {
/* helpers mapping between priv/mod/dma container objects */
58 #define rsnd_priv_to_dmac(p) ((struct rsnd_dma_ctrl *)(p)->dma)
59 #define rsnd_mod_to_dma(_mod) container_of((_mod), struct rsnd_dma, mod)
60 #define rsnd_dma_to_dmaen(dma) (&(dma)->dma.en)
61 #define rsnd_dma_to_dmapp(dma) (&(dma)->dma.pp)
/*
 * Sync one DMA period (index i) of the PCM buffer between CPU and device.
 * Called via the rsnd_dmaen_sync()/rsnd_dmaen_unsync() wrappers: the last
 * parameter selects dma_sync_single_for_device() (sync=1, hand the period
 * to the DMAC) vs dma_sync_single_for_cpu() (sync=0, hand it back to the
 * CPU).  Direction depends on playback vs capture.
 * NOTE(review): some declarations/branches are missing from this view.
 */
68 static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io,
71 struct device *dev = dmaen->chan->device->dev;
72 enum dma_data_direction dir;
73 int is_play = rsnd_io_is_play(io);
79 period = dmaen->dma_period;
/* byte offset of period i inside the mapped buffer */
82 buf = dmaen->dma_buf + (period * i);
84 dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
87 dma_sync_single_for_device(dev, buf, period, dir);
89 dma_sync_single_for_cpu(dev, buf, period, dir);
/*
 * Period-complete handler, invoked through rsnd_mod_interrupt() from the
 * DMAEngine callback.  Under priv->lock it returns the just-finished
 * period to the CPU (unsync) and hands the period after next to the
 * device (sync), then notifies ALSA via rsnd_dai_period_elapsed() after
 * dropping the lock.
 */
92 static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
93 struct rsnd_dai_stream *io)
95 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
96 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
97 struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
102 * Renesas sound Gen1 needs 1 DMAC,
104 * In Gen2 case, it are Audio-DMAC, and Audio-DMAC-peri-peri.
105 * But, Audio-DMAC-peri-peri doesn't have interrupt,
106 * and this driver is assuming that here.
108 spin_lock_irqsave(&priv->lock, flags);
/* only touch buffers while the stream is actually running */
110 if (rsnd_io_is_working(io)) {
111 rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);
114 * Next period is already started.
115 * Let's sync Next Next period
119 rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);
126 spin_unlock_irqrestore(&priv->lock, flags);
/* wake ALSA outside the spinlock */
129 rsnd_dai_period_elapsed(io);
/* DMAEngine cyclic-transfer callback; defers to __rsnd_dmaen_complete() */
132 static void rsnd_dmaen_complete(void *data)
134 struct rsnd_mod *mod = data;
136 rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
/*
 * Request a DMAEngine channel for this stream.  Exactly one of
 * mod_from/mod_to must be NULL (memory on the other side); the non-NULL
 * module supplies the channel via rsnd_mod_dma_req().
 * NOTE(review): the early-exit return value for the both/neither case is
 * not visible in this view — presumably NULL; confirm against full file.
 */
139 static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
140 struct rsnd_mod *mod_from,
141 struct rsnd_mod *mod_to)
143 if ((!mod_from && !mod_to) ||
144 (mod_from && mod_to))
148 return rsnd_mod_dma_req(io, mod_from);
150 return rsnd_mod_dma_req(io, mod_to);
/*
 * .stop callback: terminate the cyclic transfer and unmap the PCM buffer
 * that rsnd_dmaen_start() mapped with dma_map_single().
 */
153 static int rsnd_dmaen_stop(struct rsnd_mod *mod,
154 struct rsnd_dai_stream *io,
155 struct rsnd_priv *priv)
157 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
158 struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
161 int is_play = rsnd_io_is_play(io);
163 dmaengine_terminate_all(dmaen->chan);
/* direction must match the dma_map_single() in rsnd_dmaen_start() */
164 dma_unmap_single(dmaen->chan->device->dev,
165 dmaen->dma_buf, dmaen->dma_len,
166 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
/*
 * .nolock_stop callback: release the DMAEngine channel.  Done here (not
 * in .stop) because dma_release_channel() takes a mutex and therefore
 * must not run under the driver spinlock.
 */
172 static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
173 struct rsnd_dai_stream *io,
174 struct rsnd_priv *priv)
176 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
177 struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
180 * DMAEngine release uses mutex lock.
181 * Thus, it shouldn't be called under spinlock.
182 * Let's call it under nolock_start
185 dma_release_channel(dmaen->chan);
/*
 * .nolock_start callback: acquire the DMAEngine channel outside the
 * spinlock (channel request also takes a mutex).  Refuses to run twice
 * on the same stream without an intervening release.
 */
192 static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
193 struct rsnd_dai_stream *io,
194 struct rsnd_priv *priv)
196 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
197 struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
198 struct device *dev = rsnd_priv_to_dev(priv);
/* a leftover channel means unbalanced start/stop */
201 dev_err(dev, "it already has dma channel\n");
206 * DMAEngine request uses mutex lock.
207 * Thus, it shouldn't be called under spinlock.
208 * Let's call it under nolock_start
210 dmaen->chan = rsnd_dmaen_request_channel(io,
/* NULL (no channel) and ERR_PTR are both treated as failure */
213 if (IS_ERR_OR_NULL(dmaen->chan)) {
214 int ret = PTR_ERR(dmaen->chan);
217 dev_err(dev, "can't get dma channel\n");
/*
 * .start callback: configure the slave channel, map the ALSA DMA buffer,
 * prepare a cyclic descriptor over it, pre-sync the first two periods to
 * the device, then submit and kick the transfer.
 * NOTE(review): error-path lines (unmap on prep failure etc.) are not
 * visible in this truncated view.
 */
224 static int rsnd_dmaen_start(struct rsnd_mod *mod,
225 struct rsnd_dai_stream *io,
226 struct rsnd_priv *priv)
228 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
229 struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
230 struct snd_pcm_substream *substream = io->substream;
231 struct device *dev = rsnd_priv_to_dev(priv);
232 struct dma_async_tx_descriptor *desc;
233 struct dma_slave_config cfg = {};
237 int is_play = rsnd_io_is_play(io);
/* fixed 32-bit accesses on both sides of the FIFO */
241 cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
242 cfg.src_addr = dma->src_addr;
243 cfg.dst_addr = dma->dst_addr;
244 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
245 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
247 dev_dbg(dev, "%s[%d] %pad -> %pad\n",
248 rsnd_mod_name(mod), rsnd_mod_id(mod),
249 &cfg.src_addr, &cfg.dst_addr);
251 ret = dmaengine_slave_config(dmaen->chan, &cfg);
/* map the whole runtime buffer once; periods are synced piecewise */
255 len = snd_pcm_lib_buffer_bytes(substream);
256 period = snd_pcm_lib_period_bytes(substream);
257 buf = dma_map_single(dmaen->chan->device->dev,
258 substream->runtime->dma_area,
260 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
261 if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
262 dev_err(dev, "dma map failed\n");
266 desc = dmaengine_prep_dma_cyclic(dmaen->chan,
268 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
269 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
272 dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
/* callback fires per period; param is the mod, see rsnd_dmaen_complete */
276 desc->callback = rsnd_dmaen_complete;
277 desc->callback_param = rsnd_mod_get(dma);
279 dmaen->dma_buf = buf;
280 dmaen->dma_len = len;
281 dmaen->dma_period = period;
285 * synchronize this and next period
287 * __rsnd_dmaen_complete()
289 for (i = 0; i < 2; i++)
290 rsnd_dmaen_sync(dmaen, io, i);
292 dmaen->cookie = dmaengine_submit(desc);
293 if (dmaen->cookie < 0) {
294 dev_err(dev, "dmaengine_submit() fail\n");
298 dma_async_issue_pending(dmaen->chan);
/*
 * Walk the DMAC OF node's children and request the named slave channel
 * from the child whose index matches this mod's id.  Consumes (puts) the
 * of_node reference handed in by rsnd_xxx_of_node().
 * NOTE(review): the i++ inside the loop is not visible in this view.
 */
303 struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
304 struct rsnd_mod *mod, char *name)
306 struct dma_chan *chan = NULL;
307 struct device_node *np;
310 for_each_child_of_node(of_node, np) {
311 if (i == rsnd_mod_id(mod) && (!chan))
312 chan = of_dma_request_slave_channel(np, name);
316 /* It should call of_node_put(), since, it is rsnd_xxx_of_node() */
317 of_node_put(of_node);
/*
 * Attach-time probe for the DMAEngine path: try to get a channel just to
 * see whether DMA is available.  -EPROBE_DEFER is propagated; any other
 * failure falls back to PIO mode elsewhere.  The probe channel is
 * released immediately — the real one is taken in nolock_start.
 */
322 static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
323 struct rsnd_dma *dma,
324 struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
326 struct rsnd_priv *priv = rsnd_io_to_priv(io);
327 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
328 struct dma_chan *chan;
330 /* try to get DMAEngine channel */
331 chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
332 if (IS_ERR_OR_NULL(chan)) {
333 /* Let's follow when -EPROBE_DEFER case */
334 if (PTR_ERR(chan) == -EPROBE_DEFER)
335 return PTR_ERR(chan);
338 * DMA failed. try to PIO mode
340 * rsnd_ssi_fallback()
341 * rsnd_rdai_continuance_probe()
/* probe succeeded; give the channel back until start time */
346 dma_release_channel(chan);
/*
 * .pointer callback: derive the current hardware position from the
 * DMAEngine residue.  Position stays 0 when the transfer is not
 * in-progress/paused or the residue looks out of range.
 */
353 static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
354 struct rsnd_dai_stream *io,
355 snd_pcm_uframes_t *pointer)
357 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
358 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
359 struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
360 struct dma_tx_state state;
361 enum dma_status status;
362 unsigned int pos = 0;
364 status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
365 if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
/* residue counts bytes still to transfer in the current cycle */
366 if (state.residue > 0 && state.residue <= dmaen->dma_len)
367 pos = dmaen->dma_len - state.residue;
369 *pointer = bytes_to_frames(runtime, pos);
/* mod ops for the DMAEngine (Audio DMAC) transfer path */
374 static struct rsnd_mod_ops rsnd_dmaen_ops = {
376 .nolock_start = rsnd_dmaen_nolock_start,
377 .nolock_stop = rsnd_dmaen_nolock_stop,
378 .start = rsnd_dmaen_start,
379 .stop = rsnd_dmaen_stop,
380 .pointer= rsnd_dmaen_pointer,
/*
 * Gen2 Audio DMAC peri peri resource-id tables, indexed by mod id and
 * used by rsnd_dmapp_get_id() to build PDMACHCR values.
 * NOTE(review): the ssiu and cmd table entries are not visible in this
 * truncated view.
 */
384 * Audio DMAC peri peri
386 static const u8 gen2_id_table_ssiu[] = {
398 static const u8 gen2_id_table_scu[] = {
399 0x2d, /* SCU_SRCI0 */
400 0x2e, /* SCU_SRCI1 */
401 0x2f, /* SCU_SRCI2 */
402 0x30, /* SCU_SRCI3 */
403 0x31, /* SCU_SRCI4 */
404 0x32, /* SCU_SRCI5 */
405 0x33, /* SCU_SRCI6 */
406 0x34, /* SCU_SRCI7 */
407 0x35, /* SCU_SRCI8 */
408 0x36, /* SCU_SRCI9 */
410 static const u8 gen2_id_table_cmd[] = {
/*
 * Map a module (SSI/SSIU, SRC or DVC) to its Audio DMAC peri peri
 * resource id via the gen2 tables above.  Unknown module or
 * out-of-range id logs an error and returns 0x00 (SSI00) as a
 * non-prohibited fallback.
 */
415 static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
416 struct rsnd_mod *mod)
418 struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
419 struct rsnd_mod *src = rsnd_io_to_mod_src(io);
420 struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
421 const u8 *entry = NULL;
422 int id = rsnd_mod_id(mod);
426 entry = gen2_id_table_ssiu;
427 size = ARRAY_SIZE(gen2_id_table_ssiu);
428 } else if (mod == src) {
429 entry = gen2_id_table_scu;
430 size = ARRAY_SIZE(gen2_id_table_scu);
431 } else if (mod == dvc) {
432 entry = gen2_id_table_cmd;
433 size = ARRAY_SIZE(gen2_id_table_cmd);
436 if ((!entry) || (size <= id)) {
437 struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));
439 dev_err(dev, "unknown connection (%s[%d])\n",
440 rsnd_mod_name(mod), rsnd_mod_id(mod));
442 /* use non-prohibited SRS number as error */
443 return 0x00; /* SSI00 */
/*
 * Build the PDMACHCR value: source resource id in bits 31-24,
 * destination resource id in bits 23-16.
 */
449 static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
450 struct rsnd_mod *mod_from,
451 struct rsnd_mod *mod_to)
453 return (rsnd_dmapp_get_id(io, mod_from) << 24) +
454 (rsnd_dmapp_get_id(io, mod_to) << 16);
/*
 * Per-channel register address: channels start at base + 0x20 and are
 * 0x10 bytes apart; reg selects the register within the channel.
 */
457 #define rsnd_dmapp_addr(dmac, dma, reg) \
458 (dmac->base + 0x20 + reg + \
459 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
/* 32-bit write to an Audio DMAC peri peri channel register */
460 static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
462 struct rsnd_mod *mod = rsnd_mod_get(dma);
463 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
464 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
465 struct device *dev = rsnd_priv_to_dev(priv);
467 dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);
469 iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
/* 32-bit read from an Audio DMAC peri peri channel register */
472 static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
474 struct rsnd_mod *mod = rsnd_mod_get(dma);
475 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
476 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
478 return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
/*
 * Read-modify-write of masked bits in a channel register.
 * NOTE(review): the mask-clear step (val &= ~mask) is not visible in
 * this truncated view; only the OR of (data & mask) is shown.
 */
481 static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
483 struct rsnd_mod *mod = rsnd_mod_get(dma);
484 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
485 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
486 void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
487 u32 val = ioread32(addr);
490 val |= (data & mask);
492 iowrite32(val, addr);
/*
 * .stop/.quit callback: clear PDMACHCR_DE and poll (bounded, up to 1024
 * iterations) until the hardware reports the channel disabled.
 */
495 static int rsnd_dmapp_stop(struct rsnd_mod *mod,
496 struct rsnd_dai_stream *io,
497 struct rsnd_priv *priv)
499 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
502 rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);
504 for (i = 0; i < 1024; i++) {
505 if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
/*
 * .start callback: program source/destination FIFO addresses, then write
 * the precomputed CHCR (ids + DE bit, built in rsnd_dmapp_attach) to
 * kick the peri peri transfer.
 */
513 static int rsnd_dmapp_start(struct rsnd_mod *mod,
514 struct rsnd_dai_stream *io,
515 struct rsnd_priv *priv)
517 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
518 struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
520 rsnd_dmapp_write(dma, dma->src_addr, PDMASAR);
521 rsnd_dmapp_write(dma, dma->dst_addr, PDMADAR);
522 rsnd_dmapp_write(dma, dmapp->chcr, PDMACHCR);
/*
 * Attach a peri peri channel: take the next free dmapp id from the
 * controller and precompute CHCR (resource ids | DE) for start time.
 * NOTE(review): the dmac->dmapp_num increment is not visible in this
 * truncated view.
 */
527 static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
528 struct rsnd_dma *dma,
529 struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
531 struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
532 struct rsnd_priv *priv = rsnd_io_to_priv(io);
533 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
534 struct device *dev = rsnd_priv_to_dev(priv);
536 dmapp->dmapp_id = dmac->dmapp_num;
537 dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;
541 dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
542 dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);
/* mod ops for the Audio DMAC peri peri transfer path (no pointer: no irq) */
547 static struct rsnd_mod_ops rsnd_dmapp_ops = {
549 .start = rsnd_dmapp_start,
550 .stop = rsnd_dmapp_stop,
551 .quit = rsnd_dmapp_stop,
/*
 * FIFO address calculation macros.  Each takes the module's base
 * physical address token (ssi/src, expanded to xxx_reg) and the module
 * index, and applies a fixed offset to reach the DMAC-visible data
 * register.  _N variants are for the Audio DMAC, _P variants for the
 * Audio DMAC peri peri; _I is input, _O is output.
 */
555 * Common DMAC Interface
559 * DMA read/write register offset
561 * RSND_xxx_I_N for Audio DMAC input
562 * RSND_xxx_O_N for Audio DMAC output
563 * RSND_xxx_I_P for Audio DMAC peri peri input
564 * RSND_xxx_O_P for Audio DMAC peri peri output
567 * mod / DMAC in / DMAC out / DMAC PP in / DMAC pp out
568 * SSI : 0xec541000 / 0xec241008 / 0xec24100c
569 * SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
570 * SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
571 * CMD : 0xec500000 / / 0xec008000 0xec308000
573 #define RDMA_SSI_I_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
574 #define RDMA_SSI_O_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)
576 #define RDMA_SSIU_I_N(addr, i) (addr ##_reg - 0x00441000 + (0x1000 * i))
577 #define RDMA_SSIU_O_N(addr, i) (addr ##_reg - 0x00441000 + (0x1000 * i))
579 #define RDMA_SSIU_I_P(addr, i) (addr ##_reg - 0x00141000 + (0x1000 * i))
580 #define RDMA_SSIU_O_P(addr, i) (addr ##_reg - 0x00141000 + (0x1000 * i))
582 #define RDMA_SRC_I_N(addr, i) (addr ##_reg - 0x00500000 + (0x400 * i))
583 #define RDMA_SRC_O_N(addr, i) (addr ##_reg - 0x004fc000 + (0x400 * i))
585 #define RDMA_SRC_I_P(addr, i) (addr ##_reg - 0x00200000 + (0x400 * i))
586 #define RDMA_SRC_O_P(addr, i) (addr ##_reg - 0x001fc000 + (0x400 * i))
588 #define RDMA_CMD_O_N(addr, i) (addr ##_reg - 0x004f8000 + (0x400 * i))
589 #define RDMA_CMD_O_P(addr, i) (addr ##_reg - 0x001f8000 + (0x400 * i))
/*
 * Compute the Gen2 DMA source/destination physical address for a module,
 * selected from a 3-D table indexed by [is_ssi (0=SRC/CMD,1=SSI,2=SSIU)]
 * [is_play][path depth (memory-only / +SRC / +SRC+CMD)]; is_from picks
 * out_addr vs in_addr.  SSIU entries are used when the SSI goes through
 * BUSIF.
 * NOTE(review): several table rows and the struct-member declarations
 * are missing from this truncated view.
 */
592 rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
593 struct rsnd_mod *mod,
594 int is_play, int is_from)
596 struct rsnd_priv *priv = rsnd_io_to_priv(io);
597 struct device *dev = rsnd_priv_to_dev(priv);
598 phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
599 phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
600 int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
601 int use_src = !!rsnd_io_to_mod_src(io);
/* CMD block is in use if DVC, MIX or CTU sits on the path */
602 int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
603 !!rsnd_io_to_mod_mix(io) ||
604 !!rsnd_io_to_mod_ctu(io);
605 int id = rsnd_mod_id(mod);
609 } dma_addrs[3][2][3] = {
613 { RDMA_SRC_O_N(src, id), RDMA_SRC_I_P(src, id) },
614 { RDMA_CMD_O_N(src, id), RDMA_SRC_I_P(src, id) } },
617 { RDMA_SRC_O_P(src, id), RDMA_SRC_I_N(src, id) },
618 { RDMA_CMD_O_P(src, id), RDMA_SRC_I_N(src, id) } }
622 {{{ RDMA_SSI_O_N(ssi, id), 0 },
623 { RDMA_SSIU_O_P(ssi, id), 0 },
624 { RDMA_SSIU_O_P(ssi, id), 0 } },
626 {{ 0, RDMA_SSI_I_N(ssi, id) },
627 { 0, RDMA_SSIU_I_P(ssi, id) },
628 { 0, RDMA_SSIU_I_P(ssi, id) } }
632 {{{ RDMA_SSIU_O_N(ssi, id), 0 },
633 { RDMA_SSIU_O_P(ssi, id), 0 },
634 { RDMA_SSIU_O_P(ssi, id), 0 } },
636 {{ 0, RDMA_SSIU_I_N(ssi, id) },
637 { 0, RDMA_SSIU_I_P(ssi, id) },
638 { 0, RDMA_SSIU_I_P(ssi, id) } } },
641 /* it shouldn't happen */
642 if (use_cmd && !use_src)
643 dev_err(dev, "DVC is selected without SRC\n");
645 /* use SSIU or SSI ? */
646 if (is_ssi && rsnd_ssi_use_busif(io))
650 dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
651 dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
/*
 * Generation dispatch for DMA addresses: Gen1 keeps the DMAEngine
 * default address, Gen2 computes it via rsnd_gen2_dma_addr().
 */
654 static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
655 struct rsnd_mod *mod,
656 int is_play, int is_from)
658 struct rsnd_priv *priv = rsnd_io_to_priv(io);
661 * gen1 uses default DMA addr
663 if (rsnd_is_gen1(priv))
669 return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
/*
 * Build the module path (memory -> SRC -> CTU -> MIX -> DVC -> SSI for
 * playback, reversed for capture) and report which neighbours of 'this'
 * become the DMA's mod_from/mod_to.  The mod[] array is bracketed by a
 * NULL "memory" endpoint on one side and the SSI on the other.
 * NOTE(review): the path-filling loop body and the mod_end assignment
 * are not visible in this truncated view; the debug loop below assumes
 * mod[idx] holds mod_end.
 */
672 #define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
673 static void rsnd_dma_of_path(struct rsnd_mod *this,
674 struct rsnd_dai_stream *io,
676 struct rsnd_mod **mod_from,
677 struct rsnd_mod **mod_to)
679 struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
680 struct rsnd_mod *src = rsnd_io_to_mod_src(io);
681 struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
682 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
683 struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
684 struct rsnd_mod *mod[MOD_MAX];
685 struct rsnd_mod *mod_start, *mod_end;
686 struct rsnd_priv *priv = rsnd_mod_to_priv(this);
687 struct device *dev = rsnd_priv_to_dev(priv);
/* count the modules actually connected to this stream */
694 for (i = 0; i < MOD_MAX; i++) {
696 nr += !!rsnd_io_to_mod(io, i);
701 * [S] -*-> SRC -o-> [E]
702 * [S] -*-> SRC -> DVC -o-> [E]
703 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
712 * -o-> Audio DMAC peri peri
/* capture starts at the SSI, playback ends there; NULL == memory */
714 mod_start = (is_play) ? NULL : ssi;
715 mod_end = (is_play) ? ssi : NULL;
718 mod[idx++] = mod_start;
719 for (i = 1; i < nr; i++) {
738 * -------------+-----+-----+
/* on the memory side of 'this': previous entry feeds it */
742 if ((this == ssi) == (is_play)) {
743 *mod_from = mod[idx - 1];
750 dev_dbg(dev, "module connection (this is %s[%d])\n",
751 rsnd_mod_name(this), rsnd_mod_id(this));
752 for (i = 0; i <= idx; i++) {
753 dev_dbg(dev, " %s[%d]%s\n",
754 rsnd_mod_name(mod[i]), rsnd_mod_id(mod[i]),
755 (mod[i] == *mod_from) ? " from" :
756 (mod[i] == *mod_to) ? " to" : "");
/*
 * Create and attach a DMA mod for 'mod' on stream 'io'.  Chooses the
 * Audio DMAC peri peri path (both neighbours are modules) or the
 * DMAEngine path (one side is memory); Gen1 always uses DMAEngine.
 * Allocates the rsnd_dma, initializes it as a mod, resolves FIFO
 * addresses and connects it into the DAI stream.  On DMA failure the
 * caller can fall back to PIO.
 * NOTE(review): the allocation guard and several early returns are not
 * visible in this truncated view.
 */
760 int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
761 struct rsnd_mod **dma_mod)
763 struct rsnd_mod *mod_from = NULL;
764 struct rsnd_mod *mod_to = NULL;
765 struct rsnd_priv *priv = rsnd_io_to_priv(io);
766 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
767 struct device *dev = rsnd_priv_to_dev(priv);
768 struct rsnd_mod_ops *ops;
769 enum rsnd_mod_type type;
770 int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
771 struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
772 int is_play = rsnd_io_is_play(io);
776 * DMA failed. try to PIO mode
778 * rsnd_ssi_fallback()
779 * rsnd_rdai_continuance_probe()
784 rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);
/* module-to-module: peri peri; module-to-memory: DMAEngine */
787 if (mod_from && mod_to) {
788 ops = &rsnd_dmapp_ops;
789 attach = rsnd_dmapp_attach;
790 dma_id = dmac->dmapp_num;
791 type = RSND_MOD_AUDMAPP;
793 ops = &rsnd_dmaen_ops;
794 attach = rsnd_dmaen_attach;
795 dma_id = dmac->dmaen_num;
796 type = RSND_MOD_AUDMA;
799 /* for Gen1, overwrite */
800 if (rsnd_is_gen1(priv)) {
801 ops = &rsnd_dmaen_ops;
802 attach = rsnd_dmaen_attach;
803 dma_id = dmac->dmaen_num;
804 type = RSND_MOD_AUDMA;
808 struct rsnd_dma *dma;
810 dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
814 *dma_mod = rsnd_mod_get(dma);
816 ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
817 rsnd_mod_get_status, type, dma_id);
821 dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
822 rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
823 rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
824 rsnd_mod_name(mod_to), rsnd_mod_id(mod_to));
826 ret = attach(io, dma, mod_from, mod_to);
/* is_from=1 for the source side, 0 for the destination side */
830 dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
831 dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
832 dma->mod_from = mod_from;
833 dma->mod_to = mod_to;
836 ret = rsnd_dai_connect(*dma_mod, io, type);
843 int rsnd_dma_probe(struct rsnd_priv *priv)
845 struct platform_device *pdev = rsnd_priv_to_pdev(priv);
846 struct device *dev = rsnd_priv_to_dev(priv);
847 struct rsnd_dma_ctrl *dmac;
848 struct resource *res;
853 if (rsnd_is_gen1(priv))
859 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
860 dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
862 dev_err(dev, "dma allocate failed\n");
863 return 0; /* it will be PIO mode */
867 dmac->base = devm_ioremap_resource(dev, res);
868 if (IS_ERR(dmac->base))
869 return PTR_ERR(dmac->base);