// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 */

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_dma.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define REG_MASK		0xffffffff

#define DMA_LOOPBACK		BIT(31)
#define DMA_ENABLE		BIT(31)
#define DMA_TEARDOWN		BIT(30)

#define DMA_TX_FILT_PSWORDS	BIT(29)
#define DMA_TX_FILT_EINFO	BIT(30)
#define DMA_TX_PRIO_SHIFT	0
#define DMA_RX_PRIO_SHIFT	16
#define DMA_PRIO_MASK		GENMASK(3, 0)
#define DMA_PRIO_DEFAULT	0
#define DMA_RX_TIMEOUT_DEFAULT	17500	/* cycles */
#define DMA_RX_TIMEOUT_MASK	GENMASK(16, 0)
#define DMA_RX_TIMEOUT_SHIFT	0

#define CHAN_HAS_EPIB		BIT(30)
#define CHAN_HAS_PSINFO		BIT(29)
#define CHAN_ERR_RETRY		BIT(28)
#define CHAN_PSINFO_AT_SOP	BIT(25)
#define CHAN_SOP_OFF_SHIFT	16
#define CHAN_SOP_OFF_MASK	GENMASK(9, 0)
#define DESC_TYPE_SHIFT		26
#define DESC_TYPE_MASK		GENMASK(2, 0)

/*
 * QMGR & QNUM together make up 14 bits with QMGR as the 2 MSBs in the logical
 * navigator cloud mapping scheme.
 * Using the 14-bit physical queue numbers directly maps into this scheme.
 */
#define CHAN_QNUM_MASK		GENMASK(14, 0)
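
/*
 * Illustrative example of the encoding above (values invented for the
 * example, not taken from a spec): with QMGR in bits [13:12] and QNUM in
 * bits [11:0], physical queue number 0x2040 decodes as logical queue
 * manager 2, queue 0x040 within that manager.
 */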
#define DMA_MAX_QMS		4
#define DMA_TIMEOUT		1	/* msecs */
#define DMA_INVALID_ID		0xffff

struct reg_global {
	u32	revision;
	u32	perf_control;
	u32	emulation_control;
	u32	priority_control;
	u32	qm_base_address[DMA_MAX_QMS];
};

struct reg_chan {
	u32	control;
	u32	mode;
	u32	__rsvd[6];
};

struct reg_tx_sched {
	u32	prio;
};

struct reg_rx_flow {
	u32	control;
	u32	tags;
	u32	tag_sel;
	u32	fdq_sel[2];
	u32	thresh[3];
};

struct knav_dma_pool_device {
	struct device		*dev;
	struct list_head	list;
};

struct knav_dma_device {
	bool				loopback, enable_all;
	unsigned			tx_priority, rx_priority, rx_timeout;
	unsigned			logical_queue_managers;
	unsigned			qm_base_address[DMA_MAX_QMS];
	struct reg_global __iomem	*reg_global;
	struct reg_chan __iomem		*reg_tx_chan;
	struct reg_rx_flow __iomem	*reg_rx_flow;
	struct reg_chan __iomem		*reg_rx_chan;
	struct reg_tx_sched __iomem	*reg_tx_sched;
	unsigned			max_rx_chan, max_tx_chan;
	unsigned			max_rx_flow;
	char				name[32];
	atomic_t			ref_count;
	struct list_head		list;
	struct list_head		chan_list;
	spinlock_t			lock;
};

struct knav_dma_chan {
	enum dma_transfer_direction	direction;
	struct knav_dma_device		*dma;
	atomic_t			ref_count;

	/* registers */
	struct reg_chan __iomem		*reg_chan;
	struct reg_tx_sched __iomem	*reg_tx_sched;
	struct reg_rx_flow __iomem	*reg_rx_flow;

	/* configuration stuff */
	unsigned			channel, flow;
	struct knav_dma_cfg		cfg;
	struct list_head		list;
	spinlock_t			lock;
};
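
/*
 * A channel is keyed by its hardware channel number in the tx direction
 * and by its rx flow id in the rx direction; the unused index is set to
 * DMA_INVALID_ID by the pktdma_init_{tx,rx}_chan() helpers below.
 */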
#define chan_number(ch)	((ch->direction == DMA_MEM_TO_DEV) ? \
			ch->channel : ch->flow)

static struct knav_dma_pool_device *kdev;

static bool device_ready;
bool knav_dma_device_ready(void)
{
	return device_ready;
}
EXPORT_SYMBOL_GPL(knav_dma_device_ready);

static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg)
{
	return !memcmp(&chan->cfg, cfg, sizeof(*cfg));
}

static int chan_start(struct knav_dma_chan *chan,
			struct knav_dma_cfg *cfg)
{
	u32 v = 0;

	spin_lock(&chan->lock);
	if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
		if (cfg->u.tx.filt_pswords)
			v |= DMA_TX_FILT_PSWORDS;
		if (cfg->u.tx.filt_einfo)
			v |= DMA_TX_FILT_EINFO;
		writel_relaxed(v, &chan->reg_chan->mode);
		writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
	}

	if (chan->reg_tx_sched)
		writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);

	if (chan->reg_rx_flow) {
		v = 0;

		if (cfg->u.rx.einfo_present)
			v |= CHAN_HAS_EPIB;
		if (cfg->u.rx.psinfo_present)
			v |= CHAN_HAS_PSINFO;
		if (cfg->u.rx.err_mode == DMA_RETRY)
			v |= CHAN_ERR_RETRY;
		v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
		if (cfg->u.rx.psinfo_at_sop)
			v |= CHAN_PSINFO_AT_SOP;
		v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
			<< CHAN_SOP_OFF_SHIFT;
		v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;

		writel_relaxed(v, &chan->reg_rx_flow->control);
		writel_relaxed(0, &chan->reg_rx_flow->tags);
		writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
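
		/*
		 * Each fdq_sel register carries a pair of free descriptor
		 * queues: fdq[0]/fdq[2] go in the upper 16 bits and
		 * fdq[1]/fdq[3] in the lower 16 bits.
		 */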
		v = cfg->u.rx.fdq[0] << 16;
		v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
		writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);

		v = cfg->u.rx.fdq[2] << 16;
		v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
		writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);

		writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
	}

	/* Keep a copy of the cfg */
	memcpy(&chan->cfg, cfg, sizeof(*cfg));
	spin_unlock(&chan->lock);

	return 0;
}

static int chan_teardown(struct knav_dma_chan *chan)
{
	unsigned long end, value;

	if (!chan->reg_chan)
		return 0;

	/* indicate teardown */
	writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);

	/* wait for the dma to shut itself down */
	end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
	do {
		value = readl_relaxed(&chan->reg_chan->control);
		if ((value & DMA_ENABLE) == 0)
			break;
	} while (time_after(end, jiffies));
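
	/*
	 * Re-check the live register rather than the value sampled in the
	 * loop: the poll may have run out of time just as the channel
	 * finished shutting down.
	 */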
	if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
		dev_err(kdev->dev, "timeout waiting for teardown\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void chan_stop(struct knav_dma_chan *chan)
{
	spin_lock(&chan->lock);
	if (chan->reg_rx_flow) {
		/* first detach fdqs, starve out the flow */
		writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
		writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
	}

	/* teardown the dma channel */
	chan_teardown(chan);

	/* then disconnect the completion side */
	if (chan->reg_rx_flow) {
		writel_relaxed(0, &chan->reg_rx_flow->control);
		writel_relaxed(0, &chan->reg_rx_flow->tags);
		writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
	}

	memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
	spin_unlock(&chan->lock);

	dev_dbg(kdev->dev, "channel stopped\n");
}

static void dma_hw_enable_all(struct knav_dma_device *dma)
{
	int i;

	for (i = 0; i < dma->max_tx_chan; i++) {
		writel_relaxed(0, &dma->reg_tx_chan[i].mode);
		writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
	}
}

static void knav_dma_hw_init(struct knav_dma_device *dma)
{
	unsigned v;
	int i;

	spin_lock(&dma->lock);
	v = dma->loopback ? DMA_LOOPBACK : 0;
	writel_relaxed(v, &dma->reg_global->emulation_control);

	v = readl_relaxed(&dma->reg_global->perf_control);
	v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
	writel_relaxed(v, &dma->reg_global->perf_control);

	v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
	     (dma->rx_priority << DMA_RX_PRIO_SHIFT));

	writel_relaxed(v, &dma->reg_global->priority_control);

	/* Always enable all Rx channels. Rx paths are managed using flows */
	for (i = 0; i < dma->max_rx_chan; i++)
		writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);

	for (i = 0; i < dma->logical_queue_managers; i++)
		writel_relaxed(dma->qm_base_address[i],
			       &dma->reg_global->qm_base_address[i]);
	spin_unlock(&dma->lock);
}

static void knav_dma_hw_destroy(struct knav_dma_device *dma)
{
	int i;
	unsigned v;

	spin_lock(&dma->lock);
	v = ~DMA_ENABLE & REG_MASK;

	for (i = 0; i < dma->max_rx_chan; i++)
		writel_relaxed(v, &dma->reg_rx_chan[i].control);

	for (i = 0; i < dma->max_tx_chan; i++)
		writel_relaxed(v, &dma->reg_tx_chan[i].control);
	spin_unlock(&dma->lock);
}

static void dma_debug_show_channels(struct seq_file *s,
				    struct knav_dma_chan *chan)
{
	int i;

	seq_printf(s, "\t%s %d:\t",
		((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
		chan_number(chan));

	if (chan->direction == DMA_MEM_TO_DEV) {
		seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
			chan->cfg.u.tx.filt_einfo,
			chan->cfg.u.tx.filt_pswords,
			chan->cfg.u.tx.priority);
	} else {
		seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
			chan->cfg.u.rx.einfo_present,
			chan->cfg.u.rx.psinfo_present,
			chan->cfg.u.rx.desc_type);
		seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
			chan->cfg.u.rx.dst_q,
			chan->cfg.u.rx.thresh);
		for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
			seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
		seq_puts(s, "\n");
	}
}

static void dma_debug_show_devices(struct seq_file *s,
				   struct knav_dma_device *dma)
{
	struct knav_dma_chan *chan;

	list_for_each_entry(chan, &dma->chan_list, list) {
		if (atomic_read(&chan->ref_count))
			dma_debug_show_channels(s, chan);
	}
}

static int knav_dma_debug_show(struct seq_file *s, void *v)
{
	struct knav_dma_device *dma;

	list_for_each_entry(dma, &kdev->list, list) {
		if (atomic_read(&dma->ref_count)) {
			seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
				   dma->name, dma->max_tx_chan, dma->max_rx_flow);
			dma_debug_show_devices(s, dma);
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(knav_dma_debug);
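
/*
 * Sample of what the debugfs file might print for an in-use instance,
 * given the format strings above (all values are illustrative only):
 *
 *	dma_gbe : max_tx_chan: (9), max_rx_flows: (32)
 *		tx chan 0:	einfo - 0, pswords - 0, priority - 1
 *		rx flow 22:	einfo - 1, psinfo - 1, desc_type - 0
 *				dst_q: [528], thresh: 0 fdq: [528][0][0][0]
 */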

static int of_channel_match_helper(struct device_node *np, const char *name,
					const char **dma_instance)
{
	struct of_phandle_args args;
	struct device_node *dma_node;
	int index;

	dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
	if (!dma_node)
		return -ENODEV;

	*dma_instance = dma_node->name;
	index = of_property_match_string(np, "ti,navigator-dma-names", name);
	if (index < 0) {
		dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n");
		return -ENODEV;
	}

	if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
					1, index, &args)) {
		dev_err(kdev->dev, "Missing the phandle args name %s\n", name);
		return -ENODEV;
	}

	if (args.args[0] < 0) {
		dev_err(kdev->dev, "Missing args for %s\n", name);
		return -ENODEV;
	}

	return args.args[0];
}
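
/*
 * A client node describes its channels with one phandle arg cell per
 * entry, roughly like the following (an illustrative sketch; names and
 * cell values vary per board and binding):
 *
 *	ti,navigator-dmas = <&dma_gbe 22>, <&dma_gbe 23>, <&dma_gbe 8>;
 *	ti,navigator-dma-names = "netrx0", "netrx1", "nettx";
 *
 * of_channel_match_helper() returns the arg cell (tx channel or rx flow
 * number) of the entry whose name matches the request.
 */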

/**
 * knav_dma_open_channel() - try to setup an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 * @config:	dma configuration parameters
 *
 * Returns pointer to appropriate DMA channel on success or error.
 */
void *knav_dma_open_channel(struct device *dev, const char *name,
					struct knav_dma_cfg *config)
{
	struct knav_dma_device *dma = NULL, *iter1;
	struct knav_dma_chan *chan = NULL, *iter2;
	int chan_num = -1;
	const char *instance;

	if (!kdev) {
		pr_err("keystone-navigator-dma driver not registered\n");
		return (void *)-EINVAL;
	}

	chan_num = of_channel_match_helper(dev->of_node, name, &instance);
	if (chan_num < 0) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", name);
		return (void *)-EINVAL;
	}

	dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
		config->direction == DMA_MEM_TO_DEV ? "transmit" :
		config->direction == DMA_DEV_TO_MEM ? "receive" :
		"unknown", chan_num, instance);

	if (config->direction != DMA_MEM_TO_DEV &&
	    config->direction != DMA_DEV_TO_MEM) {
		dev_err(kdev->dev, "bad direction\n");
		return (void *)-EINVAL;
	}

	/* Look for correct dma instance */
	list_for_each_entry(iter1, &kdev->list, list) {
		if (!strcmp(iter1->name, instance)) {
			dma = iter1;
			break;
		}
	}
	if (!dma) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
		return (void *)-EINVAL;
	}

	/* Look for correct dma channel from dma instance */
	list_for_each_entry(iter2, &dma->chan_list, list) {
		if (config->direction == DMA_MEM_TO_DEV) {
			if (iter2->channel == chan_num) {
				chan = iter2;
				break;
			}
		} else {
			if (iter2->flow == chan_num) {
				chan = iter2;
				break;
			}
		}
	}
	if (!chan) {
		dev_err(kdev->dev, "channel %d is not in DMA %s\n",
			chan_num, instance);
		return (void *)-EINVAL;
	}

	if (atomic_read(&chan->ref_count) >= 1) {
		if (!check_config(chan, config)) {
			dev_err(kdev->dev, "channel %d config mismatch\n",
				chan_num);
			return (void *)-EINVAL;
		}
	}

	if (atomic_inc_return(&chan->dma->ref_count) <= 1)
		knav_dma_hw_init(chan->dma);

	if (atomic_inc_return(&chan->ref_count) <= 1)
		chan_start(chan, config);

	dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
		chan_num, instance);

	return chan;
}
EXPORT_SYMBOL_GPL(knav_dma_open_channel);
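
/*
 * Example (an illustrative sketch, not mandated usage): a client such as
 * the netcp Ethernet driver would open its transmit channel along these
 * lines, with "nettx" matching an entry in its "ti,navigator-dma-names"
 * property:
 *
 *	struct knav_dma_cfg config;
 *	void *tx_chan;
 *
 *	memset(&config, 0, sizeof(config));
 *	config.direction = DMA_MEM_TO_DEV;
 *	config.u.tx.filt_einfo = false;
 *	config.u.tx.filt_pswords = false;
 *	config.u.tx.priority = DMA_PRIO_MED_L;
 *
 *	tx_chan = knav_dma_open_channel(dev, "nettx", &config);
 *	if (IS_ERR(tx_chan))
 *		return PTR_ERR(tx_chan);
 */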

/**
 * knav_dma_close_channel() - Destroy a dma channel
 *
 * @channel:	dma channel handle
 *
 */
void knav_dma_close_channel(void *channel)
{
	struct knav_dma_chan *chan = channel;

	if (!kdev) {
		pr_err("keystone-navigator-dma driver not registered\n");
		return;
	}

	if (atomic_dec_return(&chan->ref_count) <= 0)
		chan_stop(chan);

	if (atomic_dec_return(&chan->dma->ref_count) <= 0)
		knav_dma_hw_destroy(chan->dma);

	dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
		chan->channel, chan->flow, chan->dma->name);
}
EXPORT_SYMBOL_GPL(knav_dma_close_channel);
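
/*
 * Open and close are refcounted per channel and per DMA instance: a
 * channel is only started on the first open with a given config, and is
 * only stopped (and the instance torn down) when the last user closes
 * its handle, e.g. knav_dma_close_channel(tx_chan) for the handle in the
 * sketch above.
 */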

static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
				struct device_node *node,
				unsigned index, resource_size_t *_size)
{
	struct device *dev = kdev->dev;
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n",
			node, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n",
			index, node);
	if (_size)
		*_size = resource_size(&res);

	return regs;
}

static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow)
{
	struct knav_dma_device *dma = chan->dma;

	chan->flow = flow;
	chan->reg_rx_flow = dma->reg_rx_flow + flow;
	chan->channel = DMA_INVALID_ID;
	dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);

	return 0;
}

static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel)
{
	struct knav_dma_device *dma = chan->dma;

	chan->channel = channel;
	chan->reg_chan = dma->reg_tx_chan + channel;
	chan->reg_tx_sched = dma->reg_tx_sched + channel;
	chan->flow = DMA_INVALID_ID;
	dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);

	return 0;
}

static int pktdma_init_chan(struct knav_dma_device *dma,
				enum dma_transfer_direction dir,
				unsigned chan_num)
{
	struct device *dev = kdev->dev;
	struct knav_dma_chan *chan;
	int ret = -EINVAL;

	chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&chan->list);
	chan->dma = dma;
	chan->direction = DMA_TRANS_NONE;
	atomic_set(&chan->ref_count, 0);
	spin_lock_init(&chan->lock);

	if (dir == DMA_MEM_TO_DEV) {
		chan->direction = dir;
		ret = pktdma_init_tx_chan(chan, chan_num);
	} else if (dir == DMA_DEV_TO_MEM) {
		chan->direction = dir;
		ret = pktdma_init_rx_chan(chan, chan_num);
	} else {
		dev_err(dev, "channel(%d) direction unknown\n", chan_num);
	}

	list_add_tail(&chan->list, &dma->chan_list);

	return ret;
}

static int dma_init(struct device_node *cloud, struct device_node *dma_node)
{
	unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
	struct device_node *node = dma_node;
	struct knav_dma_device *dma;
	int ret, len, num_chan = 0;
	resource_size_t size;
	u32 timeout;
	u32 i;

	dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		dev_err(kdev->dev, "could not allocate driver mem\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&dma->list);
	INIT_LIST_HEAD(&dma->chan_list);

	if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
		dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
		return -ENODEV;
	}

	dma->logical_queue_managers = len / sizeof(u32);
	if (dma->logical_queue_managers > DMA_MAX_QMS) {
		dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
			 dma->logical_queue_managers);
		dma->logical_queue_managers = DMA_MAX_QMS;
	}

	ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
					dma->qm_base_address,
					dma->logical_queue_managers);
	if (ret) {
		dev_err(kdev->dev, "invalid navigator cloud addresses\n");
		return -ENODEV;
	}

	dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
	if (IS_ERR(dma->reg_global))
		return PTR_ERR(dma->reg_global);
	if (size < sizeof(struct reg_global)) {
		dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
		return -ENODEV;
	}

	dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
	if (IS_ERR(dma->reg_tx_chan))
		return PTR_ERR(dma->reg_tx_chan);

	max_tx_chan = size / sizeof(struct reg_chan);
	dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
	if (IS_ERR(dma->reg_rx_chan))
		return PTR_ERR(dma->reg_rx_chan);

	max_rx_chan = size / sizeof(struct reg_chan);
	dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
	if (IS_ERR(dma->reg_tx_sched))
		return PTR_ERR(dma->reg_tx_sched);

	max_tx_sched = size / sizeof(struct reg_tx_sched);
	dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
	if (IS_ERR(dma->reg_rx_flow))
		return PTR_ERR(dma->reg_rx_flow);

	max_rx_flow = size / sizeof(struct reg_rx_flow);
	dma->rx_priority = DMA_PRIO_DEFAULT;
	dma->tx_priority = DMA_PRIO_DEFAULT;

	dma->enable_all = of_property_read_bool(node, "ti,enable-all");
	dma->loopback = of_property_read_bool(node, "ti,loop-back");

	ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
	if (ret < 0) {
		dev_dbg(kdev->dev, "unspecified rx timeout using value %d\n",
			DMA_RX_TIMEOUT_DEFAULT);
		timeout = DMA_RX_TIMEOUT_DEFAULT;
	}

	dma->rx_timeout = timeout;
	dma->max_rx_chan = max_rx_chan;
	dma->max_rx_flow = max_rx_flow;
	dma->max_tx_chan = min(max_tx_chan, max_tx_sched);
	atomic_set(&dma->ref_count, 0);
	strcpy(dma->name, node->name);
	spin_lock_init(&dma->lock);

	for (i = 0; i < dma->max_tx_chan; i++) {
		if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
			num_chan++;
	}

	for (i = 0; i < dma->max_rx_flow; i++) {
		if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
			num_chan++;
	}

	list_add_tail(&dma->list, &kdev->list);

	/*
	 * For DSP software usecases or userspace transport software, setup all
	 * the DMA hardware resources.
	 */
	if (dma->enable_all) {
		atomic_inc(&dma->ref_count);
		knav_dma_hw_init(dma);
		dma_hw_enable_all(dma);
	}

	dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
		 dma->name, num_chan, dma->max_rx_flow,
		 dma->max_tx_chan, dma->max_rx_chan,
		 dma->loopback ? ", loopback" : "");

	return 0;
}
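
/*
 * dma_init() expects five register regions per instance, in this fixed
 * order: index 0 global config, 1 tx channel, 2 rx channel, 3 tx
 * scheduler, 4 rx flow. An instance node might look roughly like this
 * (an illustrative sketch; real names and addresses come from the
 * board's devicetree):
 *
 *	dma_gbe: dma_gbe@0 {
 *		reg = <0x2004000 0x100>, <0x2004400 0x120>,
 *		      <0x2004800 0x300>, <0x2004c00 0x120>,
 *		      <0x2005000 0x400>;
 *	};
 */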

static int knav_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *child;
	int ret = 0;

	if (!node) {
		dev_err(&pdev->dev, "could not find device info\n");
		return -EINVAL;
	}

	kdev = devm_kzalloc(dev,
			sizeof(struct knav_dma_pool_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "could not allocate driver mem\n");
		return -ENOMEM;
	}

	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->list);

	pm_runtime_enable(kdev->dev);
	ret = pm_runtime_resume_and_get(kdev->dev);
	if (ret < 0) {
		dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
		goto err_pm_disable;
	}

	/* Initialise all packet dmas */
	for_each_child_of_node(node, child) {
		ret = dma_init(node, child);
		if (ret) {
			of_node_put(child);
			dev_err(&pdev->dev, "init failed with %d\n", ret);
			break;
		}
	}

	if (list_empty(&kdev->list)) {
		dev_err(dev, "no valid dma instance\n");
		ret = -ENODEV;
		goto err_put_sync;
	}

	debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_dma_debug_fops);

	device_ready = true;
	return ret;

err_put_sync:
	pm_runtime_put_sync(kdev->dev);
err_pm_disable:
	pm_runtime_disable(kdev->dev);

	return ret;
}

static void knav_dma_remove(struct platform_device *pdev)
{
	struct knav_dma_device *dma;

	list_for_each_entry(dma, &kdev->list, list) {
		if (atomic_dec_return(&dma->ref_count) == 0)
			knav_dma_hw_destroy(dma);
	}

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static struct of_device_id of_match[] = {
	{ .compatible = "ti,keystone-navigator-dma", },
	{},
};

MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver knav_dma_driver = {
	.probe	= knav_dma_probe,
	.remove_new	= knav_dma_remove,
	.driver = {
		.name		= "keystone-navigator-dma",
		.of_match_table	= of_match,
	},
};
module_platform_driver(knav_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");