// SPDX-License-Identifier: GPL-2.0-only
/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);
#define knav_dev_lock_held() \
	lockdep_is_held(&knav_dev_lock)

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5

/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
 * There are no status and vbusm push registers on this version
 * of QMSS. The push registers are the same as the pop registers,
 * so all indices above 1 are redefined.
 */
#define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
#define KNAV_L_QUEUE_REGION_REG_INDEX	2
#define KNAV_L_QUEUE_PUSH_REG_INDEX	3

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))
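/*
 * Worked example (illustrative, assuming kdev->instances is the flat,
 * byte-addressed allocation made in knav_queue_init_queues()): if the
 * instance size rounds up to 256 bytes, inst_shift is 8 and instance idx
 * starts at byte offset idx << 8, so the index-to-instance lookup is a
 * shift and an add rather than a multiply.
 */
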
#define for_each_handle_rcu(qh, inst)				\
	list_for_each_entry_rcu(qh, &inst->handles, list,	\
				knav_dev_lock_held())

#define for_each_instance(idx, inst, kdev)		\
	for (idx = 0, inst = kdev->instances;		\
	     idx < (kdev)->num_queues_in_use;		\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))

/* All firmware file names end up here. List the firmware file names below.
 * Newest first, followed by older ones. The array is searched from the
 * start until a firmware file is found.
 */
static const char * const knav_acc_firmwares[] = {"/*(DEBLOBBED)*/"};

static bool device_ready;
bool knav_qmss_device_ready(void)
{
	return device_ready;
}
EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
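/*
 * Example (hypothetical consumer code, not from this file): a driver that
 * depends on QMSS, such as a NETCP client, can defer its own probe until
 * this subsystem is up:
 *
 *	if (!knav_qmss_device_ready())
 *		return -EPROBE_DEFER;
 */
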
/**
 * knav_queue_notify: qmss queue notifier call
 *
 * @inst:		- qmss queue instance like accumulator
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		this_cpu_inc(qh->stats->notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
			  struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		ret = request_irq(irq, knav_queue_int_handler, 0,
				  inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (range->irqs[queue].cpu_mask) {
			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;
	bool ret = false;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
					 unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		!(inst->range->flags &
			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
					    const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->stats = alloc_percpu(struct knav_queue_stats);
	if (!qh->stats) {
		ret = -ENOMEM;
		goto err;
	}

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret)
			goto err;
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;

err:
	free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
	return ERR_PTR(ret);
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		break;
	}

	mutex_unlock(&knav_dev_lock);
	return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* Adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* Now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}

static int knav_queue_set_notifier(struct knav_queue *qh,
				struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
			       struct knav_queue_inst *inst,
			       bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

static struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};

static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;
	int cpu = 0;
	int pushes = 0;
	int pops = 0;
	int push_errors = 0;
	int pop_errors = 0;
	int notifies = 0;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
		for_each_possible_cpu(cpu) {
			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
			pops += per_cpu_ptr(qh->stats, cpu)->pops;
			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
		}

		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
			   qh,
			   pushes, pops,
			   knav_queue_get_count(qh),
			   notifies, push_errors, pop_errors);
	}
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);

static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
					u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}

static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}

/**
 * knav_queue_open()	- open a hardware queue
 * @name:		- name to give the queue handle
 * @id:			- desired queue number if any or specifies the type
 *			  of queue
 * @flags:		- the following flags are applicable to queues:
 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *			     exclusive by default.
 *			     Subsequent attempts to open a shared queue should
 *			     also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
					unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);
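/*
 * Example usage (a minimal sketch; the names and the fixed queue id are
 * hypothetical, not taken from an in-tree consumer): open a general
 * purpose queue by type, then a shared queue by fixed id:
 *
 *	void *txq, *rxq;
 *
 *	txq = knav_queue_open("my-tx", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(txq))
 *		return PTR_ERR(txq);
 *	rxq = knav_queue_open("my-rx", 650, KNAV_QUEUE_SHARED);
 *	if (IS_ERR(rxq)) {
 *		knav_queue_close(txq);
 *		return PTR_ERR(rxq);
 *	}
 */
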
/**
 * knav_queue_close()	- close a hardware queue handle
 * @qhandle:		- handle to close
 */
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);

/**
 * knav_queue_device_control()	- Perform control operations on a queue
 * @qhandle:			- queue handle
 * @cmd:			- control commands
 * @arg:			- command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
				unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);
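/*
 * Example (illustrative sketch; my_rx_notify and my_priv are hypothetical):
 * install and enable a notifier callback through the control interface:
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_rx_notify,
 *		.fn_arg	= my_priv,
 *	};
 *	int ret;
 *
 *	ret = knav_queue_device_control(rxq, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 */
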
/**
 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 * @qhandle:		- hardware queue handle
 * @dma:		- DMA data to push
 * @size:		- size of data to push
 * @flags:		- can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
					unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	this_cpu_inc(qh->stats->pushes);
	return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);
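/*
 * Worked example of the push word encoding above: a descriptor at DMA
 * address 0x23a00000 with a 64-byte fetch size is written as
 * 0x23a00000 | (64 / 16 - 1) = 0x23a00003. The size hint fits in the low
 * nibble because descriptors are at least 16-byte aligned; the pop path
 * splits the word back apart with DESC_PTR_MASK and DESC_SIZE_MASK.
 */
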
/**
 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 * @qhandle:		- hardware queue handle
 * @size:		- (optional) size of the data popped.
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
		idx  = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	this_cpu_inc(qh->stats->pops);
	return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);
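/*
 * Example drain loop (sketch; process_desc() is a hypothetical helper):
 *
 *	dma_addr_t dma;
 *	unsigned size;
 *
 *	while ((dma = knav_queue_pop(qh, &size)))
 *		process_desc(dma, size);
 */
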
/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;

		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}

/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;

	return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;

	return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
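/*
 * Both translations are plain offset arithmetic, which works because each
 * region is one contiguous allocation mapped in a single dma_map_page()
 * call. Worked example (illustrative numbers): with virt_start = 0xc0000000
 * and dma_start = 0x80000000, the descriptor at virtual 0xc0000400 maps to
 * DMA address 0x80000400, and back again.
 */
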
/**
 * knav_pool_create()	- Create a pool of descriptors
 * @name:		- name to give the pool handle
 * @num_desc:		- numbers of descriptors in the pool
 * @region_id:		- QMSS region id from which the descriptors are to be
 *			  allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
					int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi = NULL, *iter;
	struct list_head *node;
	unsigned last_offset;
	int ret;

	if (!kdev)
		return ERR_PTR(-EPROBE_DEFER);

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* Region maintains a sorted (by region offset) list of pools
	 * use the first free slot which is large enough to accommodate
	 * the request
	 */
	last_offset = 0;
	node = &region->pools;
	list_for_each_entry(iter, &region->pools, region_inst) {
		if ((iter->region_offset - last_offset) >= num_desc) {
			pi = iter;
			break;
		}
		last_offset = iter->region_offset + iter->num_desc;
	}

	if (pi) {
		node = &pi->region_inst;
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);
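/*
 * Example usage (a minimal sketch; the name, pool size and region id are
 * hypothetical): carve 512 descriptors out of region 12, then take one:
 *
 *	void *pool, *desc;
 *
 *	pool = knav_pool_create("my-pool", 512, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return PTR_ERR(pool);
 *	desc = knav_pool_desc_get(pool);
 */
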
/**
 * knav_pool_destroy()	- Free a pool of descriptors
 * @ph:		- pool handle
 */
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);

/**
 * knav_pool_desc_get()	- Get a descriptor from the pool
 * @ph:		- pool handle
 *
 * Returns descriptor from the pool.
 */
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

/**
 * knav_pool_desc_put()	- return a descriptor to the pool
 * @ph:		- pool handle
 * @desc:	- virtual address
 */
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;

	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);
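/*
 * The get/put pair is symmetric (sketch): a descriptor taken with
 * knav_pool_desc_get() goes back with knav_pool_desc_put() once the caller
 * is done with it; an empty pool shows up as ERR_PTR(-ENOMEM):
 *
 *	desc = knav_pool_desc_get(pool);
 *	if (IS_ERR(desc))
 *		return -ENOMEM;
 *	(... fill in the descriptor ...)
 *	knav_pool_desc_put(pool, desc);
 */
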
/**
 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 * @ph:				- pool handle
 * @desc:			- address of descriptor to map
 * @size:			- size of descriptor to map
 * @dma:			- DMA address return pointer
 * @dma_sz:			- adjusted return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
					dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;

	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches memory */
	__iowmb();

	return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);
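/*
 * Typical producer round trip (sketch; txq, desc and desc_size are
 * hypothetical): map the descriptor before handing it to hardware, then
 * recover it with knav_pool_desc_unmap() after popping it back:
 *
 *	ret = knav_pool_desc_map(pool, desc, desc_size, &dma, &dma_sz);
 *	if (!ret)
 *		ret = knav_queue_push(txq, dma, dma_sz, 0);
 */
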
/**
 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 * @ph:				- pool handle
 * @dma:			- DMA address of descriptor to unmap
 * @dma_sz:			- size of descriptor to unmap
 *
 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
 * error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);

/**
 * knav_pool_count() - Get the number of descriptors in pool.
 * @ph:			- pool handle
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;

	return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);

static void knav_queue_setup_region(struct knav_device *kdev,
					struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem *regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
						GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end  = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, &region->dma_start, &region->dma_end,
		region->virt_start, region->virt_end);

	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed((u32)region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
				DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
	return;
}

static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
					struct device_node *regions)
{
	struct device *dev = kdev->dev;
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc  = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		if (!of_get_property(child, "link-index", NULL)) {
			dev_err(dev, "No link info for %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}
		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions)) {
		dev_err(dev, "no valid region information found\n");
		return -ENODEV;
	}

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}

static int knav_get_link_ram(struct knav_device *kdev,
				       const char *name,
				       struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40bits in hardware, we treat them as
	 * 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * This gets a bit weird when other link rams are used. For example,
	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
	 * which accounts for 64-bits per entry, for 16K entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->dma = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specified => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
							  8 * block->size, &block->dma,
							  GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}

static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
		if (kdev->version == QMSS_66AK2G)
			writel_relaxed(block->size,
				       &qmgr->reg_config->link_ram_size0);
		else
			writel_relaxed(block->size - 1,
				       &qmgr->reg_config->link_ram_size0);
		block++;
		if (!block->size)
			continue;

		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}

static int knav_setup_queue_range(struct knav_device *kdev,
					struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
			unsigned long mask;
			int bit;

			range->irqs[i].cpu_mask = devm_kzalloc(dev,
							       cpumask_size(), GFP_KERNEL);
			if (!range->irqs[i].cpu_mask)
				return -ENOMEM;

			mask = (oirq.args[2] & 0x0000ff00) >> 8;
			for_each_set_bit(bit, &mask, BITS_PER_LONG)
				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
		}
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_property_read_bool(node, "qalloc-by-id"))
		range->flags |= RANGE_RESERVED;

	if (of_property_present(node, "accumulator")) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end   = min(qmgr->start_queue + qmgr->num_queues,
			    range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}

static int knav_setup_queue_pools(struct knav_device *kdev,
					struct device_node *queue_pools)
{
	struct device_node *type, *range;

	for_each_child_of_node(queue_pools, type) {
		for_each_child_of_node(type, range) {
			/* return value ignored, we init the rest... */
			knav_setup_queue_range(kdev, range);
		}
	}

	/* ... and barf if they all failed! */
	if (list_empty(&kdev->queue_ranges)) {
		dev_err(kdev->dev, "no valid queue range found\n");
		return -ENODEV;
	}
	return 0;
}

static void knav_free_queue_range(struct knav_device *kdev,
				  struct knav_range_info *range)
{
	if (range->ops && range->ops->free_range)
		range->ops->free_range(range);
	list_del(&range->list);
	devm_kfree(kdev->dev, range);
}

static void knav_free_queue_ranges(struct knav_device *kdev)
{
	struct knav_range_info *range;

	for (;;) {
		range = first_queue_range(kdev);
		if (!range)
			break;
		knav_free_queue_range(kdev, range);
	}
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
	struct knav_region *region;
	struct knav_pool *pool, *tmp;
	unsigned size;

	for (;;) {
		region = first_region(kdev);
		if (!region)
			break;
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
			knav_pool_destroy(pool);

		size = region->virt_end - region->virt_start;
		if (size)
			free_pages_exact(region->virt_start, size);
		list_del(&region->list);
		devm_kfree(kdev->dev, region);
	}
}

static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
					struct device_node *node, int index)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
			node, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
			index, node);
	return regs;
}

static int knav_queue_init_qmgrs(struct knav_device *kdev,
					struct device_node *qmgrs)
{
	struct device *dev = kdev->dev;
	struct knav_qmgr_info *qmgr;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(qmgrs, child) {
		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
		if (!qmgr) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating qmgr\n");
			return -ENOMEM;
		}

		ret = of_property_read_u32_array(child, "managed-queues",
						 temp, 2);
		if (!ret) {
			qmgr->start_queue = temp[0];
			qmgr->num_queues = temp[1];
		} else {
			dev_err(dev, "invalid qmgr queue range\n");
			devm_kfree(dev, qmgr);
			continue;
		}

		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
			 qmgr->start_queue, qmgr->num_queues);

		qmgr->reg_peek =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PEEK_REG_INDEX);

		if (kdev->version == QMSS) {
			qmgr->reg_status =
				knav_queue_map_reg(kdev, child,
						   KNAV_QUEUE_STATUS_REG_INDEX);
		}

		qmgr->reg_config =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
					   KNAV_QUEUE_CONFIG_REG_INDEX);
		qmgr->reg_region =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_REGION_REG_INDEX :
					   KNAV_QUEUE_REGION_REG_INDEX);

		qmgr->reg_push =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_PUSH_REG_INDEX :
					   KNAV_QUEUE_PUSH_REG_INDEX);

		if (kdev->version == QMSS) {
			qmgr->reg_pop =
				knav_queue_map_reg(kdev, child,
						   KNAV_QUEUE_POP_REG_INDEX);
		}

		if (IS_ERR(qmgr->reg_peek) ||
		    ((kdev->version == QMSS) &&
		    (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
		    IS_ERR(qmgr->reg_push)) {
			dev_err(dev, "failed to map qmgr regs\n");
			if (kdev->version == QMSS) {
				if (!IS_ERR(qmgr->reg_status))
					devm_iounmap(dev, qmgr->reg_status);
				if (!IS_ERR(qmgr->reg_pop))
					devm_iounmap(dev, qmgr->reg_pop);
			}
			if (!IS_ERR(qmgr->reg_peek))
				devm_iounmap(dev, qmgr->reg_peek);
			if (!IS_ERR(qmgr->reg_config))
				devm_iounmap(dev, qmgr->reg_config);
			if (!IS_ERR(qmgr->reg_region))
				devm_iounmap(dev, qmgr->reg_region);
			if (!IS_ERR(qmgr->reg_push))
				devm_iounmap(dev, qmgr->reg_push);
			devm_kfree(dev, qmgr);
			continue;
		}

		/* Use same push register for pop as well */
		if (kdev->version == QMSS_66AK2G)
			qmgr->reg_pop = qmgr->reg_push;

		list_add_tail(&qmgr->list, &kdev->qmgrs);
		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
			 qmgr->start_queue, qmgr->num_queues,
			 qmgr->reg_peek, qmgr->reg_status,
			 qmgr->reg_config, qmgr->reg_region,
			 qmgr->reg_push, qmgr->reg_pop);
	}
	return 0;
}

static int knav_queue_init_pdsps(struct knav_device *kdev,
					struct device_node *pdsps)
{
	struct device *dev = kdev->dev;
	struct knav_pdsp_info *pdsp;
	struct device_node *child;

	for_each_child_of_node(pdsps, child) {
		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
		if (!pdsp) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating pdsp\n");
			return -ENOMEM;
		}
		pdsp->name = knav_queue_find_name(child);
		pdsp->iram =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
		pdsp->regs =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
		pdsp->intd =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
		pdsp->command =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);

		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
			dev_err(dev, "failed to map pdsp %s regs\n",
				pdsp->name);
			if (!IS_ERR(pdsp->command))
				devm_iounmap(dev, pdsp->command);
			if (!IS_ERR(pdsp->iram))
				devm_iounmap(dev, pdsp->iram);
			if (!IS_ERR(pdsp->regs))
				devm_iounmap(dev, pdsp->regs);
			if (!IS_ERR(pdsp->intd))
				devm_iounmap(dev, pdsp->intd);
			devm_kfree(dev, pdsp);
			continue;
		}
		of_property_read_u32(child, "id", &pdsp->id);
		list_add_tail(&pdsp->list, &kdev->pdsps);
		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
			pdsp->intd);
	}
	return 0;
}

static int knav_queue_stop_pdsp(struct knav_device *kdev,
			  struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);
	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
				   PDSP_CTRL_RUNNING);
	if (ret < 0) {
		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
		return ret;
	}
	pdsp->loaded = false;
	pdsp->started = false;
	return 0;
}

static int knav_queue_load_pdsp(struct knav_device *kdev,
			  struct knav_pdsp_info *pdsp)
{
	const struct firmware *fw;
	bool found = false;
	u32 *fwdata;
	int i, ret, fwlen;

	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
		if (knav_acc_firmwares[i]) {
			ret = reject_firmware_direct(&fw,
						     knav_acc_firmwares[i],
						     kdev->dev);
			if (!ret) {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
		return -ENODEV;
	}

	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
		 knav_acc_firmwares[i]);

	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
	/* download the firmware */
	fwdata = (u32 *)fw->data;
	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
	for (i = 0; i < fwlen; i++)
		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);

	release_firmware(fw);
	return 0;
}

static int knav_queue_start_pdsp(struct knav_device *kdev,
			   struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	/* write a command for sync */
	writel_relaxed(0xffffffff, pdsp->command);
	while (readl_relaxed(pdsp->command) != 0xffffffff)
		cpu_relax();

	/* soft reset the PDSP */
	val  = readl_relaxed(&pdsp->regs->control);
	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
	writel_relaxed(val, &pdsp->regs->control);

	/* enable pdsp */
	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);

	/* wait for command register to clear */
	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
	if (ret < 0) {
		dev_err(kdev->dev,
			"timed out on pdsp %s command register wait\n",
			pdsp->name);
		return ret;
	}
	return 0;
}

static void knav_queue_stop_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;

	/* disable all pdsps */
	for_each_pdsp(kdev, pdsp)
		knav_queue_stop_pdsp(kdev, pdsp);
}

static int knav_queue_start_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;
	int ret;

	knav_queue_stop_pdsps(kdev);
	/* now load them all. We return success even if a pdsp is not loaded,
	 * as the accumulator channels are optional and depend on firmware
	 * being available in the system. We set the loaded and started flags
	 * and, when initializing an accumulator range, check them and init
	 * the range only if its pdsp is started.
	 */
	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_load_pdsp(kdev, pdsp);
		if (!ret)
			pdsp->loaded = true;
	}

	for_each_pdsp(kdev, pdsp) {
		if (pdsp->loaded) {
			ret = knav_queue_start_pdsp(kdev, pdsp);
			if (!ret)
				pdsp->started = true;
		}
	}
	return 0;
}

static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		if ((id >= qmgr->start_queue) &&
		    (id < qmgr->start_queue + qmgr->num_queues))
			return qmgr;
	}
	return NULL;
}

static int knav_queue_init_queue(struct knav_device *kdev,
					struct knav_range_info *range,
					struct knav_queue_inst *inst,
					unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];

	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	else
		return 0;
}

static int knav_queue_init_queues(struct knav_device *kdev)
{
	struct knav_range_info *range;
	int size, id, base_idx;
	int idx = 0, ret = 0;

	/* how much do we need for instance data? */
	size = sizeof(struct knav_queue_inst);

	/* round this up to a power of 2, keep the index to instance
	 * arithmetic fast.
	 */
	kdev->inst_shift = order_base_2(size);
	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
	if (!kdev->instances)
		return -ENOMEM;

	for_each_queue_range(kdev, range) {
		if (range->ops && range->ops->init_range)
			range->ops->init_range(range);
		base_idx = idx;
		for (id = range->queue_base;
		     id < range->queue_base + range->num_queues; id++, idx++) {
			ret = knav_queue_init_queue(kdev, range,
						    knav_queue_idx_to_inst(kdev, idx), id);
			if (ret < 0)
				return ret;
		}
		range->queue_base_inst =
			knav_queue_idx_to_inst(kdev, base_idx);
	}
	return ret;
}

/* Match table for of_platform binding */
static const struct of_device_id keystone_qmss_of_match[] = {
	{
		.compatible = "ti,keystone-navigator-qmss",
	},
	{
		.compatible = "ti,66ak2g-navss-qm",
		.data	= (void *)QMSS_66AK2G,
	},
	{},
};
MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
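/*
 * The probe below expects a device tree node roughly of this shape
 * (abridged sketch; see the binding document
 * Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
 * for the authoritative layout):
 *
 *	qmss: qmss@2a40000 {
 *		compatible = "ti,keystone-navigator-qmss";
 *		queue-range = <0 0x4000>;
 *		linkram0 = <0x100000 0x8000>;
 *		qmgrs { ... };
 *		queue-pools { ... };
 *		descriptor-regions { ... };
 *	};
 */
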
static int knav_queue_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
	struct device *dev = &pdev->dev;
	u32 temp[2];
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	if (device_get_match_data(dev))
		kdev->version = QMSS_66AK2G;

	platform_set_drvdata(pdev, kdev);
	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->queue_ranges);
	INIT_LIST_HEAD(&kdev->qmgrs);
	INIT_LIST_HEAD(&kdev->pools);
	INIT_LIST_HEAD(&kdev->regions);
	INIT_LIST_HEAD(&kdev->pdsps);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		dev_err(dev, "Failed to enable QMSS\n");
		return ret;
	}

	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
		dev_err(dev, "queue-range not specified\n");
		ret = -ENODEV;
		goto err;
	}
	kdev->base_id    = temp[0];
	kdev->num_queues = temp[1];

	/* Initialize queue managers using device tree configuration */
	qmgrs = of_get_child_by_name(node, "qmgrs");
	if (!qmgrs) {
		dev_err(dev, "queue manager info not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_init_qmgrs(kdev, qmgrs);
	of_node_put(qmgrs);
	if (ret)
		goto err;

	/* get pdsp configuration values from device tree */
	pdsps = of_get_child_by_name(node, "pdsps");
	if (pdsps) {
		ret = knav_queue_init_pdsps(kdev, pdsps);
		if (ret)
			goto err;

		ret = knav_queue_start_pdsps(kdev);
		if (ret)
			goto err;
	}
	of_node_put(pdsps);

	/* get usable queue range values from device tree */
	queue_pools = of_get_child_by_name(node, "queue-pools");
	if (!queue_pools) {
		dev_err(dev, "queue-pools not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_setup_queue_pools(kdev, queue_pools);
	of_node_put(queue_pools);
	if (ret)
		goto err;

	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
	if (ret) {
		dev_err(kdev->dev, "could not setup linking ram\n");
		goto err;
	}

	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
	if (ret) {
		/*
		 * nothing really, we have one linking ram already, so we just
		 * live within our means
		 */
	}

	ret = knav_queue_setup_link_ram(kdev);
	if (ret)
		goto err;

	regions = of_get_child_by_name(node, "descriptor-regions");
	if (!regions) {
		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_setup_regions(kdev, regions);
	of_node_put(regions);
	if (ret)
		goto err;

	ret = knav_queue_init_queues(kdev);
	if (ret < 0) {
		dev_err(dev, "hwqueue initialization failed\n");
		goto err;
	}

	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_queue_debug_fops);
	device_ready = true;
	return 0;

err:
	knav_queue_stop_pdsps(kdev);
	knav_queue_free_regions(kdev);
	knav_free_queue_ranges(kdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static void knav_queue_remove(struct platform_device *pdev)
{
	/* TODO: Free resources */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static struct platform_driver keystone_qmss_driver = {
	.probe		= knav_queue_probe,
	.remove_new	= knav_queue_remove,
	.driver		= {
		.name	= "keystone-navigator-qmss",
		.of_match_table = keystone_qmss_of_match,
	},
};
module_platform_driver(keystone_qmss_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");