/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/pgtable.h>

#include "sst-dsp-priv.h"
#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000
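/*
 * DSP-side addresses are ORed with SST_HSW_MASK_DMA_ADDR_DSP before a host
 * DMA transfer so that the transfer targets ADSP memory space; see
 * sst_dsp_dma_copyto() and sst_dsp_dma_copyfrom() below.
 */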
struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	/* __iowrite32_copy() counts in 32-bit words, so divide the byte count by 4 */
	__iowrite32_copy((void *)dest, src, bytes / 4);
}
static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}
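/*
 * Perform a synchronous memcpy over the DMA engine: prepare a memcpy
 * descriptor, submit it, then block in dma_wait_for_async_tx() until the
 * transfer completes. The completion callback above only logs.
 */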
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}
/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);
/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}
/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}
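/*
 * A block shared by several modules is only enabled for its first user
 * (block->users == 0); if any enable fails, the error path walks the whole
 * list and disables the blocks again.
 */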
static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip, NULL);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}
static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);
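/*
 * Typical host to DSP copy sequence (a sketch; error handling elided):
 *
 *	sst_dsp_dma_get_channel(dsp, 0);
 *	sst_dsp_dma_copyto(dsp, dest, src, size);
 *	sst_dsp_dma_put_channel(dsp);
 */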
void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
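/*
 * The ADSP's DMA controller lives inside the LPE MMIO window, so
 * sst_dma_new() below builds its register resource from lpe_base + dma_base
 * and registers a DesignWare dmaengine device on top of it.
 */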
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	int ret = 0;

	/* DMA is not used, return and squelch error messages */
	if (sst->pdata->resindex_dma_base == -1)
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);
void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);
/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
			sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
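/*
 * A typical caller pairs this with request_firmware(); sketch below, with an
 * illustrative firmware file name. sst_fw_new() copies fw->data into its own
 * DMA buffer, so the firmware can be released immediately afterwards.
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, "IntcSST2.bin", dev) == 0) {
 *		sst_fw = sst_fw_new(dsp, fw, private);
 *		release_firmware(fw);
 *	}
 */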
int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);
void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);
/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);
/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		/* free on the same device the buffer was allocated on */
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);
/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);
/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);
struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);
void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}
/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}
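/*
 * Allocation strategy: block_alloc() below first tries a single free block
 * large enough for the whole request; only if none exists does it fall back
 * to block_alloc_contiguous(), which stitches adjacent free blocks together
 * by repeatedly asking find_block() for the block at the next offset.
 */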
/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* do we span > 1 blocks */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}
int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);
int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);
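/*
 * Example (a sketch; size and type are illustrative): allocate 0x1000 bytes
 * of DRAM anywhere, use it, then return the blocks to the free list.
 *
 *	struct sst_block_allocator ba = {
 *		.type = SST_MEM_DRAM,
 *		.size = 0x1000,
 *		.offset = 0,
 *	};
 *	LIST_HEAD(blocks);
 *
 *	if (sst_alloc_blocks(dsp, &ba, &blocks) == 0) {
 *		...
 *		sst_free_blocks(dsp, &blocks);
 *	}
 */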
/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}
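/*
 * Fixed allocations start at an exact DSP offset, so a section may begin
 * part-way through a block and spill across block boundaries; the remainder
 * is handed to block_alloc_contiguous() starting at the next block boundary.
 */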
/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* allocate blocks that include this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);
/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);
int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address ? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* allocate blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* allocate blocks that include this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	/* release the blocks just placed on the runtime's own list */
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);
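/*
 * Each runtime instance owns its own persistent DRAM area; its offset is
 * recorded in runtime->persistent_offset and is what the save/restore
 * helpers below copy from and to.
 */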
int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);
int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore!\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
		context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);
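/*
 * save/restore are intended to bracket a DSP power-down (e.g. suspend and
 * resume): save snapshots the runtime's persistent DRAM into a coherent
 * host buffer, restore copies it back and then frees the snapshot.
 */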
/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
	void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);
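/*
 * Platform code registers each physical IRAM/DRAM block once at probe time.
 * Sketch (offsets, sizes and the ops table are illustrative):
 *
 *	sst_mem_block_register(dsp, 0x00000, 0x4000, SST_MEM_IRAM,
 *			       &my_block_ops, 0, NULL);
 *	sst_mem_block_register(dsp, 0x04000, 0x4000, SST_MEM_IRAM,
 *			       &my_block_ops, 1, NULL);
 */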
/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
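/*
 * The scratch buffer is a single region shared by all modules, so it is
 * sized to the largest per-module requirement rather than the sum, and
 * every module is handed the same scratch offset.
 */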
/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at fixed offset */
	if (dsp->scratch_offset != 0) {

		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* allocate blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same offset of scratch to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);
/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);
struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);
/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);