// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra Video decoder driver
 *
 * Copyright (C) 2016-2019 GRATE-DRIVER project
 */
#include <linux/dma-buf.h>
#include <linux/iova.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vde.h"

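/*
 * Each cache entry describes one mapped dma-buf: its attachment, sg_table,
 * IOVA allocation (when an IOMMU domain is in use), DMA direction and a
 * reference count. A delayed work item tears the mapping down once the
 * entry has been unused for a while.
 */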
struct tegra_vde_cache_entry {
	enum dma_data_direction dma_dir;
	struct dma_buf_attachment *a;
	struct delayed_work dwork;
	struct tegra_vde *vde;
	struct list_head list;
	struct sg_table *sgt;
	struct iova *iova;
	unsigned int refcnt;
};

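/* Tear down one cached mapping; the caller must hold vde->map_lock. */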
static void tegra_vde_release_entry(struct tegra_vde_cache_entry *entry)
{
	struct dma_buf *dmabuf = entry->a->dmabuf;

	WARN_ON_ONCE(entry->refcnt);

	if (entry->vde->domain)
		tegra_vde_iommu_unmap(entry->vde, entry->iova);

	dma_buf_unmap_attachment(entry->a, entry->sgt, entry->dma_dir);
	dma_buf_detach(dmabuf, entry->a);
	dma_buf_put(dmabuf);

	list_del(&entry->list);
	kfree(entry);
}

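/*
 * Delayed-work callback: drops a cached mapping that stayed unused past
 * the cache timeout.
 */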
static void tegra_vde_delayed_unmap(struct work_struct *work)
{
	struct tegra_vde_cache_entry *entry;
	struct tegra_vde *vde;

	entry = container_of(work, struct tegra_vde_cache_entry,
			     dwork.work);
	vde = entry->vde;

	mutex_lock(&vde->map_lock);
	tegra_vde_release_entry(entry);
	mutex_unlock(&vde->map_lock);
}

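/*
 * Map a dma-buf for VDE DMA, reusing a cached mapping when one exists. On a
 * cache hit the pending delayed unmap is cancelled and the extra dma-buf
 * reference held by the caller is dropped; on a miss the buffer is attached,
 * mapped and inserted into the cache with a reference count of one.
 */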
int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
			       struct dma_buf *dmabuf,
			       enum dma_data_direction dma_dir,
			       struct dma_buf_attachment **ap,
			       dma_addr_t *addrp)
{
	struct device *dev = vde->miscdev.parent;
	struct dma_buf_attachment *attachment;
	struct tegra_vde_cache_entry *entry;
	struct sg_table *sgt;
	struct iova *iova;
	int err;

	mutex_lock(&vde->map_lock);

	list_for_each_entry(entry, &vde->map_list, list) {
		if (entry->a->dmabuf != dmabuf)
			continue;

		/*
		 * If the delayed unmap is already executing, this entry is
		 * on its way out; keep scanning and create a fresh mapping.
		 */
		if (!cancel_delayed_work(&entry->dwork))
			continue;

		if (entry->dma_dir != dma_dir)
			entry->dma_dir = DMA_BIDIRECTIONAL;

		dma_buf_put(dmabuf);

		if (vde->domain)
			*addrp = iova_dma_addr(&vde->iova, entry->iova);
		else
			*addrp = sg_dma_address(entry->sgt->sgl);

		goto ref;
	}

	attachment = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attachment)) {
		dev_err(dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(attachment);
		goto err_unlock;
	}

	sgt = dma_buf_map_attachment(attachment, dma_dir);
	if (IS_ERR(sgt)) {
		dev_err(dev, "Failed to get dmabufs sg_table\n");
		err = PTR_ERR(sgt);
		goto err_detach;
	}

	/* without an IOMMU the hardware requires one contiguous DMA chunk */
	if (!vde->domain && sgt->nents > 1) {
		dev_err(dev, "Sparse DMA region is unsupported, please enable IOMMU\n");
		err = -EINVAL;
		goto err_unmap;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto err_unmap;
	}

	if (vde->domain) {
		err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);
		if (err)
			goto err_free;

		*addrp = iova_dma_addr(&vde->iova, iova);
	} else {
		*addrp = sg_dma_address(sgt->sgl);
		iova = NULL;
	}

	INIT_DELAYED_WORK(&entry->dwork, tegra_vde_delayed_unmap);
	list_add(&entry->list, &vde->map_list);

	entry->dma_dir = dma_dir;
	entry->iova = iova;
	entry->vde = vde;
	entry->sgt = sgt;
	entry->a = attachment;
ref:
	entry->refcnt++;

	*ap = entry->a;

	mutex_unlock(&vde->map_lock);

	return 0;

err_free:
	kfree(entry);
err_unmap:
	dma_buf_unmap_attachment(attachment, sgt, dma_dir);
err_detach:
	dma_buf_detach(dmabuf, attachment);
err_unlock:
	mutex_unlock(&vde->map_lock);

	return err;
}

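/*
 * Drop one reference to a cached mapping. On the last put the mapping is
 * either released immediately or, when @release is false, handed to the
 * delayed work so that a quick re-map can still hit the cache.
 */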
void tegra_vde_dmabuf_cache_unmap(struct tegra_vde *vde,
				  struct dma_buf_attachment *a,
				  bool release)
{
	struct tegra_vde_cache_entry *entry;

	mutex_lock(&vde->map_lock);

	list_for_each_entry(entry, &vde->map_list, list) {
		if (entry->a != a)
			continue;

		WARN_ON_ONCE(!entry->refcnt);

		if (--entry->refcnt == 0) {
			if (release)
				tegra_vde_release_entry(entry);
			else
				schedule_delayed_work(&entry->dwork, 5 * HZ);
		}
		break;
	}

	mutex_unlock(&vde->map_lock);
}

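/* Synchronously release all cached mappings that currently have no users. */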
void tegra_vde_dmabuf_cache_unmap_sync(struct tegra_vde *vde)
{
	struct tegra_vde_cache_entry *entry, *tmp;

	mutex_lock(&vde->map_lock);

	list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
		if (entry->refcnt)
			continue;

		if (!cancel_delayed_work(&entry->dwork))
			continue;

		tegra_vde_release_entry(entry);
	}

	mutex_unlock(&vde->map_lock);
}

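/*
 * Release every cached mapping, waiting out any delayed-unmap work that is
 * already executing; presumably intended for driver teardown.
 */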
void tegra_vde_dmabuf_cache_unmap_all(struct tegra_vde *vde)
{
	struct tegra_vde_cache_entry *entry, *tmp;

	mutex_lock(&vde->map_lock);

	while (!list_empty(&vde->map_list)) {
		list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
			if (!cancel_delayed_work(&entry->dwork))
				continue;

			tegra_vde_release_entry(entry);
		}

		/*
		 * Entries whose work could not be cancelled are being freed
		 * by the work itself; drop the lock so it can make progress.
		 */
		mutex_unlock(&vde->map_lock);
		schedule();
		mutex_lock(&vde->map_lock);
	}

	mutex_unlock(&vde->map_lock);
}