/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024

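/*
 * A GPU virtual address is translated through a two-level pagetable:
 * bits 31:22 index the master TLB (MTLB), whose entries each point at
 * one 4K slave TLB (STLB) page, and bits 21:12 index that STLB to find
 * the PTE for the 4K page. For example, iova 0x12345000 decomposes into
 * mtlb_entry = (0x12345000 & 0xffc00000) >> 22 = 0x48 and
 * stlb_entry = (0x12345000 & 0x003ff000) >> 12 = 0x345.
 */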
struct etnaviv_iommuv2_domain {
	struct iommu_domain domain;
	struct device *dev;
	void *bad_page_cpu;
	dma_addr_t bad_page_dma;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[1024];
	dma_addr_t stlb_dma[1024];
};

static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv2_domain, domain);
}

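/*
 * Map one 4K page: build a PTE carrying the physical address, the
 * present bit and (for writable mappings) the writeable bit, and store
 * it in the STLB slot selected by the iova.
 */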
static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
	   phys_addr_t paddr, size_t size, int prot)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;
	u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (prot & IOMMU_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}

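/*
 * Unmap one 4K page by rewriting its PTE as an exception entry, so a
 * stale GPU access faults instead of hitting freed memory.
 */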
static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
	unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}

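/*
 * Walk the pagetable in software: look up the PTE for the iova and
 * mask off the low status bits to recover the physical page address.
 */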
static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
	dma_addr_t iova)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
}

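/*
 * Allocate the scratch page plus the complete pagetable hierarchy from
 * DMA coherent memory and initialize every STLB entry to the exception
 * PTE, so unmapped addresses fault cleanly from the start.
 */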
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
	u32 *p;
	int ret, i, j;

	/* allocate scratch page */
	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
						  SZ_4K,
						  &etnaviv_domain->bad_page_dma,
						  GFP_KERNEL);
	if (!etnaviv_domain->bad_page_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}
	p = etnaviv_domain->bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
						  SZ_4K,
						  &etnaviv_domain->mtlb_dma,
						  GFP_KERNEL);
	if (!etnaviv_domain->mtlb_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	/* pre-populate STLB pages (may want to switch to on-demand later) */
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		etnaviv_domain->stlb_cpu[i] =
				dma_alloc_coherent(etnaviv_domain->dev,
						   SZ_4K,
						   &etnaviv_domain->stlb_dma[i],
						   GFP_KERNEL);
		if (!etnaviv_domain->stlb_cpu[i]) {
			ret = -ENOMEM;
			goto fail_mem;
		}
		p = etnaviv_domain->stlb_cpu[i];
		for (j = 0; j < SZ_4K / 4; j++)
			*p++ = MMUv2_PTE_EXCEPTION;

		etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
					      MMUv2_PTE_PRESENT;
	}

	return 0;

fail_mem:
	if (etnaviv_domain->bad_page_cpu)
		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
				  etnaviv_domain->bad_page_cpu,
				  etnaviv_domain->bad_page_dma);

	if (etnaviv_domain->mtlb_cpu)
		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
				  etnaviv_domain->mtlb_cpu,
				  etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
					  etnaviv_domain->stlb_cpu[i],
					  etnaviv_domain->stlb_dma[i]);
	}

	return ret;
}

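/*
 * Tear down the domain: release the scratch page, the MTLB and every
 * allocated STLB page back to the DMA allocator, then free the domain
 * structure itself.
 */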
static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
			  etnaviv_domain->bad_page_cpu,
			  etnaviv_domain->bad_page_dma);

	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
			  etnaviv_domain->mtlb_cpu,
			  etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
					  etnaviv_domain->stlb_cpu[i],
					  etnaviv_domain->stlb_dma[i]);
	}

	vfree(etnaviv_domain);
}

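/*
 * Size of a pagetable dump: one 4K page for the MTLB plus one 4K page
 * for each STLB whose MTLB entry is marked present.
 */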
static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}

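/*
 * Copy the MTLB followed by each present STLB page into the dump
 * buffer; the layout mirrors what dump_size accounted for.
 */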
static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}

static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
	.ops = {
		.domain_free = etnaviv_iommuv2_domain_free,
		.map = etnaviv_iommuv2_map,
		.unmap = etnaviv_iommuv2_unmap,
		.iova_to_phys = etnaviv_iommuv2_iova_to_phys,
		.pgsize_bitmap = SZ_4K,
	},
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
};

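/*
 * (Re)program the MMU state after a GPU reset: the MTLB base and the
 * scratch page address are loaded via a command stream run by the FE,
 * and the MMU is only switched on once the GPU has gone idle again.
 */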
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)etnaviv_domain->mtlb_dma,
				(u32)etnaviv_domain->bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

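/*
 * Allocate and initialize an MMUv2 domain covering the entire
 * 4K-aligned GPU aperture. Returns NULL on failure, as callers only
 * check for a valid pointer.
 */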
struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain;
	int ret;

	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
	if (!etnaviv_domain)
		return NULL;

	etnaviv_domain->dev = gpu->dev;

	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
	etnaviv_domain->domain.geometry.aperture_start = 0;
	etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);

	ret = etnaviv_iommuv2_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->domain;

out_free:
	vfree(etnaviv_domain);
	return NULL;
}