// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"
#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))
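/*
 * For example, with a 4K granule (8-byte PTEs, so bits_per_level = 9)
 * the shifts for levels 3/2/1/0 come out as 12/21/30/39.
 */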
#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)
/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
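/*
 * For example, a 4K granule yields 4K/2M/1G leaf sizes at levels 3/2/1;
 * a 64K granule yields 64K/512M at levels 3/2.
 */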
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)
#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)
/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7
#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;
static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}
static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
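/*
 * For example, with a 52-bit OA and a 64K granule, the shift by (48 - 12)
 * above moves physical address bits 51:48 into PTE bits 15:12, which sit
 * below the 64K page offset and are otherwise unused.
 */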
static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);
static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
			WARN_ON(1);
			return -EINVAL;
		}
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
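/*
 * Race sketch: if two CPUs try to install a table at the same ptep, the
 * cmpxchg loser gets back the winner's PTE (old != curr) and, as in
 * __arm_lpae_map() below, frees its own table and reuses the winner's.
 * On a non-coherent walk the loser may also sync the entry and publish
 * SW_SYNC on the winner's behalf, which is why that bit exists.
 */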
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size)
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
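/*
 * Usage sketch (illustrative, not called from this file): with a 4K
 * granule, a caller holding the ops can cover a 2M-aligned region with a
 * single level-2 block entry:
 *
 *	ops->map(ops, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE,
 *		 GFP_KERNEL);
 *
 * where iova and paddr are hypothetical, suitably aligned addresses.
 */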
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
		return size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
}
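/*
 * For example, unmapping a single 4K page from a 2M block (4K granule)
 * allocates a level-3 table, fills all 512 entries except the target one
 * with page mappings of the old block contents, and then swaps the table
 * in over the block entry via arm_lpae_install_table().
 */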
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_flush_walk(iop, iova, size,
						  ARM_LPAE_GRANULE(data));
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_page(iop, gather, iova, size);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
}
static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}
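/*
 * For example, an iova covered by a 2M level-2 block entry resolves to
 * the block's output address ORed with (iova & (SZ_2M - 1)).
 */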
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
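/*
 * For example, a driver advertising SZ_4K | SZ_2M | SZ_64K on a kernel
 * with 4K CPU pages selects the 4K granule here, leaving
 * pgsize_bitmap == SZ_4K | SZ_2M.
 */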
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
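/*
 * For example, ias = 48 with a 4K granule gives va_bits = 36,
 * bits_per_level = 9, levels = 4, start_level = 0 and pgd_bits = 9:
 * a single 4K level-0 pgd of 512 entries.
 */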
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT |
			    IO_PGTABLE_QUIRK_ARM_TTBR1))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}
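	/*
	 * For example, ias = 40 with a 4K granule would need a 2-entry
	 * level-0 pgd; instead, pgd_bits grows from 1 to 10 and the walk
	 * starts at level 1 with two concatenated 4K tables.
	 */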
	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}
static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};
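/*
 * Usage sketch: IOMMU drivers do not call the .alloc hooks directly; they
 * call alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie) (or another
 * format), which dispatches through these init_fns and hands back the
 * map/unmap/iova_to_phys ops, as the selftest below demonstrates.
 */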
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}
static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_flush_leaf	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE, GFP_KERNEL))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif