/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
 * v5.0 introduced support for 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support the address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
 * value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
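/*
 * Worked example (added for illustration, not part of the original driver):
 * with PG_ENT_SHIFT == SYSMMU_V5_PG_ENT_SHIFT (4), the 36-bit physical
 * address 0x1_2340_0000 is stored in a 32-bit entry as
 * 0x123400000 >> 4 == 0x12340000, and sect_to_phys() below recovers it by
 * shifting left again. With PG_ENT_SHIFT == 0 (v1.x - v3.x) the entries hold
 * the physical address unshifted.
 */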
static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
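/*
 * Worked example (added for illustration, not part of the original driver):
 * for IOVA 0x12345000, lv1ent_offset() == 0x12345000 >> 20 == 0x123 selects
 * the level-1 entry, and lv2ent_offset() == (0x12345000 >> 12) & 0xFF == 0x45
 * selects the level-2 entry inside the 256-entry second-level table.
 */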
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
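/*
 * Worked example (added for illustration, not part of the original driver):
 * MAKE_MMU_VER(3, 3) == (3 << 7) | 3 == 0x183. A REG_MMU_VERSION readout of
 * 0x30600000 therefore yields MMU_RAW_VER() == 0x183, which decodes back to
 * major version 3 and minor version 3 via MMU_MAJ_VER()/MMU_MIN_VER().
 */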
/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030
/* v5.x registers */
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
#define REG_V5_MMU_FLUSH_RANGE 0x018
#define REG_V5_MMU_FLUSH_START 0x020
#define REG_V5_MMU_FLUSH_END 0x024
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080
#define has_sysmmu(dev) (dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};
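/*
 * Worked example (added for illustration, not part of the original driver):
 * on a v1.x - v3.x controller a REG_INT_STATUS readout of 0x04 gives
 * __ffs(0x04) == 2, which matches the "AW MULTI-HIT" entry above, so the
 * faulting virtual address is then read from REG_AW_FAULT_ADDR.
 */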
/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add, contains a list of SYSMMU controllers defined by device tree,
 * which are bound to given master device. It is usually referenced by 'owner'
 * pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is an Exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices which
 * have been attached to this domain, and the page tables of the IO address
 * space defined by it. It is usually referenced by 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller, this includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from device
 * tree. It is usually referenced by 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}
static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_MMU_FLUSH_ENTRY);
			iova += SPAGE_SIZE;
		}
	} else {
		if (num_inv == 1) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		} else {
			writel((iova & SPAGE_MASK),
			       data->sfrbase + REG_V5_MMU_FLUSH_START);
			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
			       data->sfrbase + REG_V5_MMU_FLUSH_END);
			writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
		}
	}
}
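/*
 * Worked example (added for illustration, not part of the original driver):
 * invalidating a 64KiB large page with num_inv == 16 issues sixteen
 * REG_MMU_FLUSH_ENTRY writes on v1.x - v3.x hardware, while v5.x hardware
 * gets a single ranged flush covering [iova, iova + 15 * SPAGE_SIZE] via the
 * FLUSH_START/FLUSH_END/FLUSH_RANGE registers.
 */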
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
		       data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr = -1;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					 data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * SYSMMU driver keeps master's clock enabled only for the short
	 * time, while accessing the registers. For performing address
	 * translation during DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		goto err_iommu_register;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	return 0;

err_iommu_register:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};
static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};
static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}
static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}
static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}
static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * the following restrictions:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
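/*
 * Illustrative sketch (added, not part of the original driver) of how an
 * I/O virtual address allocator could honour the workaround above: round
 * every region start up to 128KiB and keep at least a 128KiB guard hole
 * behind the previous region. The helper name 'example_place_region' and
 * the use of SZ_128K are assumptions made only for this example, which is
 * kept inside "#if 0" so it is never compiled.
 */
#if 0
static unsigned long example_place_region(unsigned long prev_region_end)
{
	/* leave a >= 128KiB hole, then align the new start to 128KiB */
	unsigned long start = ALIGN(prev_region_end + SZ_128K, SZ_128K);

	/*
	 * The caller maps its buffer at [start, start + size) and records
	 * start + size as prev_region_end for the next allocation.
	 */
	return start;
}
#endif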
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv1ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}
static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;

	if (!has_sysmmu(dev))
		return;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}
	iommu_group_remove_device(dev);
}
static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev->archdata.iommu = owner;
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	/*
	 * SYSMMU will be runtime activated via device link (dependency) to its
	 * master device, so there are no direct calls to pm_runtime_get/put
	 * in this driver.
	 */
	device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);

	return 0;
}
static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = get_device_iommu_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};
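/*
 * Illustrative usage sketch (added, not part of the original driver): a
 * master device that references this SYSMMU through its "iommus" DT property
 * is normally handled via the DMA mapping API, but an UNMANAGED domain can
 * also be driven explicitly through the generic IOMMU API as below. The
 * device pointer and the addresses are hypothetical; the block is kept
 * inside "#if 0" so it is never compiled.
 */
#if 0
static int example_use_unmanaged_domain(struct device *master)
{
	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
	int ret;

	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, master);
	if (!ret)
		/* map 1MiB at IOVA 0x20000000 to physical 0x40000000 */
		ret = iommu_map(dom, 0x20000000, 0x40000000, SZ_1M,
				IOMMU_READ | IOMMU_WRITE);
	return ret;
}
#endif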
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
			__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", NULL);