// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
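
/* Read a CP15 coprocessor register (via an MRC instruction) into 'reg'. */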
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	*iop;
	struct device		*dev;
	spinlock_t		pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}
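
/*
 * Both clocks are prepared in msm_iommu_probe(), so the helpers below only
 * need clk_enable()/clk_disable(), which are safe to call from the atomic
 * sections where register access happens.
 */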
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}
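
/*
 * Bring the IOMMU into a known state: clear the global configuration
 * registers, then zero out every context bank.
 */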
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}
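
/*
 * TLB maintenance callbacks used by io-pgtable. Each walks every IOMMU
 * this domain is attached to and invalidates either the whole context TLB
 * (CTX_TLBIALL) or one VA+ASID at a time (TLBIVA); the clocks must be
 * enabled for the register writes to reach the hardware.
 */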
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};
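
/*
 * Context-bank allocator: a plain bitmap search. test_and_set_bit() makes
 * the claim atomic, so a lost race simply retries the search.
 */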
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}
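
/*
 * Program a context bank with the configuration computed by io-pgtable's
 * ARMv7 short-descriptor backend (TTBR0/TTBCR/PRRR/NMRR), enable fault
 * reporting and L2 redirection, and finally turn the MMU on.
 */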
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static void msm_iommu_release_device(struct device *dev)
{
}
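
/*
 * Attach walks every known IOMMU looking for the masters bound to this
 * device, claims a free context bank for each, routes the master's MIDs
 * to that bank (config_mids()) and programs it for the domain's page
 * table (__program_context()).
 */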
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
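
/*
 * map/unmap defer to io-pgtable under the per-domain page-table lock; TLB
 * maintenance happens through msm_iommu_flush_ops and msm_iommu_sync_map().
 */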
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t len, struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;

	spin_lock_irqsave(&priv->pgtlock, flags);
	len = priv->iop->unmap(priv->iop, iova, len, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return len;
}
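
/*
 * Resolve an IOVA by asking the hardware: write the VA into the V2P probe
 * register and read the translated address back from PAR, instead of
 * walking the page table in software.
 */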
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}
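
/*
 * Record a stream ID (MID) for a master device, allocating the per-device
 * msm_iommu_ctx_dev on first use. Runs under msm_iommu_lock, hence the
 * GFP_ATOMIC allocation.
 */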
static void insert_iommu_master(struct device *dev,
				struct msm_iommu_dev **iommu,
				struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
				 sid);
			return;
		}

	master->mids[master->num_mids++] = spec->args[0];
}
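
/*
 * Translate an "iommus" specifier into a master entry; spec->args[0] is
 * the stream ID (MID). Illustrative device-tree usage only, assuming the
 * binding's single-cell specifier (node names and addresses below are
 * hypothetical):
 *
 *	iommu: iommu@7500000 {
 *		compatible = "qcom,apq8064-iommu";
 *		#iommu-cells = <1>;
 *	};
 *
 *	gpu@7000000 {
 *		iommus = <&iommu 1>;
 *	};
 */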
static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
		if (iommu->dev->of_node == spec->np)
			break;

	if (!iommu || iommu->dev->of_node != spec->np) {
		ret = -ENODEV;
		goto fail;
	}

	insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}
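
/*
 * Fault handler: dump the fault state of every context bank with a
 * non-zero FSR, then write the status bits back to acknowledge the fault
 * (the FSR bits are assumed here to be write-to-clear).
 */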
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);

		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);

fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	/*
	 * Nothing is needed here, the barrier to guarantee
	 * completion of the TLB sync operation is implicitly
	 * taken care of when the IOMMU client does a writel before
	 * kick-starting the other master.
	 */
	.iotlb_sync = NULL,
	.iotlb_sync_map = msm_iommu_sync_map,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.probe_device = msm_iommu_probe_device,
	.release_device = msm_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
};

static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk)) {
		dev_err(iommu->dev, "could not get smmu_pclk\n");
		return PTR_ERR(iommu->pclk);
	}

	ret = clk_prepare(iommu->pclk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare smmu_pclk\n");
		return ret;
	}

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		dev_err(iommu->dev, "could not get iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return PTR_ERR(iommu->clk);
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return ret;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		dev_err(iommu->dev, "could not get iommu base\n");
		ret = PTR_ERR(iommu->base);
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;
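
	/*
	 * Sanity-check the hardware with a dummy translation: enable the
	 * MMU, request a V2P lookup of address zero, and make sure PAR
	 * reads back non-zero before trusting the context banks.
	 */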
	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;

fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return 0;
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU driver\n");

	return ret;
}
subsys_initcall(msm_iommu_driver_init);