GNU Linux-libre 4.14.328-gnu1
[releases.git] / drivers / iommu / ipmmu-vmsa.c
1 /*
2  * IPMMU VMSA
3  *
4  * Copyright (C) 2014 Renesas Electronics Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; version 2 of the License.
9  */
10
11 #include <linux/bitmap.h>
12 #include <linux/delay.h>
13 #include <linux/dma-iommu.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/err.h>
16 #include <linux/export.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iommu.h>
20 #include <linux/module.h>
21 #include <linux/of.h>
22 #include <linux/of_platform.h>
23 #include <linux/platform_device.h>
24 #include <linux/sizes.h>
25 #include <linux/slab.h>
26
27 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
28 #include <asm/dma-iommu.h>
29 #include <asm/pgalloc.h>
30 #endif
31
32 #include "io-pgtable.h"
33
34 #define IPMMU_CTX_MAX 1
35
36 struct ipmmu_vmsa_device {
37         struct device *dev;
38         void __iomem *base;
39         struct iommu_device iommu;
40
41         unsigned int num_utlbs;
42         spinlock_t lock;                        /* Protects ctx and domains[] */
43         DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
44         struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
45
46         struct dma_iommu_mapping *mapping;
47 };
48
49 struct ipmmu_vmsa_domain {
50         struct ipmmu_vmsa_device *mmu;
51         struct iommu_domain io_domain;
52
53         struct io_pgtable_cfg cfg;
54         struct io_pgtable_ops *iop;
55
56         unsigned int context_id;
57         struct mutex mutex;                     /* Protects mappings */
58 };
59
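/*
 * Per-device data, stored in dev->iommu_fwspec->iommu_priv by
 * ipmmu_init_platform_device() and retrieved with to_priv(). With
 * CONFIG_IOMMU_DMA the list entry links the device into ipmmu_slave_devices.
 */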
60 struct ipmmu_vmsa_iommu_priv {
61         struct ipmmu_vmsa_device *mmu;
62         struct device *dev;
63         struct list_head list;
64 };
65
66 static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
67 {
68         return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
69 }
70
71 static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
72 {
73         return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
74 }
75
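/*
 * ipmmu_tlb_sync() polls IMCTR.FLUSH in 1us steps, so this bounds the wait
 * at roughly 100us.
 */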
76 #define TLB_LOOP_TIMEOUT                100     /* 100us */
77
78 /* -----------------------------------------------------------------------------
79  * Register Definitions
80  */
81
82 #define IM_NS_ALIAS_OFFSET              0x800
83
84 #define IM_CTX_SIZE                     0x40
85
86 #define IMCTR                           0x0000
87 #define IMCTR_TRE                       (1 << 17)
88 #define IMCTR_AFE                       (1 << 16)
89 #define IMCTR_RTSEL_MASK                (3 << 4)
90 #define IMCTR_RTSEL_SHIFT               4
91 #define IMCTR_TREN                      (1 << 3)
92 #define IMCTR_INTEN                     (1 << 2)
93 #define IMCTR_FLUSH                     (1 << 1)
94 #define IMCTR_MMUEN                     (1 << 0)
95
96 #define IMCAAR                          0x0004
97
98 #define IMTTBCR                         0x0008
99 #define IMTTBCR_EAE                     (1 << 31)
100 #define IMTTBCR_PMB                     (1 << 30)
101 #define IMTTBCR_SH1_NON_SHAREABLE       (0 << 28)
102 #define IMTTBCR_SH1_OUTER_SHAREABLE     (2 << 28)
103 #define IMTTBCR_SH1_INNER_SHAREABLE     (3 << 28)
104 #define IMTTBCR_SH1_MASK                (3 << 28)
105 #define IMTTBCR_ORGN1_NC                (0 << 26)
106 #define IMTTBCR_ORGN1_WB_WA             (1 << 26)
107 #define IMTTBCR_ORGN1_WT                (2 << 26)
108 #define IMTTBCR_ORGN1_WB                (3 << 26)
109 #define IMTTBCR_ORGN1_MASK              (3 << 26)
110 #define IMTTBCR_IRGN1_NC                (0 << 24)
111 #define IMTTBCR_IRGN1_WB_WA             (1 << 24)
112 #define IMTTBCR_IRGN1_WT                (2 << 24)
113 #define IMTTBCR_IRGN1_WB                (3 << 24)
114 #define IMTTBCR_IRGN1_MASK              (3 << 24)
115 #define IMTTBCR_TSZ1_MASK               (7 << 16)
116 #define IMTTBCR_TSZ1_SHIFT              16
117 #define IMTTBCR_SH0_NON_SHAREABLE       (0 << 12)
118 #define IMTTBCR_SH0_OUTER_SHAREABLE     (2 << 12)
119 #define IMTTBCR_SH0_INNER_SHAREABLE     (3 << 12)
120 #define IMTTBCR_SH0_MASK                (3 << 12)
121 #define IMTTBCR_ORGN0_NC                (0 << 10)
122 #define IMTTBCR_ORGN0_WB_WA             (1 << 10)
123 #define IMTTBCR_ORGN0_WT                (2 << 10)
124 #define IMTTBCR_ORGN0_WB                (3 << 10)
125 #define IMTTBCR_ORGN0_MASK              (3 << 10)
126 #define IMTTBCR_IRGN0_NC                (0 << 8)
127 #define IMTTBCR_IRGN0_WB_WA             (1 << 8)
128 #define IMTTBCR_IRGN0_WT                (2 << 8)
129 #define IMTTBCR_IRGN0_WB                (3 << 8)
130 #define IMTTBCR_IRGN0_MASK              (3 << 8)
131 #define IMTTBCR_SL0_LVL_2               (0 << 4)
132 #define IMTTBCR_SL0_LVL_1               (1 << 4)
133 #define IMTTBCR_TSZ0_MASK               (7 << 0)
134 #define IMTTBCR_TSZ0_SHIFT              0
135
136 #define IMBUSCR                         0x000c
137 #define IMBUSCR_DVM                     (1 << 2)
138 #define IMBUSCR_BUSSEL_SYS              (0 << 0)
139 #define IMBUSCR_BUSSEL_CCI              (1 << 0)
140 #define IMBUSCR_BUSSEL_IMCAAR           (2 << 0)
141 #define IMBUSCR_BUSSEL_CCI_IMCAAR       (3 << 0)
142 #define IMBUSCR_BUSSEL_MASK             (3 << 0)
143
144 #define IMTTLBR0                        0x0010
145 #define IMTTUBR0                        0x0014
146 #define IMTTLBR1                        0x0018
147 #define IMTTUBR1                        0x001c
148
149 #define IMSTR                           0x0020
150 #define IMSTR_ERRLVL_MASK               (3 << 12)
151 #define IMSTR_ERRLVL_SHIFT              12
152 #define IMSTR_ERRCODE_TLB_FORMAT        (1 << 8)
153 #define IMSTR_ERRCODE_ACCESS_PERM       (4 << 8)
154 #define IMSTR_ERRCODE_SECURE_ACCESS     (5 << 8)
155 #define IMSTR_ERRCODE_MASK              (7 << 8)
156 #define IMSTR_MHIT                      (1 << 4)
157 #define IMSTR_ABORT                     (1 << 2)
158 #define IMSTR_PF                        (1 << 1)
159 #define IMSTR_TF                        (1 << 0)
160
161 #define IMMAIR0                         0x0028
162 #define IMMAIR1                         0x002c
163 #define IMMAIR_ATTR_MASK                0xff
164 #define IMMAIR_ATTR_DEVICE              0x04
165 #define IMMAIR_ATTR_NC                  0x44
166 #define IMMAIR_ATTR_WBRWA               0xff
167 #define IMMAIR_ATTR_SHIFT(n)            ((n) << 3)
168 #define IMMAIR_ATTR_IDX_NC              0
169 #define IMMAIR_ATTR_IDX_WBRWA           1
170 #define IMMAIR_ATTR_IDX_DEV             2
171
172 #define IMEAR                           0x0030
173
174 #define IMPCTR                          0x0200
175 #define IMPSTR                          0x0208
176 #define IMPEAR                          0x020c
177 #define IMPMBA(n)                       (0x0280 + ((n) * 4))
178 #define IMPMBD(n)                       (0x02c0 + ((n) * 4))
179
180 #define IMUCTR(n)                       (0x0300 + ((n) * 16))
181 #define IMUCTR_FIXADDEN                 (1 << 31)
182 #define IMUCTR_FIXADD_MASK              (0xff << 16)
183 #define IMUCTR_FIXADD_SHIFT             16
184 #define IMUCTR_TTSEL_MMU(n)             ((n) << 4)
185 #define IMUCTR_TTSEL_PMB                (8 << 4)
186 #define IMUCTR_TTSEL_MASK               (15 << 4)
187 #define IMUCTR_FLUSH                    (1 << 1)
188 #define IMUCTR_MMUEN                    (1 << 0)
189
190 #define IMUASID(n)                      (0x0308 + ((n) * 16))
191 #define IMUASID_ASID8_MASK              (0xff << 8)
192 #define IMUASID_ASID8_SHIFT             8
193 #define IMUASID_ASID0_MASK              (0xff << 0)
194 #define IMUASID_ASID0_SHIFT             0
195
196 /* -----------------------------------------------------------------------------
197  * Read/Write Access
198  */
199
200 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
201 {
202         return ioread32(mmu->base + offset);
203 }
204
205 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
206                         u32 data)
207 {
208         iowrite32(data, mmu->base + offset);
209 }
210
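/*
 * Context registers are banked: context n accesses register 'reg' at offset
 * n * IM_CTX_SIZE + reg from the MMU register base.
 */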
211 static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
212 {
213         return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
214 }
215
216 static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
217                             u32 data)
218 {
219         ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
220 }
221
222 /* -----------------------------------------------------------------------------
223  * TLB and microTLB Management
224  */
225
226 /* Wait for any pending TLB invalidations to complete */
227 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
228 {
229         unsigned int count = 0;
230
231         while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
232                 cpu_relax();
233                 if (++count == TLB_LOOP_TIMEOUT) {
234                         dev_err_ratelimited(domain->mmu->dev,
235                         "TLB sync timed out -- MMU may be deadlocked\n");
236                         return;
237                 }
238                 udelay(1);
239         }
240 }
241
242 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
243 {
244         u32 reg;
245
246         reg = ipmmu_ctx_read(domain, IMCTR);
247         reg |= IMCTR_FLUSH;
248         ipmmu_ctx_write(domain, IMCTR, reg);
249
250         ipmmu_tlb_sync(domain);
251 }
252
253 /*
254  * Enable MMU translation for the microTLB.
255  */
256 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
257                               unsigned int utlb)
258 {
259         struct ipmmu_vmsa_device *mmu = domain->mmu;
260
261         /*
262          * TODO: Reference-count the microTLB as several bus masters can be
263          * connected to the same microTLB.
264          */
265
266         /* TODO: What should we set the ASID to ? */
267         ipmmu_write(mmu, IMUASID(utlb), 0);
268         /* TODO: Do we need to flush the microTLB ? */
269         ipmmu_write(mmu, IMUCTR(utlb),
270                     IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
271                     IMUCTR_MMUEN);
272 }
273
274 /*
275  * Disable MMU translation for the microTLB.
276  */
277 static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
278                                unsigned int utlb)
279 {
280         struct ipmmu_vmsa_device *mmu = domain->mmu;
281
282         ipmmu_write(mmu, IMUCTR(utlb), 0);
283 }
284
285 static void ipmmu_tlb_flush_all(void *cookie)
286 {
287         struct ipmmu_vmsa_domain *domain = cookie;
288
289         ipmmu_tlb_invalidate(domain);
290 }
291
292 static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
293                                 size_t granule, bool leaf, void *cookie)
294 {
295         /* The hardware doesn't support selective TLB flush. */
296 }
297
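/*
 * Minimal io-pgtable TLB hooks: without selective invalidation support,
 * tlb_add_flush() is a no-op and tlb_sync() simply performs a full flush.
 */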
298 static const struct iommu_gather_ops ipmmu_gather_ops = {
299         .tlb_flush_all = ipmmu_tlb_flush_all,
300         .tlb_add_flush = ipmmu_tlb_add_flush,
301         .tlb_sync = ipmmu_tlb_flush_all,
302 };
303
304 /* -----------------------------------------------------------------------------
305  * Domain/Context Management
306  */
307
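/*
 * Reserve a free context index in the mmu->ctx bitmap and bind the domain to
 * it. Returns the index, or IPMMU_CTX_MAX when every context is already in
 * use (the caller turns that into -EBUSY).
 */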
308 static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
309                                          struct ipmmu_vmsa_domain *domain)
310 {
311         unsigned long flags;
312         int ret;
313
314         spin_lock_irqsave(&mmu->lock, flags);
315
316         ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX);
317         if (ret != IPMMU_CTX_MAX) {
318                 mmu->domains[ret] = domain;
319                 set_bit(ret, mmu->ctx);
320         }
321
322         spin_unlock_irqrestore(&mmu->lock, flags);
323
324         return ret;
325 }
326
327 static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
328                                       unsigned int context_id)
329 {
330         unsigned long flags;
331
332         spin_lock_irqsave(&mmu->lock, flags);
333
334         clear_bit(context_id, mmu->ctx);
335         mmu->domains[context_id] = NULL;
336
337         spin_unlock_irqrestore(&mmu->lock, flags);
338 }
339
340 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
341 {
342         u64 ttbr;
343         int ret;
344
345         /*
346          * Allocate the page table operations.
347          *
348          * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
349          * access, Long-descriptor format" that the NStable bit being set in a
350          * table descriptor will result in the NStable and NS bits of all child
351          * entries being ignored and considered as being set. The IPMMU seems
352          * not to comply with this, as it generates a secure access page fault
353          * if any of the NStable and NS bits isn't set when running in
354          * non-secure mode.
355          */
356         domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
357         domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
358         domain->cfg.ias = 32;
359         domain->cfg.oas = 40;
360         domain->cfg.tlb = &ipmmu_gather_ops;
361         domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
362         domain->io_domain.geometry.force_aperture = true;
363         /*
364          * TODO: Add support for coherent walk through CCI with DVM and remove
365          * cache handling. For now, delegate it to the io-pgtable code.
366          */
367         domain->cfg.iommu_dev = domain->mmu->dev;
368
369         /*
370          * Find an unused context.
371          */
372         ret = ipmmu_domain_allocate_context(domain->mmu, domain);
373         if (ret == IPMMU_CTX_MAX)
374                 return -EBUSY;
375
376         domain->context_id = ret;
377
378         domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
379                                            domain);
380         if (!domain->iop) {
381                 ipmmu_domain_free_context(domain->mmu, domain->context_id);
382                 return -EINVAL;
383         }
384
385         /* TTBR0 */
386         ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
387         ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
388         ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
389
390         /*
391          * TTBCR
392          * We use long descriptors with inner-shareable WBWA tables and allocate
393          * the whole 32-bit VA space to TTBR0.
394          */
395         ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
396                         IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
397                         IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
398
399         /* MAIR0 */
400         ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
401
402         /* IMBUSCR */
403         ipmmu_ctx_write(domain, IMBUSCR,
404                         ipmmu_ctx_read(domain, IMBUSCR) &
405                         ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
406
407         /*
408          * IMSTR
409          * Clear all interrupt flags.
410          */
411         ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));
412
413         /*
414          * IMCTR
415          * Enable the MMU and interrupt generation. The long-descriptor
416          * translation table format doesn't use TEX remapping. Don't enable AF
417          * software management as we have no use for it. Flush the TLB as
418          * required when modifying the context registers.
419          */
420         ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
421
422         return 0;
423 }
424
425 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
426 {
427         if (!domain->mmu)
428                 return;
429
430         /*
431          * Disable the context. Flush the TLB as required when modifying the
432          * context registers.
433          *
434          * TODO: Is TLB flush really needed ?
435          */
436         ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
437         ipmmu_tlb_sync(domain);
438         ipmmu_domain_free_context(domain->mmu, domain->context_id);
439 }
440
441 /* -----------------------------------------------------------------------------
442  * Fault Handling
443  */
444
445 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
446 {
447         const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
448         struct ipmmu_vmsa_device *mmu = domain->mmu;
449         u32 status;
450         u32 iova;
451
452         status = ipmmu_ctx_read(domain, IMSTR);
453         if (!(status & err_mask))
454                 return IRQ_NONE;
455
456         iova = ipmmu_ctx_read(domain, IMEAR);
457
458         /*
459          * Clear the error status flags. Unlike traditional interrupt flag
460          * registers that must be cleared by writing 1, this status register
461          * seems to require 0. The error address register must be read before
462          * clearing the status, otherwise its value will be 0.
463          */
464         ipmmu_ctx_write(domain, IMSTR, 0);
465
466         /* Log fatal errors. */
467         if (status & IMSTR_MHIT)
468                 dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
469                                     iova);
470         if (status & IMSTR_ABORT)
471                 dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
472                                     iova);
473
474         if (!(status & (IMSTR_PF | IMSTR_TF)))
475                 return IRQ_NONE;
476
477         /*
478          * Try to handle page faults and translation faults.
479          *
480          * TODO: We need to look up the faulty device based on the I/O VA. Use
481          * the IOMMU device for now.
482          */
483         if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
484                 return IRQ_HANDLED;
485
486         dev_err_ratelimited(mmu->dev,
487                             "Unhandled fault: status 0x%08x iova 0x%08x\n",
488                             status, iova);
489
490         return IRQ_HANDLED;
491 }
492
493 static irqreturn_t ipmmu_irq(int irq, void *dev)
494 {
495         struct ipmmu_vmsa_device *mmu = dev;
496         irqreturn_t status = IRQ_NONE;
497         unsigned int i;
498         unsigned long flags;
499
500         spin_lock_irqsave(&mmu->lock, flags);
501
502         /*
503          * Check interrupts for all active contexts.
504          */
505         for (i = 0; i < IPMMU_CTX_MAX; i++) {
506                 if (!mmu->domains[i])
507                         continue;
508                 if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
509                         status = IRQ_HANDLED;
510         }
511
512         spin_unlock_irqrestore(&mmu->lock, flags);
513
514         return status;
515 }
516
517 /* -----------------------------------------------------------------------------
518  * IOMMU Operations
519  */
520
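/*
 * Domains are allocated lazily: the backing IPMMU and hardware context are
 * only bound in ipmmu_attach_device(), when the first device is attached.
 */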
521 static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
522 {
523         struct ipmmu_vmsa_domain *domain;
524
525         domain = kzalloc(sizeof(*domain), GFP_KERNEL);
526         if (!domain)
527                 return NULL;
528
529         mutex_init(&domain->mutex);
530
531         return &domain->io_domain;
532 }
533
534 static void ipmmu_domain_free(struct iommu_domain *io_domain)
535 {
536         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
537
538         /*
539          * Free the domain resources. We assume that all devices have already
540          * been detached.
541          */
542         ipmmu_domain_destroy_context(domain);
543         free_io_pgtable_ops(domain->iop);
544         kfree(domain);
545 }
546
547 static int ipmmu_attach_device(struct iommu_domain *io_domain,
548                                struct device *dev)
549 {
550         struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
551         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
552         struct ipmmu_vmsa_device *mmu = priv ? priv->mmu : NULL;
553         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
554         unsigned int i;
555         int ret = 0;
556
557         if (!priv || !priv->mmu) {
558                 dev_err(dev, "Cannot attach to IPMMU\n");
559                 return -ENXIO;
560         }
561
562         mutex_lock(&domain->mutex);
563
564         if (!domain->mmu) {
565                 /* The domain hasn't been used yet, initialize it. */
566                 domain->mmu = mmu;
567                 ret = ipmmu_domain_init_context(domain);
568         } else if (domain->mmu != mmu) {
569                 /*
570                  * Something is wrong, we can't attach two devices using
571                  * different IOMMUs to the same domain.
572                  */
573                 dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
574                         dev_name(mmu->dev), dev_name(domain->mmu->dev));
575                 ret = -EINVAL;
576         } else
577                 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
578
579         mutex_unlock(&domain->mutex);
580
581         if (ret < 0)
582                 return ret;
583
584         for (i = 0; i < fwspec->num_ids; ++i)
585                 ipmmu_utlb_enable(domain, fwspec->ids[i]);
586
587         return 0;
588 }
589
590 static void ipmmu_detach_device(struct iommu_domain *io_domain,
591                                 struct device *dev)
592 {
593         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
594         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
595         unsigned int i;
596
597         for (i = 0; i < fwspec->num_ids; ++i)
598                 ipmmu_utlb_disable(domain, fwspec->ids[i]);
599
600         /*
601          * TODO: Optimize by disabling the context when no device is attached.
602          */
603 }
604
605 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
606                      phys_addr_t paddr, size_t size, int prot)
607 {
608         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
609
610         if (!domain)
611                 return -ENODEV;
612
613         return domain->iop->map(domain->iop, iova, paddr, size, prot);
614 }
615
616 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
617                           size_t size)
618 {
619         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
620
621         return domain->iop->unmap(domain->iop, iova, size);
622 }
623
624 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
625                                       dma_addr_t iova)
626 {
627         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
628
629         /* TODO: Is locking needed ? */
630
631         return domain->iop->iova_to_phys(domain->iop, iova);
632 }
633
634 static int ipmmu_init_platform_device(struct device *dev,
635                                       struct of_phandle_args *args)
636 {
637         struct platform_device *ipmmu_pdev;
638         struct ipmmu_vmsa_iommu_priv *priv;
639
640         ipmmu_pdev = of_find_device_by_node(args->np);
641         if (!ipmmu_pdev)
642                 return -ENODEV;
643
644         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
645         if (!priv)
646                 return -ENOMEM;
647
648         priv->mmu = platform_get_drvdata(ipmmu_pdev);
649         priv->dev = dev;
650         dev->iommu_fwspec->iommu_priv = priv;
651         return 0;
652 }
653
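/*
 * A hypothetical device tree fragment (node names and addresses are made up)
 * showing how a bus master selects its micro-TLB; the single "iommus" cell is
 * what of_xlate() below passes to iommu_fwspec_add_ids():
 *
 *     ipmmu: mmu@fe951000 {
 *             compatible = "renesas,ipmmu-vmsa";
 *             reg = <0xfe951000 0x1000>;
 *             #iommu-cells = <1>;
 *     };
 *
 *     master {
 *             iommus = <&ipmmu 13>;    (micro-TLB 13)
 *     };
 */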
654 static int ipmmu_of_xlate(struct device *dev,
655                           struct of_phandle_args *spec)
656 {
657         iommu_fwspec_add_ids(dev, spec->args, 1);
658
659         /* Initialize once - xlate() may be called multiple times */
660         if (to_priv(dev))
661                 return 0;
662
663         return ipmmu_init_platform_device(dev, spec);
664 }
665
666 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
667
668 static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
669 {
670         if (type != IOMMU_DOMAIN_UNMANAGED)
671                 return NULL;
672
673         return __ipmmu_domain_alloc(type);
674 }
675
676 static int ipmmu_add_device(struct device *dev)
677 {
678         struct ipmmu_vmsa_device *mmu = NULL;
679         struct iommu_group *group;
680         int ret;
681
682         /*
683          * Only let through devices that have been verified in xlate()
684          */
685         if (!to_priv(dev))
686                 return -ENODEV;
687
688         /* Create a device group and add the device to it. */
689         group = iommu_group_alloc();
690         if (IS_ERR(group)) {
691                 dev_err(dev, "Failed to allocate IOMMU group\n");
692                 ret = PTR_ERR(group);
693                 goto error;
694         }
695
696         ret = iommu_group_add_device(group, dev);
697         iommu_group_put(group);
698
699         if (ret < 0) {
700                 dev_err(dev, "Failed to add device to IPMMU group\n");
701                 group = NULL;
702                 goto error;
703         }
704
705         /*
706          * Create the ARM mapping, used by the ARM DMA mapping core to allocate
707          * VAs. This will allocate a corresponding IOMMU domain.
708          *
709          * TODO:
710          * - Create one mapping per context (TLB).
711          * - Make the mapping size configurable ? We currently use a 2GB mapping
712          *   at a 1GB offset to ensure that NULL VAs will fault.
713          */
714         mmu = to_priv(dev)->mmu;
715         if (!mmu->mapping) {
716                 struct dma_iommu_mapping *mapping;
717
718                 mapping = arm_iommu_create_mapping(&platform_bus_type,
719                                                    SZ_1G, SZ_2G);
720                 if (IS_ERR(mapping)) {
721                         dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
722                         ret = PTR_ERR(mapping);
723                         goto error;
724                 }
725
726                 mmu->mapping = mapping;
727         }
728
729         /* Attach the ARM VA mapping to the device. */
730         ret = arm_iommu_attach_device(dev, mmu->mapping);
731         if (ret < 0) {
732                 dev_err(dev, "Failed to attach device to VA mapping\n");
733                 goto error;
734         }
735
736         return 0;
737
738 error:
739         if (mmu)
740                 arm_iommu_release_mapping(mmu->mapping);
741
742         if (!IS_ERR_OR_NULL(group))
743                 iommu_group_remove_device(dev);
744
745         return ret;
746 }
747
748 static void ipmmu_remove_device(struct device *dev)
749 {
750         arm_iommu_detach_device(dev);
751         iommu_group_remove_device(dev);
752 }
753
754 static const struct iommu_ops ipmmu_ops = {
755         .domain_alloc = ipmmu_domain_alloc,
756         .domain_free = ipmmu_domain_free,
757         .attach_dev = ipmmu_attach_device,
758         .detach_dev = ipmmu_detach_device,
759         .map = ipmmu_map,
760         .unmap = ipmmu_unmap,
761         .map_sg = default_iommu_map_sg,
762         .iova_to_phys = ipmmu_iova_to_phys,
763         .add_device = ipmmu_add_device,
764         .remove_device = ipmmu_remove_device,
765         .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
766         .of_xlate = ipmmu_of_xlate,
767 };
768
769 #endif /* CONFIG_ARM && !CONFIG_IOMMU_DMA */
770
771 #ifdef CONFIG_IOMMU_DMA
772
773 static DEFINE_SPINLOCK(ipmmu_slave_devices_lock);
774 static LIST_HEAD(ipmmu_slave_devices);
775
776 static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type)
777 {
778         struct iommu_domain *io_domain = NULL;
779
780         switch (type) {
781         case IOMMU_DOMAIN_UNMANAGED:
782                 io_domain = __ipmmu_domain_alloc(type);
783                 break;
784
785         case IOMMU_DOMAIN_DMA:
786                 io_domain = __ipmmu_domain_alloc(type);
787                 if (io_domain)
788                         iommu_get_dma_cookie(io_domain);
789                 break;
790         }
791
792         return io_domain;
793 }
794
795 static void ipmmu_domain_free_dma(struct iommu_domain *io_domain)
796 {
797         switch (io_domain->type) {
798         case IOMMU_DOMAIN_DMA:
799                 iommu_put_dma_cookie(io_domain);
800                 /* fall-through */
801         default:
802                 ipmmu_domain_free(io_domain);
803                 break;
804         }
805 }
806
807 static int ipmmu_add_device_dma(struct device *dev)
808 {
809         struct iommu_group *group;
810
811         /*
812          * Only let through devices that have been verified in xlate()
813          */
814         if (!to_priv(dev))
815                 return -ENODEV;
816
817         group = iommu_group_get_for_dev(dev);
818         if (IS_ERR(group))
819                 return PTR_ERR(group);
820
821         spin_lock(&ipmmu_slave_devices_lock);
822         list_add(&to_priv(dev)->list, &ipmmu_slave_devices);
823         spin_unlock(&ipmmu_slave_devices_lock);
824         return 0;
825 }
826
827 static void ipmmu_remove_device_dma(struct device *dev)
828 {
829         struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
830
831         spin_lock(&ipmmu_slave_devices_lock);
832         list_del(&priv->list);
833         spin_unlock(&ipmmu_slave_devices_lock);
834
835         iommu_group_remove_device(dev);
836 }
837
838 static struct device *ipmmu_find_sibling_device(struct device *dev)
839 {
840         struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
841         struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL;
842         bool found = false;
843
844         spin_lock(&ipmmu_slave_devices_lock);
845
846         list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) {
847                 if (priv == sibling_priv)
848                         continue;
849                 if (sibling_priv->mmu == priv->mmu) {
850                         found = true;
851                         break;
852                 }
853         }
854
855         spin_unlock(&ipmmu_slave_devices_lock);
856
857         return found ? sibling_priv->dev : NULL;
858 }
859
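/*
 * Devices that sit behind the same IPMMU instance share one IOMMU group:
 * reuse a sibling's group when one exists, otherwise fall back to a
 * per-device group.
 */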
860 static struct iommu_group *ipmmu_find_group_dma(struct device *dev)
861 {
862         struct iommu_group *group;
863         struct device *sibling;
864
865         sibling = ipmmu_find_sibling_device(dev);
866         if (sibling)
867                 group = iommu_group_get(sibling);
868         if (!sibling || IS_ERR(group))
869                 group = generic_device_group(dev);
870
871         return group;
872 }
873
874 static const struct iommu_ops ipmmu_ops = {
875         .domain_alloc = ipmmu_domain_alloc_dma,
876         .domain_free = ipmmu_domain_free_dma,
877         .attach_dev = ipmmu_attach_device,
878         .detach_dev = ipmmu_detach_device,
879         .map = ipmmu_map,
880         .unmap = ipmmu_unmap,
881         .map_sg = default_iommu_map_sg,
882         .iova_to_phys = ipmmu_iova_to_phys,
883         .add_device = ipmmu_add_device_dma,
884         .remove_device = ipmmu_remove_device_dma,
885         .device_group = ipmmu_find_group_dma,
886         .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
887         .of_xlate = ipmmu_of_xlate,
888 };
889
890 #endif /* CONFIG_IOMMU_DMA */
891
892 /* -----------------------------------------------------------------------------
893  * Probe/remove and init
894  */
895
896 static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
897 {
898         unsigned int i;
899
900         /* Disable all contexts. */
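        /* This clears IMCTR for all four hardware contexts, not just IPMMU_CTX_MAX. */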
901         for (i = 0; i < 4; ++i)
902                 ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
903 }
904
905 static int ipmmu_probe(struct platform_device *pdev)
906 {
907         struct ipmmu_vmsa_device *mmu;
908         struct resource *res;
909         int irq;
910         int ret;
911
912         mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
913         if (!mmu) {
914                 dev_err(&pdev->dev, "cannot allocate device data\n");
915                 return -ENOMEM;
916         }
917
918         mmu->dev = &pdev->dev;
919         mmu->num_utlbs = 32;
920         spin_lock_init(&mmu->lock);
921         bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
922
923         /* Map I/O memory and request IRQ. */
924         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
925         mmu->base = devm_ioremap_resource(&pdev->dev, res);
926         if (IS_ERR(mmu->base))
927                 return PTR_ERR(mmu->base);
928
929         /*
930          * The IPMMU has two register banks, for secure and non-secure modes.
931          * The bank mapped at the beginning of the IPMMU address space
932          * corresponds to the running mode of the CPU. When running in secure
933          * mode the non-secure register bank is also available at an offset.
934          *
935          * Secure mode operation isn't clearly documented and is thus currently
936          * not implemented in the driver. Furthermore, preliminary tests of
937          * non-secure operation with the main register bank were not successful.
938          * Offset the registers base unconditionally to point to the non-secure
939          * alias space for now.
940          */
941         mmu->base += IM_NS_ALIAS_OFFSET;
942
943         irq = platform_get_irq(pdev, 0);
944         if (irq < 0) {
945                 dev_err(&pdev->dev, "no IRQ found\n");
946                 return irq;
947         }
948
949         ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
950                                dev_name(&pdev->dev), mmu);
951         if (ret < 0) {
952                 dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
953                 return ret;
954         }
955
956         ipmmu_device_reset(mmu);
957
958         ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
959                                      dev_name(&pdev->dev));
960         if (ret)
961                 return ret;
962
963         iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
964         iommu_device_set_fwnode(&mmu->iommu, &pdev->dev.of_node->fwnode);
965
966         ret = iommu_device_register(&mmu->iommu);
967         if (ret)
968                 return ret;
969
970         /*
971          * We can't create the ARM mapping here as it requires the bus to have
972          * an IOMMU, which only happens when bus_set_iommu() is called in
973          * ipmmu_init() after the probe function returns.
974          */
975
976         platform_set_drvdata(pdev, mmu);
977
978         return 0;
979 }
980
981 static int ipmmu_remove(struct platform_device *pdev)
982 {
983         struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
984
985         iommu_device_sysfs_remove(&mmu->iommu);
986         iommu_device_unregister(&mmu->iommu);
987
988 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
989         arm_iommu_release_mapping(mmu->mapping);
990 #endif
991
992         ipmmu_device_reset(mmu);
993
994         return 0;
995 }
996
997 static const struct of_device_id ipmmu_of_ids[] = {
998         { .compatible = "renesas,ipmmu-vmsa", },
999         { }
1000 };
1001
1002 static struct platform_driver ipmmu_driver = {
1003         .driver = {
1004                 .name = "ipmmu-vmsa",
1005                 .of_match_table = of_match_ptr(ipmmu_of_ids),
1006         },
1007         .probe = ipmmu_probe,
1008         .remove = ipmmu_remove,
1009 };
1010
1011 static int __init ipmmu_init(void)
1012 {
1013         int ret;
1014
1015         ret = platform_driver_register(&ipmmu_driver);
1016         if (ret < 0)
1017                 return ret;
1018
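        /*
         * Only claim the platform bus if no other IOMMU driver has already
         * registered ops for it.
         */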
1019         if (!iommu_present(&platform_bus_type))
1020                 bus_set_iommu(&platform_bus_type, &ipmmu_ops);
1021
1022         return 0;
1023 }
1024
1025 static void __exit ipmmu_exit(void)
1026 {
1027         return platform_driver_unregister(&ipmmu_driver);
1028 }
1029
1030 subsys_initcall(ipmmu_init);
1031 module_exit(ipmmu_exit);
1032
1033 MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
1034 MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
1035 MODULE_LICENSE("GPL v2");