/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT		24
#define IDR0_STALL_MODEL_MASK		0x3
#define IDR0_STALL_MODEL_STALL		(0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE		(2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)
#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f
#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)
#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_ABORT			(1 << 20)
#define GBPA_UPDATE			(1 << 31)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)
#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74
#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)
#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc
/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)
#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)
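/*
 * Worked example (illustrative, not normative): for a queue with
 * max_n_shift == 8, the ring holds 256 entries. A pointer value of
 * 0x100 has Q_IDX == 0 with Q_WRP set, i.e. it has wrapped once, so
 * prod == 0x100 and cons == 0x000 compare as "full" (same index,
 * different wrap bits), while equal index *and* wrap bits mean
 * "empty". Bit 31 (Q_OVF) is the hardware overflow flag and is
 * carried over unchanged whenever we increment a pointer.
 */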
#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL
/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8
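/*
 * Illustrative example of the two-level lookup implied by STRTAB_SPLIT:
 * for SID 0x1234, sid >> STRTAB_SPLIT == 0x12 indexes the L1 table and
 * sid & ((1 << STRTAB_SPLIT) - 1) == 0x34 indexes the 256-entry L2
 * table, so all SIDs sharing the top bits (e.g. devices behind one PCI
 * bus) share one lazily-allocated L2 table.
 */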
#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_1_PRIVCFG_UNPRIV	2UL
#define STRTAB_STE_1_PRIVCFG_SHIFT	48

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL
/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0
/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
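/*
 * Worked example (illustrative): the TG0 field lives at bit 14 of the
 * CPU's TCR (ARM64_TCR_TG0_SHIFT) but at bit 6 of the context
 * descriptor (CTXDESC_CD_0_TCR_TG0_SHIFT), so ARM_SMMU_TCR2CD(tcr, TG0)
 * extracts the two TG0 bits from 'tcr' and re-deposits them at the CD
 * offset. Fields such as IRGN0/ORGN0/SH0 share the same offset in both
 * formats and simply repack to themselves.
 */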
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)
/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};
enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};
struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32	sid;
			u8	size;
			u64	addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32	sid;
			bool	leaf;
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16	asid;
			u16	vmid;
			bool	leaf;
			u64	addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32		sid;
			u32		ssid;
			u16		grpid;
			enum pri_resp	resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};
struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};
/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass;	/* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};
/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;
};
/* SMMU private data for each master */
struct arm_smmu_master_data {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);

	/*
	 * Ensure that all CPU accesses (reads and writes) to the queue
	 * are complete before we update the cons pointer.
	 */
	mb();
	writel_relaxed(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}

/*
 * Wait for the SMMU to consume items. If drain is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}
static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}
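/*
 * Usage sketch (illustrative): the SMMU produces into the event and PRI
 * queues and this driver consumes them with queue_remove_raw(); the
 * driver produces into the command queue with queue_insert_raw() and
 * the SMMU consumes. A producer that sees -ENOSPC polls the consumer
 * pointer via queue_poll_cons() until a slot frees up, exactly as
 * arm_smmu_cmdq_issue_cmd() does below.
 */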
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
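/*
 * Illustrative sketch, not called by the driver: building a leaf
 * TLBI_NH_VA command by hand. The field values here are arbitrary
 * examples.
 */
static void __maybe_unused arm_smmu_cmdq_build_cmd_example(void)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_cmdq_ent ent = {
		.opcode	= CMDQ_OP_TLBI_NH_VA,
		.tlbi	= {
			.asid	= 1,
			.addr	= 0x10000,
			.leaf	= true,
		},
	};

	/* cmd[0] carries the opcode and ASID, cmd[1] the leaf flag and VA */
	if (!arm_smmu_cmdq_build_cmd(cmd, &ent))
		pr_debug("TLBI_NH_VA: 0x%016llx 0x%016llx\n", cmd[0], cmd[1]);
}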
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	while (queue_insert_raw(q, cmd) == -ENOSPC) {
		if (queue_poll_cons(q, false, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}
/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing STE_0 value, as we're going to rewrite it */
	val = ste->valid ? STRTAB_STE_0_V : 0;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
			 << STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
			 STRTAB_STE_1_PRIVCFG_UNPRIV <<
			 STRTAB_STE_1_PRIVCFG_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS)
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
		        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	/* See comment in arm_smmu_write_ctx_desc() */
	WRITE_ONCE(dst[0], cpu_to_le64(val));
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt)) {
			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

			dev_info(smmu->dev, "event 0x%02x received:\n", id);
			for (i = 0; i < ARRAY_SIZE(evt); ++i)
				dev_info(smmu->dev, "\t0x%016llx\n",
					 (unsigned long long)evt[i]);
		}

		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * trying harder.
		 */
		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}
static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
	u32 sid, ssid;
	u16 grpid;
	bool ssv, last;

	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
	ssv = evt[0] & PRIQ_0_SSID_V;
	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
	last = evt[0] & PRIQ_0_PRG_LAST;
	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

	dev_info(smmu->dev, "unexpected PRI request received:\n");
	dev_info(smmu->dev,
		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
		 sid, ssid, grpid, last ? "L" : "",
		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
		 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

	if (last) {
		struct arm_smmu_cmdq_ent cmd = {
			.opcode			= CMDQ_OP_PRI_RESP,
			.substream_valid	= ssv,
			.pri			= {
				.sid	= sid,
				.ssid	= ssid,
				.grpid	= grpid,
				.resp	= PRI_RESP_DENY,
			},
		};

		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt))
			arm_smmu_handle_ppr(smmu, evt);

		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	writel(q->cons, q->cons_reg);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR)
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

	if (active & GERROR_MSI_EVTQ_ABT_ERR)
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}
static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	iommu_put_dma_cookie(domain);
	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (asid < 0)
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (vmid < 0)
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (ret < 0) {
		free_io_pgtable_ops(pgtbl_ops);
		return ret;
	}

	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}

static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
	int i, j;
	struct arm_smmu_master_data *master = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = master->smmu;

	for (i = 0; i < fwspec->num_ids; ++i) {
		u32 sid = fwspec->ids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		/* Bridged PCI devices may end up with duplicated IDs */
		for (j = 0; j < i; j++)
			if (fwspec->ids[j] == sid)
				break;
		if (j < i)
			continue;

		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
	}

	return 0;
}

static void arm_smmu_detach_dev(struct device *dev)
{
	struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

	master->ste.bypass = true;
	if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
		dev_warn(dev, "failed to install bypass STE\n");
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_data *master;
	struct arm_smmu_strtab_ent *ste;

	if (!dev->iommu_fwspec)
		return -ENOENT;

	master = dev->iommu_fwspec->iommu_priv;
	smmu = master->smmu;
	ste = &master->ste;

	/* Already attached to a different domain? */
	if (!ste->bypass)
		arm_smmu_detach_dev(dev);

	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	ste->bypass = false;
	ste->valid = true;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
	if (ret < 0)
		ste->valid = false;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static struct platform_driver arm_smmu_driver;

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						np, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_data *master;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct iommu_group *group;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return -ENODEV;
	/*
	 * We _can_ actually withstand dodgy bus code re-calling add_device()
	 * without an intervening remove_device()/of_xlate() sequence, but
	 * we're not going to do so quietly...
	 */
	if (WARN_ON_ONCE(fwspec->iommu_priv)) {
		master = fwspec->iommu_priv;
		smmu = master->smmu;
	} else {
		smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
		if (!smmu)
			return -ENODEV;
		master = kzalloc(sizeof(*master), GFP_KERNEL);
		if (!master)
			return -ENOMEM;

		master->smmu = smmu;
		fwspec->iommu_priv = master;
	}

	/* Check the SIDs are in range of the SMMU and our stream table */
	for (i = 0; i < fwspec->num_ids; i++) {
		u32 sid = fwspec->ids[i];

		if (!arm_smmu_sid_in_range(smmu, sid))
			return -ERANGE;

		/* Ensure l2 strtab is initialised */
		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
			ret = arm_smmu_init_l2_strtab(smmu, sid);
			if (ret)
				return ret;
		}
	}

	group = iommu_group_get_for_dev(dev);
	if (!IS_ERR(group))
		iommu_group_put(group);

	return PTR_ERR_OR_ZERO(group);
}
static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_data *master;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	master = fwspec->iommu_priv;
	if (master && master->ste.valid)
		arm_smmu_detach_dev(dev);
	iommu_group_remove_device(dev);
	kfree(master);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * We don't support devices sharing stream IDs other than PCI RID
	 * aliases, since the necessary ID-to-device lookup becomes rather
	 * impractical given a potentially sparse 32-bit stream ID space.
	 */
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/*
	 * If we can resolve everything with a single L2 table, then we
	 * just need a single L1 descriptor. Otherwise, calculate the L1
	 * size, capped to the SIDSIZE.
	 */
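	/*
	 * Worked example (illustrative): with STRTAB_L1_SZ_SHIFT == 20 and
	 * STRTAB_L1_DESC_DWORDS == 1, the cap below is 20 - 3 = 17, i.e.
	 * 1 << 17 == 128k L1 descriptors, which together with
	 * STRTAB_SPLIT == 8 covers 25 bits of SID; a 32-bit SID space
	 * therefore triggers the "only covers %u/%u bits" warning.
	 */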
	if (smmu->sid_bits < STRTAB_SPLIT) {
		size = 0;
	} else {
		size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
		size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	}
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;
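	/*
	 * Illustrative sizing note: each STE is STRTAB_STE_DWORDS << 3 ==
	 * 64 bytes, so e.g. sid_bits == 16 gives a 4MB linear table; this
	 * is why the two-level format is preferred for large SID spaces.
	 */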
	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

/* GBPA is "special" */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
	int ret;
	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	reg &= ~clr;
	reg |= set;
	writel_relaxed(reg | GBPA_UPDATE, gbpa);
	return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
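/*
 * Illustrative note on why GBPA is "special": unlike CR0/CR0ACK there is
 * no separate ACK register; the protocol is to wait for GBPA.Update to
 * clear, modify the fields, write the value back with GBPA_UPDATE set,
 * and wait for the bit to clear again. arm_smmu_device_reset() below
 * uses arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT) to clear GBPA.ABORT so
 * that incoming transactions bypass the SMMU while translation is left
 * disabled.
 */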
static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}

static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
			else
				irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (random crap) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret) {
			dev_err(smmu->dev, "GBPA not responding to update\n");
			return ret;
		}
	}
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}
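
/*
 * A minimal sketch of the write-and-wait-for-ack pattern used throughout
 * the reset path above: CR0ACK mirrors CR0 once the hardware has acted on
 * each field change. arm_smmu_write_reg_sync(), defined earlier in this
 * file, is assumed to implement roughly the following, using a bounded
 * poll rather than an open-coded loop:
 *
 *	writel_relaxed(val, smmu->base + reg_off);
 *	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg,
 *					  reg == val, 1,
 *					  ARM_SMMU_POLL_TIMEOUT_US);
 */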
static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The dma-coherent property is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	coherent = of_dma_is_coherent(smmu->dev->of_node);
	if (coherent)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
	case IDR0_STALL_MODEL_STALL:
		/* Fallthrough */
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}
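
/*
 * Worked example of the queue-size decode above (value illustrative only):
 * if IDR1.CMDQS (bits [25:21]) reads back as 8, the hardware supports a
 * 2^8 = 256-entry command queue; min() then clamps the log2 size to
 * CMDQ_MAX_SZ_SHIFT so the queue allocation never exceeds the driver's
 * cap. The EVTQ and PRIQ fields are decoded the same way.
 */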
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass = true;
	u32 cells;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		bypass = false;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	parse_driver_options(smmu);

	/* Probe the h/w */
	ret = arm_smmu_device_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
	return 0;
}
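
/*
 * For reference, a device-tree node matching what this probe routine
 * expects might look as follows (addresses and interrupt numbers are
 * illustrative only; note the 128K MMIO region and the interrupt names
 * requested above):
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *		#iommu-cells = <1>;
 *	};
 */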
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
	return 0;
}
static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	/* May be reached both as an initcall and via arm_smmu_of_init() */
	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}
static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
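
/*
 * IOMMU_OF_DECLARE() arranges for arm_smmu_of_init() to be called when the
 * OF IOMMU table is scanned for matching "arm,smmu-v3" nodes. Since both
 * that path and the subsys_initcall above end up in arm_smmu_init(), the
 * 'registered' guard there prevents the driver being registered twice.
 */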
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");