/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT		24
#define IDR0_STALL_MODEL_MASK		0x3
#define IDR0_STALL_MODEL_STALL		(0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE		(2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)
#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_ABORT			(1 << 20)
#define GBPA_UPDATE			(1 << 31)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74
#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc
/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)
#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)
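/*
 * Queue pointer arithmetic, by worked example: assuming max_n_shift == 8
 * (a 256-entry queue), pointer 0xff is entry 255 with the wrap bit clear;
 * adding one gives 0x100, i.e. entry 0 with the wrap bit (bit 8) toggled.
 * The queue helpers below treat "indices equal, wrap bits equal" as empty
 * and "indices equal, wrap bits different" as full, with bit 31 reserved
 * for the overflow flag.
 */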
#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL
/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8
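/*
 * Checking the comment above against the constants: the L1 table is capped
 * at 1 << STRTAB_L1_SZ_SHIFT bytes and each L1 descriptor is one dword
 * (8 bytes), so it holds up to 1 << (20 - 3) == 128k entries; STRTAB_SPLIT
 * == 8 means each lazily-allocated L2 table spans 1 << 8 == 256 STEs,
 * i.e. one PCI bus's worth of stream IDs.
 */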
#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL
/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0
/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
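/*
 * For instance (an illustrative expansion, not extra functionality):
 * ARM_SMMU_TCR2CD(tcr, TG0) extracts TCR.TG0 from bits [15:14] of the
 * CPU register value (ARM64_TCR_TG0_SHIFT == 14, mask 0x3) and re-seats
 * it at bits [7:6] of CD dword 0 (CTXDESC_CD_0_TCR_TG0_SHIFT == 6),
 * which is how arm_smmu_cpu_tcr_to_cd() below repacks each TTBR0 field.
 */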
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)
/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100
#define ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US	1000000 /* 1s! */

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_HISILICON_HI161X
#define ACPI_IORT_SMMU_HISILICON_HI161X		0x1
#endif

#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX		0x2
#endif
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
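/*
 * Because the flag is declared with module_param_named(), it can also be
 * flipped at boot time, e.g. "arm_smmu_v3.disable_bypass=1" on the kernel
 * command line (assuming the usual KBUILD_MODNAME-derived parameter prefix
 * when the driver is built in), and read back at runtime under
 * /sys/module/arm_smmu_v3/parameters/.
 */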
enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};
struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};
struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;

	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};
/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	/*
	 * An STE is "assigned" if the master emitting the corresponding SID
	 * is attached to a domain. The behaviour of an unassigned STE is
	 * determined by the disable_bypass parameter, whereas an assigned
	 * STE behaves according to s1_cfg/s2_cfg, which themselves are
	 * configured according to the domain type.
	 */
	bool				assigned;
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};
/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY	(1 << 1)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;
	int				combined_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};
/* SMMU private data for each master */
struct arm_smmu_master_data {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
	{ 0, NULL},
};
static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
						 struct arm_smmu_device *smmu)
{
	/* Broken implementations fold the page-1 registers into page 0 */
	if ((offset > SZ_64K) &&
	    (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY))
		offset -= SZ_64K;

	return smmu->base + offset;
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);

	/*
	 * Ensure that all CPU accesses (reads and writes) to the queue
	 * are complete before we update the cons pointer.
	 */
	mb();
	writel_relaxed(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}
/*
 * Wait for the SMMU to consume items. If drain is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
	ktime_t timeout;
	unsigned int delay = 1;

	/* Wait longer if it's a queue drain */
	timeout = ktime_add_us(ktime_get(), drain ?
					    ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US :
					    ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(delay);
			delay *= 2;
		}
	}

	return 0;
}
static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
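/*
 * A concrete illustration of the encoding (worked by hand, not extra
 * driver logic): a CFGI_STE for sid 0x42 with leaf == true is built as
 * cmd[0] = 0x3 | ((u64)0x42 << 32) and cmd[1] = CMDQ_CFGI_1_LEAF, i.e.
 * the opcode in bits [7:0] of dword 0, the SID in its top word, and the
 * leaf flag in bit 0 of dword 1.
 */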
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	while (queue_insert_raw(q, cmd) == -ENOSPC) {
		if (queue_poll_cons(q, false, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}
/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass/fault (init)
	 * 2. Bypass/fault -> translation/bypass (attach)
	 * 3. Translation/bypass -> bypass/fault (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing STE_0 value, as we're going to rewrite it */
	val = STRTAB_STE_0_V;

	/* Bypass/fault */
	if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
		if (!ste->assigned && disable_bypass)
			val |= STRTAB_STE_0_CFG_ABORT;
		else
			val |= STRTAB_STE_0_CFG_BYPASS;

		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
			 << STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		if (smmu)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS)
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
			<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	/* See comment in arm_smmu_write_ctx_desc() */
	WRITE_ONCE(dst[0], cpu_to_le64(val));
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
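/*
 * Making the three STE transitions concrete (a hand-worked sketch of
 * dword 0, not additional driver code): an unassigned STE with
 * disable_bypass clear ends up as val == STRTAB_STE_0_V |
 * STRTAB_STE_0_CFG_BYPASS (0b1001), an aborting one as V | CFG_ABORT
 * (0b0001), and a stage-1 attach as V | CFG_S1_TRANS (0b1011) with the
 * CD table pointer folded into bits [47:6].
 */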
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = { .assigned = false };

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt)) {
			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

			dev_info(smmu->dev, "event 0x%02x received:\n", id);
			for (i = 0; i < ARRAY_SIZE(evt); ++i)
				dev_info(smmu->dev, "\t0x%016llx\n",
					 (unsigned long long)evt[i]);
		}

		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * trying harder.
		 */
		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}
static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
	u32 sid, ssid;
	u16 grpid;
	bool ssv, last;

	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
	ssv = evt[0] & PRIQ_0_SSID_V;
	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
	last = evt[0] & PRIQ_0_PRG_LAST;
	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

	dev_info(smmu->dev, "unexpected PRI request received:\n");
	dev_info(smmu->dev,
		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
		 sid, ssid, grpid, last ? "L" : "",
		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
		 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

	if (last) {
		struct arm_smmu_cmdq_ent cmd = {
			.opcode			= CMDQ_OP_PRI_RESP,
			.substream_valid	= ssv,
			.pri			= {
				.sid	= sid,
				.ssid	= ssid,
				.grpid	= grpid,
				.resp	= PRI_RESP_DENY,
			},
		};

		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt))
			arm_smmu_handle_ppr(smmu, evt);

		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	writel(q->cons, q->cons_reg);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR)
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

	if (active & GERROR_MSI_EVTQ_ABT_ERR)
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;

	arm_smmu_evtq_thread(irq, dev);
	if (smmu->features & ARM_SMMU_FEAT_PRI)
		arm_smmu_priq_thread(irq, dev);

	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
{
	arm_smmu_gerror_handler(irq, dev);
	arm_smmu_cmdq_sync_handler(irq, dev);
	return IRQ_WAKE_THREAD;
}
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}

static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	return &smmu_domain->domain;
}
static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
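/*
 * The find/test_and_set pairing above keeps the allocator safe against
 * concurrent callers without a lock: if two threads race for the same
 * bit, one loses test_and_set_bit() and simply rescans. A typical use,
 * as in arm_smmu_domain_finalise_s1() below, is
 * arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits).
 */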
static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	iommu_put_dma_cookie(domain);
	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (asid < 0)
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (vmid < 0)
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		return 0;
	}

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (ret < 0) {
		free_io_pgtable_ops(pgtbl_ops);
		return ret;
	}

	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}
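/*
 * Index arithmetic for the two-level walk, by worked example: with
 * STRTAB_SPLIT == 8, sid 0x1234 selects L1 descriptor 0x12 (sid >> 8),
 * then entry 0x34 within that descriptor's L2 table, i.e. dword offset
 * 0x34 * STRTAB_STE_DWORDS into l2ptr.
 */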
static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
	int i, j;
	struct arm_smmu_master_data *master = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = master->smmu;

	for (i = 0; i < fwspec->num_ids; ++i) {
		u32 sid = fwspec->ids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		/* Bridged PCI devices may end up with duplicated IDs */
		for (j = 0; j < i; j++)
			if (fwspec->ids[j] == sid)
				break;
		if (j < i)
			continue;

		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
	}
}

static void arm_smmu_detach_dev(struct device *dev)
{
	struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

	master->ste.assigned = false;
	arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_data *master;
	struct arm_smmu_strtab_ent *ste;

	if (!dev->iommu_fwspec)
		return -ENOENT;

	master = dev->iommu_fwspec->iommu_priv;
	smmu = master->smmu;
	ste = &master->ste;

	/* Already attached to a different domain? */
	if (ste->assigned)
		arm_smmu_detach_dev(dev);

	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	ste->assigned = true;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
		ste->s1_cfg = NULL;
		ste->s2_cfg = NULL;
	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	arm_smmu_install_ste_for_dev(dev->iommu_fwspec);

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	return ops->map(ops, iova, paddr, size, prot);
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (!ops)
		return 0;

	return ops->unmap(ops, iova, size);
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	return ops->iova_to_phys(ops, iova);
}
static struct platform_driver arm_smmu_driver;

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_data *master;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct iommu_group *group;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return -ENODEV;
	/*
	 * We _can_ actually withstand dodgy bus code re-calling add_device()
	 * without an intervening remove_device()/of_xlate() sequence, but
	 * we're not going to do so quietly...
	 */
	if (WARN_ON_ONCE(fwspec->iommu_priv)) {
		master = fwspec->iommu_priv;
		smmu = master->smmu;
	} else {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
		if (!smmu)
			return -ENODEV;
		master = kzalloc(sizeof(*master), GFP_KERNEL);
		if (!master)
			return -ENOMEM;

		master->smmu = smmu;
		fwspec->iommu_priv = master;
	}

	/* Check the SIDs are in range of the SMMU and our stream table */
	for (i = 0; i < fwspec->num_ids; i++) {
		u32 sid = fwspec->ids[i];

		if (!arm_smmu_sid_in_range(smmu, sid))
			return -ERANGE;

		/* Ensure l2 strtab is initialised */
		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
			ret = arm_smmu_init_l2_strtab(smmu, sid);
			if (ret)
				return ret;
		}
	}

	group = iommu_group_get_for_dev(dev);
	if (!IS_ERR(group)) {
		iommu_group_put(group);
		iommu_device_link(&smmu->iommu, dev);
	}

	return PTR_ERR_OR_ZERO(group);
}
static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_data *master;
	struct arm_smmu_device *smmu;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	master = fwspec->iommu_priv;
	smmu = master->smmu;
	if (master && master->ste.assigned)
		arm_smmu_detach_dev(dev);
	iommu_group_remove_device(dev);
	iommu_device_unlink(&smmu->iommu, dev);
	kfree(master);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * We don't support devices sharing stream IDs other than PCI RID
	 * aliases, since the necessary ID-to-device lookup becomes rather
	 * impractical given a potential sparse 32-bit stream ID space.
	 */
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= arm_smmu_page1_fixup(prod_off, smmu);
	q->cons_reg	= arm_smmu_page1_fixup(cons_off, smmu);
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/* Calculate the L1 size, capped to the SIDSIZE. */
	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

/* GBPA is "special" */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
	int ret;
	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	reg &= ~clr;
	reg |= set;
	writel_relaxed(reg | GBPA_UPDATE, gbpa);
	return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
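/*
 * The dance above mirrors the architected GBPA update protocol: wait for
 * GBPA.Update to clear, modify the shadow value, write it back with Update
 * set, then wait for the bit to clear again. A caller that wants incoming
 * transactions to bypass while SMMUEN is clear would, under that protocol,
 * invoke arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT).
 */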
static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}
static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
{
	int irq, ret;

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
		}
	}
}

static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	irq = smmu->combined_irq;
	if (irq) {
		/*
		 * The Cavium ThunderX2 implementation doesn't support unique
		 * irq lines. Use a single irq line for all the SMMUv3
		 * interrupts.
		 */
		ret = devm_request_threaded_irq(smmu->dev, irq,
					arm_smmu_combined_irq_handler,
					arm_smmu_combined_irq_thread,
					IRQF_ONESHOT,
					"arm-smmu-v3-combined-irq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable combined irq\n");
	} else
		arm_smmu_setup_unique_irqs(smmu);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}

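/* Disable the SMMU and all queue processing by clearing CR0. */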
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}

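/*
 * Full device bring-up: disable the SMMU, program memory attributes and the
 * stream table, enable the queues one by one (issuing config and TLB
 * invalidation once the command queue is live), set up IRQs, and finally
 * either enable translation or leave the SMMU in bypass.
 */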
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (PTM, RECINVSID and E2H) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
	writel_relaxed(smmu->evtq.q.cons,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
		writel_relaxed(smmu->priq.q.cons,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret) {
			dev_err(smmu->dev, "GBPA not responding to update\n");
			return ret;
		}
	}
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}

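/*
 * Probe the hardware ID registers (IDR0/1/5) and derive the feature set,
 * queue sizes, SID/ASID/VMID widths, supported page sizes and address
 * sizes that the rest of the driver relies on.
 */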
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
	case IDR0_STALL_MODEL_STALL:
		/* Fallthrough */
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/*
	 * If the SMMU supports fewer bits than would fill a single L2 stream
	 * table, use a linear table instead.
	 */
	if (smmu->sid_bits <= STRTAB_SPLIT)
		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			 "unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}

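/* Apply model-specific options (implementation errata) from the IORT table. */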
static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
{
	switch (model) {
	case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
		break;
	case ACPI_IORT_SMMU_HISILICON_HI161X:
		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
		break;
	}

	dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
}

#ifdef CONFIG_ACPI
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	acpi_smmu_get_options(iort_smmu->model, smmu);

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

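/*
 * For reference, a minimal illustrative DT node matching this probe path
 * (addresses and interrupt specifiers below are made-up examples, not a
 * real platform):
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <0 74 1>, <0 75 1>, <0 77 1>, <0 79 1>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *		#iommu-cells = <1>;
 *		dma-coherent;
 *	};
 */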
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}

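/*
 * Implementations flagged ARM_SMMU_OPT_PAGE0_REGS_ONLY (Cavium CN99xx)
 * locate the registers that architecturally live in page 1 within page 0
 * (see arm_smmu_page1_fixup()), so only half the architected 128K MMIO
 * region is claimed.
 */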
static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
{
	if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
		return SZ_64K;
	else
		return SZ_128K;
}

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	ioaddr = res->start;

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "combined");
	if (irq > 0)
		smmu->combined_irq = irq;
	else {
		irq = platform_get_irq_byname(pdev, "eventq");
		if (irq > 0)
			smmu->evtq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "priq");
		if (irq > 0)
			smmu->priq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "cmdq-sync");
		if (irq > 0)
			smmu->cmdq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "gerror");
		if (irq > 0)
			smmu->gerr_irq = irq;
	}

	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	ret = iommu_device_register(&smmu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
	return 0;
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	arm_smmu_device_remove(pdev);
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");