/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT		24
#define IDR0_STALL_MODEL_MASK		0x3
#define IDR0_STALL_MODEL_STALL		(0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE		(2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_ABORT			(1 << 20)
#define GBPA_UPDATE			(1 << 31)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)

#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL

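/*
 * Worked example of the queue pointer arithmetic above (illustrative
 * values, not taken from any particular hardware): with
 * max_n_shift == 8 (a 256-entry queue), a pointer of 0x80000105
 * decodes as
 *
 *	Q_IDX(q, p) = 0x105 & 0xff = 5		(slot index)
 *	Q_WRP(q, p) = 0x105 & 0x100		(wrap bit set)
 *	Q_OVF(q, p) = p & (1 << 31)		(overflow flag set)
 *
 * The wrap bit toggles on each index roll-over, which is how
 * queue_full()/queue_empty() further down tell the two
 * "indices equal" cases apart.
 */
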
/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8

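/*
 * Worked sizing example (derived from the constants above): with
 * STRTAB_L1_SZ_SHIFT == 20 and 8-byte L1 descriptors, the L1 table
 * tops out at 1 << (20 - 3) == 128k entries, and STRTAB_SPLIT == 8
 * gives 256 STEs per lazily-allocated L2 table -- one L2 table per
 * PCI bus, as the comment above notes.
 */
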
#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_1_PRIVCFG_UNPRIV	2UL
#define STRTAB_STE_1_PRIVCFG_SHIFT	48

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)

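/*
 * Expansion sketch of the macro above: ARM_SMMU_TCR2CD(tcr, TG0)
 * becomes
 *
 *	(((tcr) >> ARM64_TCR_TG0_SHIFT & ARM64_TCR_TG0_MASK)
 *			<< CTXDESC_CD_0_TCR_TG0_SHIFT)
 *
 * i.e. the 2-bit TG0 field moves from bit 14 of the CPU's TCR to
 * bit 6 of the context descriptor's TCR field.
 */
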
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};

struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass;	/* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};

/* SMMU private data for each master */
struct arm_smmu_master_data {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

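/*
 * Illustrative values for the two helpers above: on a 256-entry queue,
 * prod == 0x105 and cons == 0x005 have equal indices but different
 * wrap bits, so the queue is full; prod == cons == 0x105 (indices and
 * wrap bits both equal) means the queue is empty.
 */
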
static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}

/*
 * Wait for the SMMU to consume items. If drain is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}

/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

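/*
 * Example encoding (illustrative, using the field definitions above):
 * a CMDQ_OP_TLBI_NH_VA entry for ASID 1, address 0x1000, leaf set
 * would be built as
 *
 *	cmd[0] = 0x12 | (1ULL << 48);	// opcode in [7:0], ASID in [63:48]
 *	cmd[1] = 0x1000 | 1;		// VA[63:12], leaf bit
 */
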
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}

static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	while (queue_insert_raw(q, cmd) == -ENOSPC) {
		if (queue_poll_cons(q, false, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}

/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}

/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing Config, as we're going to rewrite it */
	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

	if (ste->valid)
		val |= STRTAB_STE_0_V;
	else
		val &= ~STRTAB_STE_0_V;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
						<< STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
			 STRTAB_STE_1_PRIVCFG_UNPRIV <<
			 STRTAB_STE_1_PRIVCFG_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS)
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
			<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}

static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}

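/*
 * Size check for the allocation above (using the default constants):
 * each L2 table is 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3)
 * == 1 << (8 + 3 + 3) == 16KB, i.e. 256 STEs of 64 bytes each,
 * allocated only once a master behind that SID range shows up.
 */
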
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt)) {
			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

			dev_info(smmu->dev, "event 0x%02x received:\n", id);
			for (i = 0; i < ARRAY_SIZE(evt); ++i)
				dev_info(smmu->dev, "\t0x%016llx\n",
					 (unsigned long long)evt[i]);
		}

		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * trying harder.
		 */
		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
	u32 sid, ssid;
	u16 grpid;
	bool ssv, last;

	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
	ssv = evt[0] & PRIQ_0_SSID_V;
	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
	last = evt[0] & PRIQ_0_PRG_LAST;
	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

	dev_info(smmu->dev, "unexpected PRI request received:\n");
	dev_info(smmu->dev,
		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
		 sid, ssid, grpid, last ? "L" : "",
		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
		 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

	if (last) {
		struct arm_smmu_cmdq_ent cmd = {
			.opcode			= CMDQ_OP_PRI_RESP,
			.substream_valid	= ssv,
			.pri			= {
				.sid	= sid,
				.ssid	= ssid,
				.grpid	= grpid,
				.resp	= PRI_RESP_DENY,
			},
		};

		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt))
			arm_smmu_handle_ppr(smmu, evt);

		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR)
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

	if (active & GERROR_MSI_EVTQ_ABT_ERR)
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}

/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}

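/*
 * Note the cost model implied by the loop above: the range is
 * invalidated one granule at a time, so unmapping e.g. 2MB backed by
 * 4KB pages emits 512 TLBI commands (plus the eventual CMD_SYNC from
 * arm_smmu_tlb_sync()).
 */
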
static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}

static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	iommu_put_dma_cookie(domain);
	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}

static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (asid < 0)
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (vmid < 0)
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}

static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (ret < 0)
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}

static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}

static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
	int i;
	struct arm_smmu_master_data *master = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = master->smmu;

	for (i = 0; i < fwspec->num_ids; ++i) {
		u32 sid = fwspec->ids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
	}

	return 0;
}

static void arm_smmu_detach_dev(struct device *dev)
{
	struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

	master->ste.bypass = true;
	if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
		dev_warn(dev, "failed to install bypass STE\n");
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_data *master;
	struct arm_smmu_strtab_ent *ste;

	if (!dev->iommu_fwspec)
		return -ENOENT;

	master = dev->iommu_fwspec->iommu_priv;
	smmu = master->smmu;
	ste = &master->ste;

	/* Already attached to a different domain? */
	if (!ste->bypass)
		arm_smmu_detach_dev(dev);

	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	ste->bypass = false;
	ste->valid = true;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
	if (ret < 0)
		ste->valid = false;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static struct platform_driver arm_smmu_driver;

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}

static struct iommu_ops arm_smmu_ops;

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_data *master;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct iommu_group *group;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return -ENODEV;
	/*
	 * We _can_ actually withstand dodgy bus code re-calling add_device()
	 * without an intervening remove_device()/of_xlate() sequence, but
	 * we're not going to do so quietly...
	 */
	if (WARN_ON_ONCE(fwspec->iommu_priv)) {
		master = fwspec->iommu_priv;
		smmu = master->smmu;
	} else {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
		if (!smmu)
			return -ENODEV;
		master = kzalloc(sizeof(*master), GFP_KERNEL);
		if (!master)
			return -ENOMEM;

		master->smmu = smmu;
		fwspec->iommu_priv = master;
	}

	/* Check the SIDs are in range of the SMMU and our stream table */
	for (i = 0; i < fwspec->num_ids; i++) {
		u32 sid = fwspec->ids[i];

		if (!arm_smmu_sid_in_range(smmu, sid))
			return -ERANGE;

		/* Ensure l2 strtab is initialised */
		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
			ret = arm_smmu_init_l2_strtab(smmu, sid);
			if (ret)
				return ret;
		}
	}

	group = iommu_group_get_for_dev(dev);
	if (!IS_ERR(group)) {
		iommu_group_put(group);
		iommu_device_link(&smmu->iommu, dev);
	}

	return PTR_ERR_OR_ZERO(group);
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_data *master;
	struct arm_smmu_device *smmu;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	master = fwspec->iommu_priv;
	smmu = master->smmu;
	if (master && master->ste.valid)
		arm_smmu_detach_dev(dev);
	iommu_group_remove_device(dev);
	iommu_device_unlink(&smmu->iommu, dev);
	kfree(master);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * We don't support devices sharing stream IDs other than PCI RID
	 * aliases, since the necessary ID-to-device lookup becomes rather
	 * impractical given a potential sparse 32-bit stream ID space.
	 */
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;
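
	/*
	 * Worked example (illustrative): a 256-entry queue (max_n_shift
	 * == 8) based at DMA address 0x80000000 yields
	 * q_base == Q_BASE_RWA | 0x80000000 | 8, i.e. the read-allocate
	 * hint, the base address (bits [48:5] per Q_BASE_ADDR_MASK) and
	 * LOG2SIZE in bits [4:0].
	 */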

	q->prod = q->cons = 0;
	return 0;
}

static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}

static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}

static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/*
	 * If we can resolve everything with a single L2 table, then we
	 * just need a single L1 descriptor. Otherwise, calculate the L1
	 * size, capped to the SIDSIZE.
	 */
	if (smmu->sid_bits < STRTAB_SPLIT) {
		size = 0;
	} else {
		size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
		size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	}
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			l1size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}

static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}

static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}

static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

/* GBPA is "special" */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
	int ret;
	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	reg &= ~clr;
	reg |= set;
	writel_relaxed(reg | GBPA_UPDATE, gbpa);
	return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

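/*
 * Usage sketch (hypothetical call, mirroring the one in
 * arm_smmu_device_reset()): arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT)
 * clears the ABORT bit so incoming transactions bypass a disabled
 * SMMU. Note the read-modify-write above must poll GBPA_UPDATE clear
 * both before and after the write, per the register's update protocol.
 */
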
static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}

static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}

static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
			else
				irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}

static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
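
	/*
	 * Worked expansion of the value just written (assuming the
	 * CR1_SH_ISH == 3 and CR1_CACHE_WB == 1 encodings restored
	 * above): reg == 0xd75, i.e. inner-shareable, write-back
	 * cacheable walks for both table and queue accesses.
	 */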
2296 /* CR2 (random crap) */
2297 reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
2298 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
2301 writeq_relaxed(smmu->strtab_cfg.strtab_base,
2302 smmu->base + ARM_SMMU_STRTAB_BASE);
2303 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
2304 smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
2307 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
2308 writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
2309 writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
2311 enables = CR0_CMDQEN;
2312 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2315 dev_err(smmu->dev, "failed to enable command queue\n");
2319 /* Invalidate any cached configuration */
2320 cmd.opcode = CMDQ_OP_CFGI_ALL;
2321 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2322 cmd.opcode = CMDQ_OP_CMD_SYNC;
2323 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2325 /* Invalidate any stale TLB entries */
2326 if (smmu->features & ARM_SMMU_FEAT_HYP) {
2327 cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
2328 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2331 cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
2332 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2333 cmd.opcode = CMDQ_OP_CMD_SYNC;
2334 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2337 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
2338 writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
2339 writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);
2341 enables |= CR0_EVTQEN;
2342 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2345 dev_err(smmu->dev, "failed to enable event queue\n");
2350 if (smmu->features & ARM_SMMU_FEAT_PRI) {
2351 writeq_relaxed(smmu->priq.q.q_base,
2352 smmu->base + ARM_SMMU_PRIQ_BASE);
2353 writel_relaxed(smmu->priq.q.prod,
2354 smmu->base + ARM_SMMU_PRIQ_PROD);
2355 writel_relaxed(smmu->priq.q.cons,
2356 smmu->base + ARM_SMMU_PRIQ_CONS);
2358 enables |= CR0_PRIQEN;
2359 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2362 dev_err(smmu->dev, "failed to enable PRI queue\n");
2367 ret = arm_smmu_setup_irqs(smmu);
2369 dev_err(smmu->dev, "failed to setup irqs\n");
2374 /* Enable the SMMU interface, or ensure bypass */
2375 if (!bypass || disable_bypass) {
2376 enables |= CR0_SMMUEN;
2378 ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
2380 dev_err(smmu->dev, "GBPA not responding to update\n");
2384 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2387 dev_err(smmu->dev, "failed to enable SMMU interface\n");
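
/*
 * Interrogate the hardware ID registers (IDR0, IDR1, IDR5) to discover
 * the features, queue/ASID/VMID/StreamID sizes and page granules this
 * implementation supports, rejecting configurations the driver cannot
 * drive.
 */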
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
	case IDR0_STALL_MODEL_STALL:
		/* Fallthrough */
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}
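
/*
 * ACPI probing: firmware describes the SMMUv3 via an IORT node; the only
 * extra information consumed here is the coherent-access override flag.
 */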
#ifdef CONFIG_ACPI
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif
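
/*
 * Device-tree probing: validate #iommu-cells, parse any option properties
 * and pick up cache coherency from the dma-coherent property.
 */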
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}
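
/*
 * Common probe path for both firmware interfaces. For illustration, a
 * hypothetical device-tree node consumed by this driver could look like
 * the following (addresses and interrupt numbers are made up; the
 * interrupt names, the #iommu-cells value of 1 and the >= 128K region
 * size are what the code below expects):
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *		#iommu-cells = <1>;
 *		dma-coherent;
 *	};
 */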
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	ioaddr = res->start;

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	ret = iommu_device_register(&smmu->iommu);

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
	return 0;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
	return 0;
}
static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
};
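
/*
 * Registration can be attempted from both the device-tree path
 * (arm_smmu_of_init) and the ACPI path (acpi_smmu_v3_init) below, so
 * guard against registering the platform driver twice.
 */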
static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
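
/*
 * Early device-tree entry point: IOMMU_OF_DECLARE arranges for this to
 * run early (via of_iommu_init), registering the driver and creating the
 * SMMU's platform device from its DT node ahead of the devices that sit
 * behind it.
 */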
static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
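
/*
 * ACPI entry point: only worth registering the driver if the IORT table
 * actually describes an SMMUv3 instance.
 */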
#ifdef CONFIG_ACPI
static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
{
	if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
		return arm_smmu_init();

	return 0;
}
IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
#endif
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");