iommu/arm-smmu: Add global/context fault implementation hooks
[linux-2.6-microblaze.git] drivers/iommu/arm-smmu-impl.c
// SPDX-License-Identifier: GPL-2.0-only
// Miscellaneous Arm SMMU implementation and integration quirks
// Copyright (C) 2019 Arm Limited

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/bitfield.h>
#include <linux/of.h>

#include "arm-smmu.h"

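/*
 * Integrations with the "calxeda,smmu-secure-config-access" property need the
 * driver to program the Non-Secure aliases of the Secure banked GR0 registers;
 * those aliases sit 0x400 above the offsets defined in arm-smmu.h.
 */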
static int arm_smmu_gr0_ns(int offset)
{
	switch (offset) {
	case ARM_SMMU_GR0_sCR0:
	case ARM_SMMU_GR0_sACR:
	case ARM_SMMU_GR0_sGFSR:
	case ARM_SMMU_GR0_sGFSYNR0:
	case ARM_SMMU_GR0_sGFSYNR1:
	case ARM_SMMU_GR0_sGFSYNR2:
		return offset + 0x400;
	default:
		return offset;
	}
}

static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
			    int offset)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
			      int offset, u32 val)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

/* Since we don't care for sGFAR, we can do without 64-bit accessors */
static const struct arm_smmu_impl calxeda_impl = {
	.read_reg = arm_smmu_read_ns,
	.write_reg = arm_smmu_write_ns,
};

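/*
 * Cavium wraps the core device in its own structure so that cfg_probe can
 * stash a per-SMMU base value for ASID/VMID numbering (erratum 27704 below).
 */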
struct cavium_smmu {
	struct arm_smmu_device smmu;
	u32 id_base;
};

static int cavium_cfg_probe(struct arm_smmu_device *smmu)
{
	static atomic_t context_count = ATOMIC_INIT(0);
	struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
	dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");

	return 0;
}

static int cavium_init_context(struct arm_smmu_domain *smmu_domain)
{
	struct cavium_smmu *cs = container_of(smmu_domain->smmu,
					      struct cavium_smmu, smmu);

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		smmu_domain->cfg.vmid += cs->id_base;
	else
		smmu_domain->cfg.asid += cs->id_base;

	return 0;
}

static const struct arm_smmu_impl cavium_impl = {
	.cfg_probe = cavium_cfg_probe,
	.init_context = cavium_init_context,
};

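/*
 * Swap the plain arm_smmu_device for the larger Cavium wrapper: allocate the
 * wrapper, copy the already-probed state into its embedded device, free the
 * original allocation, and hand the embedded device back to the core driver.
 */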
static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct cavium_smmu *cs;

	cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	cs->smmu = *smmu;
	cs->smmu.impl = &cavium_impl;

	devm_kfree(smmu->dev, smmu);

	return &cs->smmu;
}


#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

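/*
 * Not static: this reset is also reused as-is by other MMU-500 based
 * integrations (see mrvl_mmu500_impl below), which layer their own quirks on
 * top of the standard MMU-500 ones.
 */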
int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
	u32 reg, major;
	int i;
	/*
	 * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
	 * writes to the context bank ACTLRs will stick. And we just hope that
	 * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
	 */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
	major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
	if (major >= 2)
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
	/*
	 * Allow unmatched Stream IDs to allocate bypass
	 * TLB entries for reduced latency.
	 */
	reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);

	/*
	 * Disable MMU-500's not-particularly-beneficial next-page
	 * prefetcher for the sake of errata #841119 and #826419.
	 */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
		reg &= ~ARM_MMU500_ACTLR_CPRE;
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
	}

	return 0;
}

static const struct arm_smmu_impl arm_mmu500_impl = {
	.reset = arm_mmu500_reset,
};

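/*
 * Armada-AP806 (erratum #582743): every register access must stay 32 bits
 * wide on the interconnect, so 64-bit reads and writes are split into pairs
 * of 32-bit accesses and the AArch64 page-table formats are hidden in
 * cfg_probe below.
 */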
static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split all the readq to double readl
	 */
	return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
}

static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
			       u64 val)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split all the writeq to double writel
	 */
	hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
}

static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
{
	/*
	 * Armada-AP806 erratum #582743.
	 * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
	 * formats altogether and allow using 32 bits access on the
	 * interconnect.
	 */
	smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
			    ARM_SMMU_FEAT_FMT_AARCH64_16K |
			    ARM_SMMU_FEAT_FMT_AARCH64_64K);

	return 0;
}

static const struct arm_smmu_impl mrvl_mmu500_impl = {
	.read_reg64 = mrvl_mmu500_readq,
	.write_reg64 = mrvl_mmu500_writeq,
	.cfg_probe = mrvl_mmu500_cfg_probe,
	.reset = arm_mmu500_reset,
};

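/*
 * Entry point for all of the above: pick implementation quirks from the SMMU
 * model and platform integration quirks from the DT node. May return a
 * different (wrapped) device, or an ERR_PTR on failure.
 */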
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	/*
	 * Set the impl for model-specific implementation quirks first,
	 * such that platform integration quirks can pick it up and
	 * inherit from it if necessary.
	 */
	switch (smmu->model) {
	case ARM_MMU500:
		smmu->impl = &arm_mmu500_impl;
		break;
	case CAVIUM_SMMUV2:
		return cavium_smmu_impl_init(smmu);
	default:
		break;
	}

	/* This is implicitly MMU-400 */
	if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
		smmu->impl = &calxeda_impl;

	if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
		return nvidia_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
	    of_device_is_compatible(np, "qcom,sc7180-smmu-500") ||
	    of_device_is_compatible(np, "qcom,sm8150-smmu-500") ||
	    of_device_is_compatible(np, "qcom,sm8250-smmu-500"))
		return qcom_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
		smmu->impl = &mrvl_mmu500_impl;

	return smmu;
}
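
/*
 * Hedged sketch, not part of the file above: the series named in the commit
 * title adds global/context fault hooks to struct arm_smmu_impl so that
 * integrations can supply their own IRQ handlers. Assuming those hooks use
 * the irqreturn_t (*)(int irq, void *dev) signature of a normal IRQ handler
 * (irqreturn_t and IRQ_HANDLED come from <linux/interrupt.h>), an
 * implementation might wire one up roughly as below. The example_* names are
 * hypothetical; check arm-smmu.h at this commit for the real field names.
 */
#if 0
static irqreturn_t example_context_fault(int irq, void *dev)
{
	/*
	 * dev is whatever cookie the core driver passed to request_irq();
	 * vendor-specific fault decoding and logging would go here.
	 */
	return IRQ_HANDLED;
}

static const struct arm_smmu_impl example_impl = {
	.context_fault = example_context_fault,
};
#endif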