/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
 */
#define ARM_LPAE_START_LVL(d)	(ARM_LPAE_MAX_LEVELS - (d)->levels)
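
/*
 * e.g. a three-level pagetable walked with ARM_LPAE_MAX_LEVELS == 4
 * starts at level 4 - 3 = 1, so the level numbers used throughout
 * always match the architectural ones.
 */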

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
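
/*
 * e.g. for the 4KB granule (pg_shift = 12, bits_per_level = 9) with a
 * four-level walk starting at level 0, this yields shifts of
 * 39/30/21/12 for levels 0-3, matching the VMSAv8-64 4KB regime.
 */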

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
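
/*
 * e.g. with 8-byte PTEs (ilog2(8) = 3) and 9 bits per level, this
 * gives 4KB at level 3, 2MB at level 2 and 1GB at level 1.
 */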

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)	__va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
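
/*
 * Note that the type field is context-dependent: descriptor bits 1:0
 * of 0b11 mean "page" at the final level but "table" at any other
 * level, while 0b01 is only a valid block mapping above the final
 * level. iopte_leaf() encodes exactly that distinction.
 */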

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
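
/*
 * e.g. with the 64KB granule, OA bits 15:12 of a 52-bit physical
 * address are always zero, so paddr_to_iopte() can fold OA[51:48]
 * down into PTE bits 15:12 and iopte_to_paddr() shifts them back up
 * by 36 to reconstruct the original address.
 */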

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
	    (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
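
/*
 * Race outline: if two mappers race on the same empty ptep, one
 * cmpxchg succeeds and the loser sees the winner's entry as "old".
 * When the winner has not yet set ARM_LPAE_PTE_SW_SYNC, the loser
 * syncs the entry itself rather than waiting, so either way a walker
 * is guaranteed a visible table pointer.
 */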

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
		   !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
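
/*
 * e.g. a stage-1 mapping with IOMMU_READ | IOMMU_CACHE yields
 * nG | AP_RDONLY | AP_UNPRIV with ATTRINDX selecting the write-back
 * MAIR slot; the AF and shareability bits are added later in
 * __arm_lpae_init_pte().
 */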

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	}

	if (unmap_idx < 0)
		return __arm_lpae_unmap(data, iova, size, lvl, tablep);

	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
	return size;
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
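
	/*
	 * e.g. ias = 48 with a 4KB granule gives va_bits = 36,
	 * levels = DIV_ROUND_UP(36, 9) = 4 and pgd_bits = 9, i.e. a
	 * single 4KB page (512 entries) for the level-0 pgd.
	 */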

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;
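
	/*
	 * With the attribute bytes above, MAIR0 works out to 0x0004ff44:
	 * Device memory in slot 2, write-back RWA in slot 1 and Normal
	 * non-cacheable in slot 0.
	 */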

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
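
	/*
	 * e.g. ias = 40 with a 4KB granule would need four levels and a
	 * 16-byte level-0 pgd (two entries); since 2 is well within
	 * ARM_LPAE_S2_MAX_CONCAT_PAGES, the two level-1 tables are
	 * instead allocated contiguously (pgd_size = 8KB) and the walk
	 * starts at level 1.
	 */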

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);

#endif /* CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST */