// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table allocator.
 *
 * Copyright (C) 2020 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */
#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
static void v1_tlb_flush_all(void *cookie)
{
}

static void v1_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
}

static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule,
			    void *cookie)
{
}

static const struct iommu_flush_ops v1_flush_ops = {
	.tlb_flush_all	= v1_tlb_flush_all,
	.tlb_flush_walk	= v1_tlb_flush_walk,
	.tlb_add_page	= v1_tlb_add_page,
};
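/*
 * Note: the v1 flush callbacks above are left empty. TLB maintenance
 * for this format is driven explicitly through the
 * amd_iommu_domain_flush_*() helpers, as seen in iommu_map_page()
 * below, rather than through the generic io-pgtable flush hooks.
 */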
/*
 * Helper function to get the first pte of a large mapping
 */
static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
			 unsigned long *count)
{
	unsigned long pte_mask, pg_size, cnt;
	u64 *fpte;

	pg_size  = PTE_PAGE_SIZE(*pte);
	cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
	pte_mask = ~((cnt << 3) - 1);
	fpte     = (u64 *)(((unsigned long)pte) & pte_mask);

	if (page_size)
		*page_size = pg_size;

	if (count)
		*count = cnt;

	return fpte;
}
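/*
 * Worked example: for a 32 KiB large mapping, PTE_PAGE_SIZE() yields
 * 32K and PAGE_SIZE_PTE_COUNT() yields 8 replicated PTEs, so pte_mask
 * becomes ~0x3f and the pointer is rounded down to the 64-byte
 * boundary holding the first of the eight 8-byte PTEs.
 */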
/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/
static void free_page_list(struct page *freelist)
{
	while (freelist != NULL) {
		unsigned long p = (unsigned long)page_address(freelist);

		freelist = freelist->freelist;
		free_page(p);
	}
}
static struct page *free_pt_page(unsigned long pt, struct page *freelist)
{
	struct page *p = virt_to_page((void *)pt);

	p->freelist = freelist;

	return p;
}
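/*
 * Freed table pages are chained through their struct page::freelist
 * member instead of being released immediately; free_page_list() does
 * the actual free_page() calls once the IOTLB has been flushed.
 */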
#define DEFINE_FREE_PT_FN(LVL, FN)						\
static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist)	\
{										\
	unsigned long p;							\
	u64 *pt;								\
	int i;									\
										\
	pt = (u64 *)__pt;							\
										\
	for (i = 0; i < 512; ++i) {						\
		/* PTE present? */						\
		if (!IOMMU_PTE_PRESENT(pt[i]))					\
			continue;						\
										\
		/* Large PTE? */						\
		if (PM_PTE_LEVEL(pt[i]) == 0 ||					\
		    PM_PTE_LEVEL(pt[i]) == 7)					\
			continue;						\
										\
		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);			\
		freelist = FN(p, freelist);					\
	}									\
										\
	return free_pt_page((unsigned long)pt, freelist);			\
}
DEFINE_FREE_PT_FN(l2, free_pt_page)
DEFINE_FREE_PT_FN(l3, free_pt_l2)
DEFINE_FREE_PT_FN(l4, free_pt_l3)
DEFINE_FREE_PT_FN(l5, free_pt_l4)
DEFINE_FREE_PT_FN(l6, free_pt_l5)
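/*
 * Each helper expanded above queues its own table page for freeing
 * after recursing into every present, non-large entry below it, so
 * free_pt_l6() tears down an entire six-level hierarchy.
 */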
static struct page *free_sub_pt(unsigned long root, int mode,
				struct page *freelist)
{
	switch (mode) {
	case PAGE_MODE_NONE:
	case PAGE_MODE_7_LEVEL:
		break;
	case PAGE_MODE_1_LEVEL:
		freelist = free_pt_page(root, freelist);
		break;
	case PAGE_MODE_2_LEVEL:
		freelist = free_pt_l2(root, freelist);
		break;
	case PAGE_MODE_3_LEVEL:
		freelist = free_pt_l3(root, freelist);
		break;
	case PAGE_MODE_4_LEVEL:
		freelist = free_pt_l4(root, freelist);
		break;
	case PAGE_MODE_5_LEVEL:
		freelist = free_pt_l5(root, freelist);
		break;
	case PAGE_MODE_6_LEVEL:
		freelist = free_pt_l6(root, freelist);
		break;
	default:
		BUG();
	}

	return freelist;
}
void free_pagetable(struct domain_pgtable *pgtable)
{
	struct page *freelist = NULL;
	unsigned long root;

	if (pgtable->mode == PAGE_MODE_NONE)
		return;

	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
	       pgtable->mode > PAGE_MODE_6_LEVEL);

	root = (unsigned long)pgtable->root;
	freelist = free_sub_pt(root, pgtable->mode, freelist);

	free_page_list(freelist);
}
void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
				  u64 *root, int mode)
{
	u64 pt_root;

	/* lowest 3 bits encode pgtable mode */
	pt_root = mode & 7;
	pt_root |= (u64)root;

	amd_iommu_domain_set_pt_root(domain, pt_root);
}
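/*
 * Example: a page-aligned root table at 0x1000 combined with
 * PAGE_MODE_3_LEVEL encodes as pt_root == 0x1003; page alignment
 * guarantees the low 3 bits are free to carry the mode.
 */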
/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   unsigned long address,
				   gfp_t gfp)
{
	struct domain_pgtable pgtable;
	unsigned long flags;
	bool ret = true;
	u64 *pte;

	spin_lock_irqsave(&domain->lock, flags);

	amd_iommu_domain_get_pgtable(domain, &pgtable);

	if (address <= PM_LEVEL_SIZE(pgtable.mode))
		goto out;

	ret = false;
	if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
		goto out;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		goto out;

	*pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));

	pgtable.root  = pte;
	pgtable.mode += 1;
	amd_iommu_update_and_flush_device_table(domain);
	amd_iommu_domain_flush_complete(domain);

	/*
	 * Device Table needs to be updated and flushed before the new root can
	 * be published.
	 */
	amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);

	ret = true;

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
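/*
 * Example: a domain at PAGE_MODE_3_LEVEL covers IOVAs up to
 * PM_LEVEL_SIZE(3), i.e. just below 1ULL << 39; the first mapping
 * beyond that grows the table to PAGE_MODE_4_LEVEL, one level per
 * call, up to the PAGE_MODE_6_LEVEL limit enforced above.
 */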
static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp,
		      bool *updated)
{
	struct domain_pgtable pgtable;
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	amd_iommu_domain_get_pgtable(domain, &pgtable);

	while (address > PM_LEVEL_SIZE(pgtable.mode)) {
		/*
		 * Return an error if there is no memory to update the
		 * page-table.
		 */
		if (!increase_address_space(domain, address, gfp))
			return NULL;

		/* Read new values to check if update was successful */
		amd_iommu_domain_get_pgtable(domain, &pgtable);
	}

	level   = pgtable.mode - 1;
	pte     = &pgtable.root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		u64 __pte, __npte;
		int pte_level;

		__pte     = *pte;
		pte_level = PM_PTE_LEVEL(__pte);

		/*
		 * If we replace a series of large PTEs, we need
		 * to tear down all of them.
		 */
		if (IOMMU_PTE_PRESENT(__pte) &&
		    pte_level == PAGE_MODE_7_LEVEL) {
			unsigned long count, i;
			u64 *lpte;

			lpte = first_pte_l7(pte, NULL, &count);

			/*
			 * Unmap the replicated PTEs that still match the
			 * original large mapping
			 */
			for (i = 0; i < count; ++i)
				cmpxchg64(&lpte[i], __pte, 0ULL);

			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte) ||
		    pte_level == PAGE_MODE_NONE) {
			page = (u64 *)get_zeroed_page(gfp);
			if (!page)
				return NULL;

			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));

			/* pte could have been changed somewhere. */
			if (cmpxchg64(pte, __pte, __npte) != __pte)
				free_page((unsigned long)page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		/* No level skipping support yet */
		if (pte_level != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(__pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}
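/*
 * Note the lock-free update protocol above: both the large-PTE
 * teardown and the installation of a new table level go through
 * cmpxchg64(), and a lost race simply retries the current level via
 * continue.
 */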
/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
u64 *fetch_pte(struct protection_domain *domain,
	       unsigned long address,
	       unsigned long *page_size)
{
	struct domain_pgtable pgtable;
	int level;
	u64 *pte;

	*page_size = 0;

	amd_iommu_domain_get_pgtable(domain, &pgtable);

	if (address > PM_LEVEL_SIZE(pgtable.mode))
		return NULL;

	level      = pgtable.mode - 1;
	pte        = &pgtable.root[PM_LEVEL_INDEX(level, address)];
	*page_size = PTE_LEVEL_PAGE_SIZE(level);

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == 7 ||
		    PM_PTE_LEVEL(*pte) == 0)
			break;

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte        = IOMMU_PTE_PAGE(*pte);
		pte        = &pte[PM_LEVEL_INDEX(level, address)];
		*page_size = PTE_LEVEL_PAGE_SIZE(level);
	}

	/*
	 * If we have a series of large PTEs, make
	 * sure to return a pointer to the first one.
	 */
	if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
		pte = first_pte_l7(pte, page_size, NULL);

	return pte;
}
static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
{
	unsigned long pt;
	int mode;

	while (cmpxchg64(pte, pteval, 0) != pteval) {
		/* pr_fmt() already adds the "AMD-Vi: " prefix */
		pr_warn("IOMMU pte changed since we read it\n");
		pteval = *pte;
	}

	if (!IOMMU_PTE_PRESENT(pteval))
		return freelist;

	pt   = (unsigned long)IOMMU_PTE_PAGE(pteval);
	mode = IOMMU_PTE_MODE(pteval);

	return free_sub_pt(pt, mode, freelist);
}
/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
int iommu_map_page(struct protection_domain *dom,
		   unsigned long bus_addr,
		   unsigned long phys_addr,
		   unsigned long page_size,
		   int prot,
		   gfp_t gfp)
{
	struct page *freelist = NULL;
	bool updated = false;
	u64 __pte, *pte;
	int ret, i, count;

	BUG_ON(!IS_ALIGNED(bus_addr, page_size));
	BUG_ON(!IS_ALIGNED(phys_addr, page_size));

	ret = -EINVAL;
	if (!(prot & IOMMU_PROT_MASK))
		goto out;

	count = PAGE_SIZE_PTE_COUNT(page_size);
	pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);

	ret = -ENOMEM;
	if (!pte)
		goto out;

	for (i = 0; i < count; ++i)
		freelist = free_clear_pte(&pte[i], pte[i], freelist);

	if (freelist != NULL)
		updated = true;

	if (count > 1) {
		__pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
	} else
		__pte = __sme_set(phys_addr) | IOMMU_PTE_PR | IOMMU_PTE_FC;

	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	for (i = 0; i < count; ++i)
		pte[i] = __pte;

	ret = 0;

out:
	if (updated) {
		unsigned long flags;

		spin_lock_irqsave(&dom->lock, flags);
		/*
		 * Flush domain TLB(s) and wait for completion. Any Device-Table
		 * Updates and flushing already happened in
		 * increase_address_space().
		 */
		amd_iommu_domain_flush_tlb_pde(dom);
		amd_iommu_domain_flush_complete(dom);
		spin_unlock_irqrestore(&dom->lock, flags);
	}

	/* Everything flushed out, free pages now */
	free_page_list(freelist);

	return ret;
}
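/*
 * Usage sketch (hypothetical caller): mapping a readable, writable
 * 2 MiB region would look like
 *
 *	ret = iommu_map_page(dom, iova, paddr, SZ_2M,
 *			     IOMMU_PROT_IR | IOMMU_PROT_IW, GFP_KERNEL);
 *
 * where iova and paddr must both be 2 MiB aligned, per the BUG_ON()
 * checks above.
 */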
unsigned long iommu_unmap_page(struct protection_domain *dom,
			       unsigned long bus_addr,
			       unsigned long page_size)
{
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;

	BUG_ON(!is_power_of_2(page_size));

	unmapped = 0;

	while (unmapped < page_size) {
		pte = fetch_pte(dom, bus_addr, &unmap_size);

		if (pte) {
			int i, count;

			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		}

		bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	BUG_ON(unmapped && !is_power_of_2(unmapped));

	return unmapped;
}
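/*
 * fetch_pte() reports the region covered by the PTE it found through
 * unmap_size, so the loop above advances at whatever granularity the
 * table actually uses rather than assuming a fixed page size.
 */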
/*
 * ----------------------------------------------------
 */
static void v1_free_pgtable(struct io_pgtable *iop)
{
}

static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
	cfg->ias           = IOMMU_IN_ADDR_BIT_SIZE;
	cfg->oas           = IOMMU_OUT_ADDR_BIT_SIZE;
	cfg->tlb           = &v1_flush_ops;

	return &pgtable->iop;
}
struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
	.alloc	= v1_alloc_pgtable,
	.free	= v1_free_pgtable,
};
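/*
 * This init-fns table is the hook through which the generic io-pgtable
 * layer instantiates the AMD v1 format; the driver is assumed to select
 * it via the matching io_pgtable_fmt entry (AMD_IOMMU_V1).
 */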