// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */
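
/*
 * The CONT columns above follow from the granule: with 4K pages,
 * CONT_PTES is 16, so a contiguous PTE span covers 16 * 4K = 64K, and
 * CONT_PMDS is 16, so a contiguous PMD span covers 16 * 2M = 32M. The
 * 16K and 64K rows derive the same way from their CONT_PTES/CONT_PMDS
 * values (128/32 and 32/32 respectively).
 */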

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
        int order;

        if (pud_sect_supported())
                order = PUD_SHIFT - PAGE_SHIFT;
        else
                order = CONT_PMD_SHIFT - PAGE_SHIFT;

        /*
         * HugeTLB CMA reservation is required for gigantic
         * huge pages which could not be allocated via the
         * page allocator. Just warn if there is any change
         * breaking this assumption.
         */
        WARN_ON(order <= MAX_ORDER);
        hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
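
/*
 * Note: arm64_hugetlb_cma_reserve() is invoked during early boot, from
 * bootmem_init() in arch/arm64/mm/init.c, once the memblock and NUMA
 * state that hugetlb_cma_reserve() depends on has been set up.
 */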

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
        size_t pagesize = huge_page_size(h);

        switch (pagesize) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                return pud_sect_supported();
#endif
        case PMD_SIZE:
        case CONT_PMD_SIZE:
        case CONT_PTE_SIZE:
                return true;
        }
        pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
        return false;
}
#endif

int pmd_huge(pmd_t pmd)
{
        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
        return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
        return 0;
#endif
}
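
/*
 * In the arm64 descriptor format, bit 1 (the table bit) of a non-empty
 * entry distinguishes a pointer to a next-level table (set) from a
 * block mapping (clear), so pmd_huge()/pud_huge() test
 * PMD_TABLE_BIT/PUD_TABLE_BIT rather than a dedicated huge-page flag.
 */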

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}
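
/*
 * pfn_pte(pfn, __pgprot(0)) builds an entry containing only the output
 * address bits for @pfn, so XOR-ing it with the original value clears
 * exactly those bits and leaves the attribute and permission bits,
 * i.e. the pgprot_t.
 */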

static int find_num_contig(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, size_t *pgsize)
{
        pgd_t *pgdp = pgd_offset(mm, addr);
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;

        *pgsize = PAGE_SIZE;
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_offset(p4dp, addr);
        pmdp = pmd_offset(pudp, addr);
        if ((pte_t *)pmdp == ptep) {
                *pgsize = PMD_SIZE;
                return CONT_PMDS;
        }
        return CONT_PTES;
}
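
/*
 * The pointer comparison in find_num_contig() works because a
 * contiguous PMD mapping passes a pmd entry cast to pte_t *: re-walking
 * the tables to the pmd for @addr and comparing addresses is enough to
 * tell a CONT_PMD set apart from a CONT_PTE set.
 */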

static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
        int contig_ptes = 0;

        *pgsize = size;

        switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                if (pud_sect_supported())
                        contig_ptes = 1;
                break;
#endif
        case PMD_SIZE:
                contig_ptes = 1;
                break;
        case CONT_PMD_SIZE:
                *pgsize = PMD_SIZE;
                contig_ptes = CONT_PMDS;
                break;
        case CONT_PTE_SIZE:
                *pgsize = PAGE_SIZE;
                contig_ptes = CONT_PTES;
                break;
        }

        return contig_ptes;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_flush(struct mm_struct *mm,
                             unsigned long addr,
                             pte_t *ptep,
                             unsigned long pgsize,
                             unsigned long ncontig)
{
        pte_t orig_pte = huge_ptep_get(ptep);
        bool valid = pte_valid(orig_pte);
        unsigned long i, saddr = addr;

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
                pte_t pte = ptep_get_and_clear(mm, addr, ptep);

                /*
                 * If HW_AFDBM is enabled, then the HW could turn on
                 * the dirty or accessed bit for any page in the set,
                 * so check them all.
                 */
                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);

                if (pte_young(pte))
                        orig_pte = pte_mkyoung(orig_pte);
        }

        if (valid) {
                struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
                flush_tlb_range(&vma, saddr, addr);
        }
        return orig_pte;
}
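
/*
 * Note the pte_valid() check in get_clear_flush(): if the set being
 * broken was not valid (e.g. it held swap or migration entries),
 * nothing can be cached in the TLB for it and the flush is skipped.
 */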

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
                        unsigned long addr,
                        pte_t *ptep,
                        unsigned long pgsize,
                        unsigned long ncontig)
{
        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
        unsigned long i, saddr = addr;

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                pte_clear(mm, addr, ptep);

        flush_tlb_range(&vma, saddr, addr);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        size_t pgsize;
        int i;
        int ncontig;
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;

        /*
         * Code needs to be expanded to handle huge swap and migration
         * entries. Needed for HUGETLB and MEMORY_FAILURE.
         */
        WARN_ON(!pte_present(pte));

        if (!pte_cont(pte)) {
                set_pte_at(mm, addr, ptep, pte);
                return;
        }

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        pfn = pte_pfn(pte);
        dpfn = pgsize >> PAGE_SHIFT;
        hugeprot = pte_pgprot(pte);

        clear_flush(mm, addr, ptep, pgsize, ncontig);

        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
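
/*
 * The clear_flush() above is the "break" half of Break-Before-Make;
 * only once the whole set is invalid and flushed are the ncontig
 * entries rewritten with the contiguous bit set, stepping the pfn by
 * dpfn (one entry's worth of pages) per iteration.
 */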

void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep, pte_t pte, unsigned long sz)
{
        int i, ncontig;
        size_t pgsize;

        ncontig = num_contig_ptes(sz, &pgsize);

        for (i = 0; i < ncontig; i++, ptep++)
                set_pte(ptep, pte);
}

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep = NULL;

        pgdp = pgd_offset(mm, addr);
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_alloc(mm, p4dp, addr);
        if (!pudp)
                return NULL;

        if (sz == PUD_SIZE) {
                ptep = (pte_t *)pudp;
        } else if (sz == (CONT_PTE_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
                if (!pmdp)
                        return NULL;

                WARN_ON(addr & (sz - 1));
                /*
                 * Note that if this code were ever ported to the
                 * 32-bit arm platform then it will cause trouble in
                 * the case where CONFIG_HIGHPTE is set, since there
                 * will be no pte_unmap() to correspond with this
                 * pte_alloc_map().
                 */
                ptep = pte_alloc_map(mm, pmdp, addr);
        } else if (sz == PMD_SIZE) {
                if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
                        ptep = huge_pmd_share(mm, vma, addr, pudp);
                else
                        ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
        } else if (sz == (CONT_PMD_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
                WARN_ON(addr & (sz - 1));
                return (pte_t *)pmdp;
        }

        return ptep;
}
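
/*
 * In every case the returned pte_t * aliases the page-table entry at
 * the level that maps @sz: the pud for PUD_SIZE, the (first) pmd for
 * PMD_SIZE/CONT_PMD_SIZE, and a real pte for CONT_PTE_SIZE. The
 * WARN_ON()s flag callers that pass an @addr not aligned to the full
 * huge page size.
 */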

pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;

        pgdp = pgd_offset(mm, addr);
        if (!pgd_present(READ_ONCE(*pgdp)))
                return NULL;

        p4dp = p4d_offset(pgdp, addr);
        if (!p4d_present(READ_ONCE(*p4dp)))
                return NULL;

        pudp = pud_offset(p4dp, addr);
        pud = READ_ONCE(*pudp);
        if (sz != PUD_SIZE && pud_none(pud))
                return NULL;
        /* hugepage or swap? */
        if (pud_huge(pud) || !pud_present(pud))
                return (pte_t *)pudp;
        /* table; check the next level */

        if (sz == CONT_PMD_SIZE)
                addr &= CONT_PMD_MASK;

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
            pmd_none(pmd))
                return NULL;
        if (pmd_huge(pmd) || !pmd_present(pmd))
                return (pte_t *)pmdp;

        if (sz == CONT_PTE_SIZE)
                return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));

        return NULL;
}
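
/*
 * Unlike an ordinary walk, non-present pud/pmd entries are returned
 * here rather than treated as a miss: hugetlb callers need to see huge
 * swap and migration entries (e.g. for poisoned pages), which is what
 * the "hugepage or swap?" checks allow.
 */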

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
        size_t pagesize = 1UL << shift;

        if (pagesize == CONT_PTE_SIZE) {
                entry = pte_mkcont(entry);
        } else if (pagesize == CONT_PMD_SIZE) {
                entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
        } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
                pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
        }
        return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, unsigned long sz)
{
        int i, ncontig;
        size_t pgsize;

        ncontig = num_contig_ptes(sz, &pgsize);

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                pte_clear(mm, addr, ptep);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep)
{
        int ncontig;
        size_t pgsize;
        pte_t orig_pte = huge_ptep_get(ptep);

        if (!pte_cont(orig_pte))
                return ptep_get_and_clear(mm, addr, ptep);

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);

        return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range we need to check whether or not write
 * permission has to change only on the first pte in the set. Then for
 * all the contiguous ptes we need to check whether or not there is a
 * discrepancy between dirty or young.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
        int i;

        if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
                return 1;

        for (i = 0; i < ncontig; i++) {
                pte_t orig_pte = huge_ptep_get(ptep + i);

                if (pte_dirty(pte) != pte_dirty(orig_pte))
                        return 1;

                if (pte_young(pte) != pte_young(orig_pte))
                        return 1;
        }

        return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
{
        int ncontig, i;
        size_t pgsize = 0;
        unsigned long pfn = pte_pfn(pte), dpfn;
        pgprot_t hugeprot;
        pte_t orig_pte;

        if (!pte_cont(pte))
                return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

        ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;

        if (!__cont_access_flags_changed(ptep, pte, ncontig))
                return 0;

        orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);

        /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
                pte = pte_mkdirty(pte);

        if (pte_young(orig_pte))
                pte = pte_mkyoung(pte);

        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));

        return 1;
}

void huge_ptep_set_wrprotect(struct mm_struct *mm,
                             unsigned long addr, pte_t *ptep)
{
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;
        int ncontig, i;
        size_t pgsize;
        pte_t pte;

        if (!pte_cont(READ_ONCE(*ptep))) {
                ptep_set_wrprotect(mm, addr, ptep);
                return;
        }

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;

        pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
        pte = pte_wrprotect(pte);

        hugeprot = pte_pgprot(pte);
        pfn = pte_pfn(pte);

        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

void huge_ptep_clear_flush(struct vm_area_struct *vma,
                           unsigned long addr, pte_t *ptep)
{
        size_t pgsize;
        int ncontig;

        if (!pte_cont(READ_ONCE(*ptep))) {
                ptep_clear_flush(vma, addr, ptep);
                return;
        }

        ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
        clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
}

static int __init hugetlbpage_init(void)
{
        if (pud_sect_supported())
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

        hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

        return 0;
}
arch_initcall(hugetlbpage_init);
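
/*
 * hugetlb_add_hstate(order) registers an hstate for pages of
 * (PAGE_SIZE << order) bytes; each size registered above appears as a
 * hugepages-<size>kB directory under /sys/kernel/mm/hugepages/.
 * arch_hugetlb_valid_size() below is what lets the hugetlb core accept
 * the matching "hugepagesz=" values on the kernel command line, e.g.
 * "hugepagesz=32M hugepages=16" on a 4K-granule kernel.
 */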

bool __init arch_hugetlb_valid_size(unsigned long size)
{
        switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                return pud_sect_supported();
#endif
        case CONT_PMD_SIZE:
        case PMD_SIZE:
        case CONT_PTE_SIZE:
                return true;
        }

        return false;
}