// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */
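/*
 * The CONT columns rely on the contiguous hint bit: e.g. with 4K base
 * pages, a 64K huge page is CONT_PTES (16) adjacent PTEs and a 32M huge
 * page is CONT_PMDS (16) adjacent PMDs, each run marked contiguous so
 * the TLB may cache the whole range as a single entry.
 */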
/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;

	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
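/*
 * Report whether @size is one of the sizes in the support matrix above.
 * PUD-sized huge pages are only valid when the configuration supports
 * PUD-level block mappings.
 */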
static bool __hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return pud_sect_supported();
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}

	return false;
}
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	if (!__hugetlb_valid_size(pagesize)) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
		return false;
	}
	return true;
}
#endif
int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}
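/*
 * Work out how many entries make up the contiguous set that @ptep
 * belongs to by walking the page tables to @addr: CONT_PMDS if @ptep
 * turns out to be a PMD entry, CONT_PTES otherwise. *pgsize is set to
 * the size mapped by a single entry in the set.
 */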
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}
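/*
 * As find_num_contig(), but derive the entry count and per-entry size
 * from the huge page size alone, without walking the page tables.
 * Non-contiguous sizes (PMD, PUD) map with a single entry.
 */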
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		if (pud_sect_supported())
			contig_ptes = 1;
		break;
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}
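/*
 * For a contiguous set, hardware AF/DBM may have set the accessed or
 * dirty bit on any entry in the set, so fold the bits from every entry
 * into the value returned for the first one.
 */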
pte_t huge_ptep_get(pte_t *ptep)
{
	int ncontig, i;
	size_t pgsize;
	pte_t orig_pte = __ptep_get(ptep);

	if (!pte_present(orig_pte) || !pte_cont(orig_pte))
		return orig_pte;

	ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
	for (i = 0; i < ncontig; i++, ptep++) {
		pte_t pte = __ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
			      unsigned long addr,
			      pte_t *ptep,
			      unsigned long pgsize,
			      unsigned long ncontig)
{
	pte_t orig_pte = __ptep_get(ptep);
	unsigned long i;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = __ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}
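/*
 * As get_clear_contig(), but also invalidate the TLB for the whole
 * range so the break is complete before new entries are written.
 */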
static pte_t get_clear_contig_flush(struct mm_struct *mm,
				    unsigned long addr,
				    pte_t *ptep,
				    unsigned long pgsize,
				    unsigned long ncontig)
{
	pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

	flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
	return orig_pte;
}
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			unsigned long addr,
			pte_t *ptep,
			unsigned long pgsize,
			unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		__ptep_get_and_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}
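/*
 * Install a huge pte. For a contiguous set this is the full
 * break-before-make sequence: clear and flush the old entries, then
 * write ncontig new entries, stepping the pfn by one entry's worth of
 * pages (dpfn) each time.
 */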
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte, unsigned long sz)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	ncontig = num_contig_ptes(sz, &pgsize);

	if (!pte_present(pte)) {
		for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
			__set_ptes(mm, addr, ptep, pte, 1);
		return;
	}

	if (!pte_cont(pte)) {
		__set_ptes(mm, addr, ptep, pte, 1);
		return;
	}

	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}
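/*
 * Allocate the page table levels needed to map a huge page of size @sz
 * at @addr and return a pointer to the entry that will hold it: the
 * PUD itself for PUD_SIZE, a PMD for PMD/CONT_PMD sizes, or a PTE for
 * CONT_PTE_SIZE.
 */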
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		ptep = pte_alloc_huge(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, vma, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}
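/*
 * Look up the entry mapping @addr without allocating. Returns the
 * level-appropriate entry (PUD, PMD or PTE) for present, huge or swap
 * entries, or NULL if nothing is mapped at the expected level.
 */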
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}
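/*
 * Used by the generic hugetlb walker to skip ahead when
 * huge_pte_offset() finds an empty higher-level entry: returns the
 * distance from a huge page to the end of its enclosing table entry
 * (e.g. PGDIR_SIZE - PUD_SIZE for PUD huge pages), as an address mask.
 */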
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return PGDIR_SIZE - PUD_SIZE;
#endif
	case CONT_PMD_SIZE:
		return PUD_SIZE - CONT_PMD_SIZE;
	case PMD_SIZE:
		return PUD_SIZE - PMD_SIZE;
	case CONT_PTE_SIZE:
		return PMD_SIZE - CONT_PTE_SIZE;
	default:
		break;
	}

	return 0UL;
}
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	size_t pagesize = 1UL << shift;

	entry = pte_mkhuge(entry);
	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		__pte_clear(mm, addr, ptep);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = __ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return __ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission only needs to be
 * checked on the first pte in the set; dirty and young must then be
 * compared against every contiguous pte to catch any discrepancy.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(__ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = __ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return __ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);

	return 1;
}
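/*
 * Write-protect a (possibly contiguous) huge pte. For a contiguous set
 * this is a full break-before-make cycle, preserving any dirty/young
 * bits gathered from the old entries.
 */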
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(__ptep_get(ptep))) {
		__ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
			    unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	size_t pgsize;
	int ncontig;

	if (!pte_cont(__ptep_get(ptep)))
		return ptep_clear_flush(vma, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}
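/*
 * Register an hstate for every huge page size this configuration
 * supports, matching the support matrix at the top of this file.
 */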
static int __init hugetlbpage_init(void)
{
	if (pud_sect_supported())
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space
		 * mappings when the permission changes from executable to
		 * non-executable on CPUs affected by erratum #2645198.
		 */
		if (pte_user_exec(__ptep_get(ptep)))
			return huge_ptep_clear_flush(vma, addr, ptep);
	}
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
				  pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}