/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>
#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_512K	19
#define PAGE_SHIFT_8M	23
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

bool hugetlb_disabled = false;

unsigned int HPAGE_SHIFT;
EXPORT_SYMBOL(HPAGE_SHIFT);
#define hugepd_none(hpd)	(hpd_val(hpd) == 0)
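/*
 * A hugepd ("huge page directory") entry is a pgd/pud/pmd-level entry that,
 * instead of pointing at a normal next-level table, points at a table of
 * huge PTEs.  An all-zero entry is treated as empty; the full encoding is
 * described in the comment above __find_linux_pte() near the end of this file.
 */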
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        /*
         * Only called for hugetlbfs pages, hence can ignore THP and the
         * irq disabled walk.
         */
        return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}
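/*
 * Allocate a hugepte table and install pointers to it in one or more
 * directory slots starting at *hpdp.  Where the huge page is larger than the
 * range covered by a single directory entry (FSL/8xx), several consecutive
 * entries are made to point at the same hugepte table.
 */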
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned int pdshift,
                           unsigned int pshift, spinlock_t *ptl)
{
        struct kmem_cache *cachep;
        pte_t *new;
        int i;
        int num_hugepd;

        if (pshift >= pdshift) {
                cachep = hugepte_cache;
                num_hugepd = 1 << (pshift - pdshift);
        } else {
                cachep = PGT_CACHE(pdshift - pshift);
                num_hugepd = 1;
        }

        new = kmem_cache_zalloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        /*
         * Make sure other cpus find the hugepd set only after a
         * properly initialized page table is visible to them.
         * For more details look for comment in __pte_alloc().
         */
        smp_wmb();

        spin_lock(ptl);
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
#ifdef CONFIG_PPC_BOOK3S_64
                *hpdp = __hugepd(__pa(new) |
                                 (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
                *hpdp = __hugepd(__pa(new) | _PMD_USER |
                                 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
                                  _PMD_PAGE_512K) | _PMD_PRESENT);
#else
                /* We use the old format for PPC_FSL_BOOK3E */
                *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        *hpdp = __hugepd(0);
                kmem_cache_free(cachep, new);
        }
        spin_unlock(ptl);
        return 0;
}
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
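/*
 * Walk (allocating as needed) the page table levels for @addr down to the
 * level at which this huge page size lives, and return a pointer to the huge
 * PTE slot.  Sizes larger than the span of a directory entry are handled by
 * hanging a hugepd table off that level.
 */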
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;
        spinlock_t *ptl;

        addr &= ~(sz - 1);
        pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
        if (pshift == PGDIR_SHIFT)
                return (pte_t *)pg;
        else if (pshift > PUD_SHIFT) {
                /*
                 * We need to use hugepd table
                 */
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (pshift == PMD_SHIFT)
                                return (pte_t *)pm;
                        else {
                                ptl = pmd_lockptr(mm, pm);
                                hpdp = (hugepd_t *)pm;
                        }
                }
        }
#else
        if (pshift >= PGDIR_SHIFT) {
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift >= PUD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        ptl = pmd_lockptr(mm, pm);
                        hpdp = (hugepd_t *)pm;
                }
        }
#endif
        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
                                                  pdshift, pshift, ptl))
                return NULL;

        return hugepte_offset(*hpdp, addr, pdshift);
}
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;
/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}
int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;

        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
#endif
int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
                return pseries_alloc_bootmem_huge_page(h);
#endif
        return __alloc_bootmem_huge_page(h);
}
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(hugepte_cache, batch->ptes[i]);

        free_page((unsigned long)batch);
}
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            mm_is_thread_local(tlb->mm)) {
                kmem_cache_free(hugepte_cache, hugepte);
                put_cpu_var(hugepd_freelist_cur);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
        put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif
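/*
 * free_hugepd_range() below clears the directory entries and then frees the
 * hugepte table they pointed to.  On FSL/8xx the table goes through
 * hugepd_free() above, which defers the actual kmem_cache_free() to an RCU
 * callback so that lockless walkers (e.g. gup_hugepte()) can still traverse
 * it; elsewhere it goes through pgtable_free_tlb().
 */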
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;
        unsigned int shift = hugepd_shift(*hpdp);

        /* Note: On fsl the hpdp may be the first of several */
        if (shift > pdshift)
                num_hugepd = 1 << (shift - pdshift);

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                *hpdp = __hugepd(0);

        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else
                pgtable_free_tlb(tlb, hugepte,
                                 get_hugepd_cache_index(pdshift - shift));
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                unsigned long more;

                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
                        /*
                         * if it is not hugepd pointer, we should already find
                         * it cleared.
                         */
                        WARN_ON(!pmd_none_or_clear_bad(pmd));
                        continue;
                }
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
                if (more > next)
                        next = more;

                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pud_val(*pud)))) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}
/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}
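/*
 * follow_page()/get_user_pages() helper: resolve a hugepd entry to the
 * struct page backing @address, take a reference if FOLL_GET is set, and
 * wait for a migration entry to resolve before retrying.
 */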
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift)
{
        pte_t *ptep;
        spinlock_t *ptl;
        struct page *page = NULL;
        unsigned long mask;
        int shift = hugepd_shift(hpd);
        struct mm_struct *mm = vma->vm_mm;

retry:
        /*
         * hugepage directory entries are protected by mm->page_table_lock
         * Use this instead of huge_pte_lockptr
         */
        ptl = &mm->page_table_lock;
        spin_lock(ptl);

        ptep = hugepte_offset(hpd, address, pdshift);
        if (pte_present(*ptep)) {
                mask = (1UL << shift) - 1;
                page = pte_page(*ptep);
                page += ((address & mask) >> PAGE_SHIFT);
                if (flags & FOLL_GET)
                        get_page(page);
        } else {
                if (is_hugetlb_entry_migration(*ptep)) {
                        spin_unlock(ptl);
                        __migration_entry_wait(mm, ptep, ptl);
                        goto retry;
                }
        }
        spin_unlock(ptl);
        return page;
}
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long __boundary = (addr + sz) & ~(sz - 1);
        return (__boundary - 1 < end - 1) ? __boundary : end;
}
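/*
 * Fast GUP helper: iterate over every huge PTE covered by one hugepd entry
 * and hand each one to gup_hugepte() at the bottom of this file.
 */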
int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
                unsigned long end, int write, struct page **pages, int *nr)
{
        pte_t *ptep;
        unsigned long sz = 1UL << hugepd_shift(hugepd);
        unsigned long next;

        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);

        return 1;
}
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
        if (radix_enabled())
                return radix__hugetlb_get_unmapped_area(file, addr, len,
                                                        pgoff, flags);
#endif
        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
        /* With radix we don't use slices, so derive the size from the vma */
        if (!radix_enabled()) {
                unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

                return 1UL << mmu_psize_to_shift(psize);
        }
#endif
        return vma_kernel_pagesize(vma);
}
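/*
 * FSL Book3E MMUs map pages in power-of-4 sizes, so add_huge_page_size()
 * below uses this helper there instead of a plain is_power_of_2() check.
 */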
static inline bool is_power_of_4(unsigned long x)
{
        if (is_power_of_2(x))
                return (__ilog2(x) % 2) ? false : true;
        return false;
}
static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
        if (size <= PAGE_SIZE)
                return -EINVAL;
#if defined(CONFIG_PPC_FSL_BOOK3E)
        if (!is_power_of_4(size))
                return -EINVAL;
#elif !defined(CONFIG_PPC_8xx)
        if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
                return -EINVAL;
#endif

        if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
                return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * We need to make sure that for different page sizes reported by
         * firmware we only add hugetlb support for page sizes that can be
         * supported by linux page table layout.
         */
        if (radix_enabled()) {
                if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
                        return -EINVAL;
        } else {
                if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
                        return -EINVAL;
        }
#endif

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if huge page size has already been setup */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}

static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0) {
                hugetlb_bad_size();
                pr_err("Invalid huge page size specified(%llu)\n", size);
        }

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
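/*
 * Boot-time initialisation: register each hugepage size the MMU supports,
 * set up the caches used for hugepte tables, and pick the default huge page
 * size exported through HPAGE_SHIFT.
 */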
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
        int psize;

        if (hugetlb_disabled) {
                pr_info("HugeTLB support is disabled!\n");
                return 0;
        }

#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
        if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;
#endif
        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
                if (shift > PGDIR_SHIFT)
                        continue;
                else if (shift > PUD_SHIFT)
                        pdshift = PGDIR_SHIFT;
                else if (shift > PMD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PMD_SHIFT;
#else
                if (shift < PUD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PGDIR_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;
#endif

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;
                /*
                 * if we have pdshift and shift value same, we don't
                 * use pgt cache for hugepd.
                 */
                if (pdshift > shift)
                        pgtable_cache_add(pdshift - shift, NULL);
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
                else if (!hugepte_cache) {
                        /*
                         * Create a kmem cache for hugeptes.  The bottom bits in
                         * the pte have size information encoded in them, so
                         * align them to allow this
                         */
                        hugepte_cache = kmem_cache_create("hugepte-cache",
                                                          sizeof(pte_t),
                                                          HUGEPD_SHIFT_MASK + 1,
                                                          0, NULL);
                        if (hugepte_cache == NULL)
                                panic("%s: Unable to create kmem cache "
                                      "for hugeptes\n", __func__);
                }
#endif
        }

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
        /* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
        if (mmu_psize_defs[MMU_PAGE_4M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
        else if (mmu_psize_defs[MMU_PAGE_512K].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
#else
        /* Set default large page size. Currently, we pick 16M or 1M
         * depending on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
        else if (mmu_psize_defs[MMU_PAGE_2M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif
        return 0;
}

arch_initcall(hugetlbpage_init);
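/*
 * Flush each subpage of a huge page so the instruction cache is coherent
 * with the data cache; highmem subpages are mapped with kmap_atomic() first.
 */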
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page + i));
                } else {
                        start = kmap_atomic(page + i);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start);
                }
        }
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against teardown,
 * and we can follow the address down to the page and take a ref on it.
 * This function needs to be called with interrupts disabled. We use this variant
 * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
                        bool *is_thp, unsigned *hpage_shift)
{
        pgd_t pgd, *pgdp;
        pud_t pud, *pudp;
        pmd_t pmd, *pmdp;
        pte_t *ret_pte;
        hugepd_t *hpdp = NULL;
        unsigned pdshift = PGDIR_SHIFT;

        if (hpage_shift)
                *hpage_shift = 0;

        if (is_thp)
                *is_thp = false;

        pgdp = pgdir + pgd_index(ea);
        pgd  = READ_ONCE(*pgdp);
        /*
         * Always operate on the local stack value. This makes sure the
         * value doesn't get updated by a parallel THP split/collapse,
         * page fault or a page unmap. The returned pte_t * is still not
         * stable, so those conditions should be rechecked at the point of use.
         */
        if (pgd_none(pgd))
                return NULL;
        else if (pgd_huge(pgd)) {
                ret_pte = (pte_t *)pgdp;
                goto out;
        } else if (is_hugepd(__hugepd(pgd_val(pgd))))
                hpdp = (hugepd_t *)&pgd;
        else {
                /*
                 * Even if we end up with an unmap, the pgtable will not
                 * be freed, because we do an rcu free and here we are
                 * irq disabled
                 */
                pdshift = PUD_SHIFT;
                pudp = pud_offset(&pgd, ea);
                pud  = READ_ONCE(*pudp);

                if (pud_none(pud))
                        return NULL;
                else if (pud_huge(pud)) {
                        ret_pte = (pte_t *)pudp;
                        goto out;
                } else if (is_hugepd(__hugepd(pud_val(pud))))
                        hpdp = (hugepd_t *)&pud;
                else {
                        pdshift = PMD_SHIFT;
                        pmdp = pmd_offset(&pud, ea);
                        pmd  = READ_ONCE(*pmdp);
                        /*
                         * A hugepage collapse is captured by pmd_none, because
                         * it marks the pmd none and does a hpte invalidate.
                         */
                        if (pmd_none(pmd))
                                return NULL;

                        if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
                                if (is_thp)
                                        *is_thp = true;
                                ret_pte = (pte_t *)pmdp;
                                goto out;
                        }

                        if (pmd_huge(pmd) || pmd_large(pmd)) {
                                ret_pte = (pte_t *)pmdp;
                                goto out;
                        } else if (is_hugepd(__hugepd(pmd_val(pmd))))
                                hpdp = (hugepd_t *)&pmd;
                        else
                                return pte_offset_kernel(&pmd, ea);
                }
        }
        if (!hpdp)
                return NULL;

        ret_pte = hugepte_offset(*hpdp, ea, pdshift);
        pdshift = hugepd_shift(*hpdp);
out:
        if (hpage_shift)
                *hpage_shift = pdshift;
        return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);
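/*
 * Lockless get_user_pages_fast() leaf helper: speculatively take references
 * on the compound head for each subpage in the range, then recheck that the
 * huge PTE did not change under us and back out if it did.
 */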
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long pte_end;
        struct page *head, *page;
        pte_t pte;
        int refs;

        pte_end = (addr + sz) & ~(sz - 1);
        if (pte_end < end)
                end = pte_end;

        pte = READ_ONCE(*ptep);

        if (!pte_access_permitted(pte, write))
                return 0;

        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);

        page = head + ((addr & (sz - 1)) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */