/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34
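
/*
 * Shift of the default huge page size; chosen at boot by
 * hugetlbpage_init() below (16M or 1M on server MMUs, 4M on FSL Book3E).
 */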
unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
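
/*
 * Helpers to convert between a hardware page shift and its index in
 * mmu_psize_defs[], and back again.
 */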
static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

#define hugepd_none(hpd)	((hpd).pd == 0)
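
/*
 * Walk the page tables for @ea, stopping at a hugepd entry if one is
 * found on the way down.  Returns a pointer to the pte (normal or huge)
 * and, if @shift is non-NULL, reports the page size shift of the mapping
 * (0 for a normal pte).  Returns NULL if nothing is mapped at @ea.
 */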
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);
	if (is_hugepd(pg)) {
		hpdp = (hugepd_t *)pg;
	} else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);
		if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);
			if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm)) {
				return pte_offset_kernel(pm, ea);
			}
		}
	}

	if (!hpdp)
		return NULL;

	if (shift)
		*shift = hugepd_shift(*hpdp);
	return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}
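
/*
 * Allocate a hugepte table and link it from the page table entry (or,
 * on FSL Book3E, from each of the multiple entries) at *hpdp.  The low
 * bits of the pointer encode the page size shift so that walkers can
 * recover the mapping size from the hugepd alone.
 */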
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#else
	cachep = PGT_CACHE(pdshift - pshift);
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#else
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
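
/*
 * Find or allocate the page table entries down to the level that holds
 * the hugepd for a @sz mapping at @addr, then return the hugepte within.
 */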
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = alloc_bootmem(sizeof(struct huge_bootmem_page));
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}

/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
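
/*
 * For example, a command line of "hugepagesz=1g hugepages=2" would record
 * two gigabyte-sized gpages here (the size is illustrative; which sizes
 * actually qualify depends on the entries in mmu_psize_defs).
 */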

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}

#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	/* pmd sharing of hugepages is not implemented on powerpc */
	return 0;
}
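
/*
 * On FSL Book3E, hugepte tables are freed in RCU-deferred batches:
 * lockless page table walkers may still be traversing a hugepte table
 * after it has been unlinked, so the kmem_cache_free() is postponed
 * until a grace period has elapsed, unless the mm is effectively
 * single-threaded and an immediate free is safe.
 */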
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &__get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
}
#endif
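
/*
 * Tear down the hugepd at *hpdp (all num_hugepd copies of it on FSL) if
 * the range being freed fully covers it and lies within floor/ceiling,
 * then hand the hugepte table to the appropriate deferred-free scheme.
 */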
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* Note: On fsl the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
	unsigned int shift = hugepd_shift(*hpdp);
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

	tlb->need_flush = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	hugepd_free(tlb, hugepte);
#else
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}
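
/*
 * Look up the page backing a huge mapping at @address and return the
 * subpage of the compound page corresponding to that address.
 */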
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}

/* Hugepages are tracked via hugepd entries, never at the pmd/pud level */
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}
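
/*
 * Lockless get_user_pages() fast path for a single hugepte: snapshot the
 * pte, take a speculative reference on the compound head, then recheck
 * the pte and back out if it changed under us.
 */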
static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail page need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
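
/*
 * Iterate over every hugepte below a hugepd that is covered by
 * [addr, end) and apply gup_hugepte() to each in turn.
 */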
int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}
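
/*
 * With CONFIG_PPC_MM_SLICES the address space is carved into slices,
 * each constrained to a single page size, so placement of a huge
 * mapping is delegated to the slice allocator.
 */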
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}
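
/*
 * e500-style MMUs support page sizes that are powers of 4 (4K, 16K,
 * 64K, ...), hence the is_power_of_4() test in add_huge_page_size().
 */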
static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					  HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;

		pgtable_cache_add(pdshift - shift, NULL);
		if (!PGT_CACHE(pdshift - shift))
			panic("hugetlbpage_init(): could not create "
			      "pgtable cache for %d bit pagesize\n", shift);
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
module_init(hugetlbpage_init);
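
/*
 * Flush the data and instruction caches for every subpage of a compound
 * huge page, mapping highmem subpages with kmap_atomic() as needed.
 */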
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}