powerpc/8xx: Manage 512k huge pages as standard pages.
arch/powerpc/mm/hugetlbpage.c
/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

bool hugetlb_disabled = false;

#define hugepd_none(hpd)        (hpd_val(hpd) == 0)

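/*
 * PTE_T_ORDER is the size of a linux PTE expressed as a power-of-two
 * multiple of a pointer: sizeof(void *) << PTE_T_ORDER == sizeof(pte_basic_t).
 * It is used below to pick a pgtable cache whose objects hold a single huge
 * PTE when the hugepd does not point at a full sub-table.
 */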
#define PTE_T_ORDER     (__builtin_ffs(sizeof(pte_basic_t)) - \
                         __builtin_ffs(sizeof(void *)))

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        /*
         * Only called for hugetlbfs pages, hence can ignore THP and the
         * irq disabled walk.
         */
        return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned int pdshift,
                           unsigned int pshift, spinlock_t *ptl)
{
        struct kmem_cache *cachep;
        pte_t *new;
        int i;
        int num_hugepd;

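        /*
         * Pick the backing object for the hugepd:
         *  - pshift >= pdshift: the huge page covers at least one full
         *    directory entry, so only a single huge PTE is needed, replicated
         *    into 1 << (pshift - pdshift) directory slots below;
         *  - 8xx: the huge page lives in a standard PTE page;
         *  - otherwise: a sub-table spanning pdshift - pshift bits, from its
         *    own pgtable cache.
         */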
        if (pshift >= pdshift) {
                cachep = PGT_CACHE(PTE_T_ORDER);
                num_hugepd = 1 << (pshift - pdshift);
                new = NULL;
        } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
                cachep = NULL;
                num_hugepd = 1;
                new = pte_alloc_one(mm);
        } else {
                cachep = PGT_CACHE(pdshift - pshift);
                num_hugepd = 1;
                new = NULL;
        }

        if (!cachep && !new) {
                WARN_ONCE(1, "No page table cache created for hugetlb tables");
                return -ENOMEM;
        }

        if (cachep)
                new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        /*
         * Make sure other cpus find the hugepd set only after a
         * properly initialized page table is visible to them.
         * For more details look for comment in __pte_alloc().
         */
        smp_wmb();

        spin_lock(ptl);
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
                hugepd_populate(hpdp, new, pshift);
        }
        /* If we bailed out of the for loop early, an error occurred; clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        *hpdp = __hugepd(0);
                if (cachep)
                        kmem_cache_free(cachep, new);
                else
                        pte_free(mm, new);
        } else {
                kmemleak_ignore(new);
        }
        spin_unlock(ptl);
        return 0;
}

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;
        spinlock_t *ptl;

        addr &= ~(sz-1);
        pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
        if (pshift == PGDIR_SHIFT)
                /* 16GB huge page */
                return (pte_t *) pg;
        else if (pshift > PUD_SHIFT) {
                /*
                 * We need to use hugepd table
                 */
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (!pu)
                        return NULL;
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (!pm)
                                return NULL;
                        if (pshift == PMD_SHIFT)
                                /* 16MB hugepage */
                                return (pte_t *)pm;
                        else {
                                ptl = pmd_lockptr(mm, pm);
                                hpdp = (hugepd_t *)pm;
                        }
                }
        }
#else
        if (pshift >= PGDIR_SHIFT) {
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (!pu)
                        return NULL;
                if (pshift >= PUD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (!pm)
                                return NULL;
                        ptl = pmd_lockptr(mm, pm);
                        hpdp = (hugepd_t *)pm;
                }
        }
#endif
        if (!hpdp)
                return NULL;

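        /*
         * 512k pages on the 8xx are managed as standard pages: they sit in
         * an ordinary PTE page under the PMD entry instead of a hugepd, so
         * hand back a normal PTE here.
         */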
        if (IS_ENABLED(CONFIG_PPC_8xx) && sz == SZ_512K)
                return pte_alloc_map(mm, (pmd_t *)hpdp, addr);

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
                                                  pdshift, pshift, ptl))
                return NULL;

        return hugepte_offset(*hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES       1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
#endif

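/*
 * For the hash MMU under an LPAR, gigantic pages come from the list built by
 * pseries_add_gpage() above; everything else goes through the generic bootmem
 * huge page allocator.
 */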
int __init alloc_bootmem_huge_page(struct hstate *h)
{

#ifdef CONFIG_PPC_BOOK3S_64
        if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
                return pseries_alloc_bootmem_huge_page(h);
#endif
        return __alloc_bootmem_huge_page(h);
}

#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

        free_page((unsigned long)batch);
}

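/*
 * hugepd backing objects may still be visible to concurrent lockless page
 * table walkers, so frees are batched per cpu and deferred through RCU.
 * If the mm has a single user or is only in use on this CPU, the object can
 * be freed immediately instead.
 */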
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            mm_is_thread_local(tlb->mm)) {
                kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
                put_cpu_var(hugepd_freelist_cur);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
        put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;
        unsigned int shift = hugepd_shift(*hpdp);

        /* Note: On fsl the hpdp may be the first of several */
        if (shift > pdshift)
                num_hugepd = 1 << (shift - pdshift);

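        /*
         * As in the generic free_p?d_range() helpers, bail out if the
         * pdshift-aligned span around this mapping reaches outside
         * [floor, ceiling).
         */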
        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                *hpdp = __hugepd(0);

        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else
                pgtable_free_tlb(tlb, hugepte,
                                 get_hugepd_cache_index(pdshift - shift));
}

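/*
 * On the 8xx, 512k huge pages sit in a normal PTE page under the PMD, so
 * tearing the mapping down means freeing that PTE page just like the
 * generic page table code would.
 */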
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr)
{
        pgtable_t token = pmd_pgtable(*pmd);

        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);
        mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                unsigned long more;

                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
                        if (pmd_none_or_clear_bad(pmd))
                                continue;

                        /*
                         * If it is not a hugepd pointer, we should already
                         * have found it cleared; only the 8xx places normal
                         * PTE pages below the PMD for huge pages.
                         */
                        WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));

                        hugetlb_free_pte_range(tlb, pmd, addr);

                        continue;
                }
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
                if (more > next)
                        next = more;

                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pud_val(*pud)))) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}

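/*
 * Look up the struct page backing @address in a hugepd-mapped area,
 * returning the sub-page within the huge page.  A migration entry makes us
 * drop the lock, wait for the migration to finish and retry.
 */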
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift)
{
        pte_t *ptep;
        spinlock_t *ptl;
        struct page *page = NULL;
        unsigned long mask;
        int shift = hugepd_shift(hpd);
        struct mm_struct *mm = vma->vm_mm;

retry:
        /*
         * hugepage directory entries are protected by mm->page_table_lock
         * Use this instead of huge_pte_lockptr
         */
        ptl = &mm->page_table_lock;
        spin_lock(ptl);

        ptep = hugepte_offset(hpd, address, pdshift);
        if (pte_present(*ptep)) {
                mask = (1UL << shift) - 1;
                page = pte_page(*ptep);
                page += ((address & mask) >> PAGE_SHIFT);
                if (flags & FOLL_GET)
                        get_page(page);
        } else {
                if (is_hugetlb_entry_migration(*ptep)) {
                        spin_unlock(ptl);
                        __migration_entry_wait(mm, ptep, ptl);
                        goto retry;
                }
        }
        spin_unlock(ptl);
        return page;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
        if (radix_enabled())
                return radix__hugetlb_get_unmapped_area(file, addr, len,
                                                        pgoff, flags);
#endif
        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        /* With radix we don't use slice, so derive it from vma */
        if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
                unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

                return 1UL << mmu_psize_to_shift(psize);
        }
        return vma_kernel_pagesize(vma);
}

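/*
 * Register one huge page size with the hugetlb core, provided it is a
 * power of two the MMU supports; e.g. 512k has shift 19 and 16M has
 * shift 24.
 */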
static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
        if (size <= PAGE_SIZE || !is_power_of_2(size))
                return -EINVAL;

        mmu_psize = check_and_get_huge_psize(shift);
        if (mmu_psize < 0)
                return -EINVAL;

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if huge page size has already been setup */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}

static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0) {
                hugetlb_bad_size();
                pr_err("Invalid huge page size specified (%llu)\n", size);
        }

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

static int __init hugetlbpage_init(void)
{
        bool configured = false;
        int psize;

        if (hugetlb_disabled) {
                pr_info("HugeTLB support is disabled!\n");
                return 0;
        }

        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
            !mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

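                /*
                 * pdshift is the shift of the page table level that the
                 * hugepd (or, on the 8xx, the PTE page) hangs off for this
                 * page size.
                 */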
#ifdef CONFIG_PPC_BOOK3S_64
                if (shift > PGDIR_SHIFT)
                        continue;
                else if (shift > PUD_SHIFT)
                        pdshift = PGDIR_SHIFT;
                else if (shift > PMD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PMD_SHIFT;
#else
                if (shift < PUD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PGDIR_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;
#endif

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;
                /*
                 * If pdshift and shift are the same, we don't use a
                 * pgt cache for the hugepd.
                 */
                if (pdshift > shift) {
                        if (!IS_ENABLED(CONFIG_PPC_8xx))
                                pgtable_cache_add(pdshift - shift);
                } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
                           IS_ENABLED(CONFIG_PPC_8xx)) {
                        pgtable_cache_add(PTE_T_ORDER);
                }

                configured = true;
        }

        if (configured) {
                if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
                        hugetlbpage_init_default();
        } else
                pr_info("Failed to initialize. Disabling HugeTLB\n");

        return 0;
}

arch_initcall(hugetlbpage_init);

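/*
 * Make the icache coherent with the dcache for every sub-page of a huge
 * page; highmem sub-pages need a temporary kernel mapping first.
 */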
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < compound_nr(page); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
                        start = kmap_atomic(page+i);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start);
                }
        }
}