mm: move the powerpc hugepd code to mm/gup.c
arch/powerpc/mm/hugetlbpage.c (linux-2.6-microblaze.git)
/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

bool hugetlb_disabled = false;

#define hugepd_none(hpd)        (hpd_val(hpd) == 0)

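/*
 * PTE_T_ORDER is log2(sizeof(pte_t) / sizeof(void *)): for power-of-two sizes
 * __builtin_ffs() returns log2(size) + 1, so the difference of the two values
 * is the size ratio expressed as a shift count.  Passing it to PGT_CACHE()
 * should therefore yield a cache whose objects are just big enough to hold a
 * single pte_t, which is all a hugepd needs to point at when the huge page
 * covers the whole directory slot.
 */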
#define PTE_T_ORDER     (__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *)))

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        /*
         * Only called for hugetlbfs pages, hence can ignore THP and the
         * irq disabled walk.
         */
        return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

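/*
 * Allocate the huge PTE table a hugepd points to and hook it into one or more
 * directory entries.  When the huge page is bigger than the range covered by
 * a single directory slot (pshift >= pdshift, e.g. 8M pages on the 8xx, which
 * span two PGD entries), every slot the page covers must point at the same
 * table so the page table walk finds it regardless of which slot it enters
 * through.
 */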
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned int pdshift,
                           unsigned int pshift, spinlock_t *ptl)
{
        struct kmem_cache *cachep;
        pte_t *new;
        int i;
        int num_hugepd;

        if (pshift >= pdshift) {
                cachep = PGT_CACHE(PTE_T_ORDER);
                num_hugepd = 1 << (pshift - pdshift);
        } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
                cachep = PGT_CACHE(PTE_INDEX_SIZE);
                num_hugepd = 1;
        } else {
                cachep = PGT_CACHE(pdshift - pshift);
                num_hugepd = 1;
        }

        new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        /*
         * Make sure other cpus find the hugepd set only after a
         * properly initialized page table is visible to them.
         * For more details look for comment in __pte_alloc().
         */
        smp_wmb();

        spin_lock(ptl);
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
                hugepd_populate(hpdp, new, pshift);
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        *hpdp = __hugepd(0);
                kmem_cache_free(cachep, new);
        } else {
                kmemleak_ignore(new);
        }
        spin_unlock(ptl);
        return 0;
}

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
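/*
 * huge_pte_alloc() is what the generic hugetlb fault path uses to get (and if
 * necessary build) the PTE slot for a faulting address.  A caller looks
 * roughly like this (a sketch, not copied from mm/hugetlb.c):
 *
 *      ptep = huge_pte_alloc(mm, address & huge_page_mask(h), huge_page_size(h));
 *      if (!ptep)
 *              return VM_FAULT_OOM;
 *
 * huge_pte_alloc() masks the address down to the huge page boundary itself,
 * and sz selects which level of the tree (or which hugepd) backs the mapping.
 */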
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;
        spinlock_t *ptl;

        addr &= ~(sz - 1);
        pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
        if (pshift == PGDIR_SHIFT)
                /* 16GB huge page */
                return (pte_t *)pg;
        else if (pshift > PUD_SHIFT) {
                /*
                 * We need to use hugepd table
                 */
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (!pu)
                        return NULL;
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (!pm)
                                return NULL;
                        if (pshift == PMD_SHIFT)
                                /* 16MB hugepage */
                                return (pte_t *)pm;
                        else {
                                ptl = pmd_lockptr(mm, pm);
                                hpdp = (hugepd_t *)pm;
                        }
                }
        }
#else
        if (pshift >= PGDIR_SHIFT) {
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (!pu)
                        return NULL;
                if (pshift >= PUD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (!pm)
                                return NULL;
                        ptl = pmd_lockptr(mm, pm);
                        hpdp = (hugepd_t *)pm;
                }
        }
#endif
        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
                                                  pdshift, pshift, ptl))
                return NULL;

        return hugepte_offset(*hpdp, addr, pdshift);
}


#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES       1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES] __initdata;
static unsigned nr_gpages __initdata;

/*
 * Build the list of addresses of gigantic pages.  This function is used in
 * early boot before the buddy allocator is set up.
 */
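/*
 * Note: there is no bounds check against MAX_NUMBER_GPAGES here; the early
 * device tree scan is trusted not to report more gigantic pages than the
 * array can hold.
 */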
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;

        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
#endif

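/*
 * On hash MMU LPARs (pseries guests) gigantic pages are pre-reserved by the
 * firmware and advertised through the device tree, so the bootmem allocation
 * is redirected to the gpage list collected above; everything else goes
 * through the generic bootmem huge page allocator.
 */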
int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
                return pseries_alloc_bootmem_huge_page(h);
#endif
        return __alloc_bootmem_huge_page(h);
}

#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

        free_page((unsigned long)batch);
}

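/*
 * Defer freeing of huge PTE tables with RCU: lockless walkers such as the
 * fast GUP path may still be traversing the hugepd we just cleared, so the
 * backing table must not be reused until a grace period has elapsed.  For a
 * single-threaded / CPU-local mm no concurrent walker can exist and the table
 * is freed immediately.  Batches are allocated with GFP_ATOMIC and the code
 * assumes that small allocation succeeds.
 */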
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            mm_is_thread_local(tlb->mm)) {
                kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
                put_cpu_var(hugepd_freelist_cur);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
        put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

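/*
 * Clear the hugepd entry (or the whole run of entries that share one huge PTE
 * table) and free the table, but only if the range being torn down really
 * covers it: the floor/ceiling limits have the same meaning as in the generic
 * free_pgd_range(), i.e. never free page table pages that a neighbouring VMA
 * might still need.
 */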
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;
        unsigned int shift = hugepd_shift(*hpdp);

        /* Note: On fsl the hpdp may be the first of several */
        if (shift > pdshift)
                num_hugepd = 1 << (shift - pdshift);

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                *hpdp = __hugepd(0);

        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else if (IS_ENABLED(CONFIG_PPC_8xx))
                pgtable_free_tlb(tlb, hugepte,
                                 get_hugepd_cache_index(PTE_INDEX_SIZE));
        else
                pgtable_free_tlb(tlb, hugepte,
                                 get_hugepd_cache_index(pdshift - shift));
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                unsigned long more;

                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
                        /*
                         * If it is not a hugepd pointer, the entry should
                         * already have been cleared.
                         */
                        WARN_ON(!pmd_none_or_clear_bad(pmd));
                        continue;
                }
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
                if (more > next)
                        next = more;

                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pud_val(*pud)))) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}

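/*
 * Called from follow_page()/GUP when a page table walk hits a hugepd entry.
 * Unlike normal huge PTEs, hugepd entries are protected by
 * mm->page_table_lock rather than the split PTL, and hitting a migration
 * entry makes us drop the lock, wait for the migration to finish, and retry
 * the lookup.
 */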
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift)
{
        pte_t *ptep;
        spinlock_t *ptl;
        struct page *page = NULL;
        unsigned long mask;
        int shift = hugepd_shift(hpd);
        struct mm_struct *mm = vma->vm_mm;

retry:
        /*
         * Hugepage directory entries are protected by mm->page_table_lock.
         * Use that instead of huge_pte_lockptr().
         */
        ptl = &mm->page_table_lock;
        spin_lock(ptl);

        ptep = hugepte_offset(hpd, address, pdshift);
        if (pte_present(*ptep)) {
                mask = (1UL << shift) - 1;
                page = pte_page(*ptep);
                page += ((address & mask) >> PAGE_SHIFT);
                if (flags & FOLL_GET)
                        get_page(page);
        } else {
                if (is_hugetlb_entry_migration(*ptep)) {
                        spin_unlock(ptl);
                        __migration_entry_wait(mm, ptep, ptl);
                        goto retry;
                }
        }
        spin_unlock(ptl);
        return page;
}

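/*
 * With the slice-based address space layout (hash MMU), a huge page mapping
 * may only be placed in a slice whose page size matches, so the search has to
 * go through slice_get_unmapped_area().  Radix has no slice restriction and
 * uses its own topdown search instead.
 */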
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
        if (radix_enabled())
                return radix__hugetlb_get_unmapped_area(file, addr, len,
                                                       pgoff, flags);
#endif
        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        /* With radix we don't use slices, so derive the size from the vma */
        if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
                unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

                return 1UL << mmu_psize_to_shift(psize);
        }
        return vma_kernel_pagesize(vma);
}

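/*
 * Validate a requested huge page size and register a matching hstate.  The
 * size must be a power of two larger than PAGE_SIZE and must map onto one of
 * the MMU page sizes the platform advertises in mmu_psize_defs[].  For
 * example (assuming a hash MMU that supports 16M pages), booting with
 * "hugepagesz=16M" reaches this function as add_huge_page_size(0x1000000).
 */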
static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /*
         * Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits.
         */
        if (size <= PAGE_SIZE || !is_power_of_2(size))
                return -EINVAL;

        mmu_psize = check_and_get_huge_psize(shift);
        if (mmu_psize < 0)
                return -EINVAL;

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if the huge page size has already been set up */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}

static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0) {
                hugetlb_bad_size();
                pr_err("Invalid huge page size specified(%llu)\n", size);
        }

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

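/*
 * Walk every MMU page size the platform supports, register the usable ones as
 * huge page sizes and create the kmem caches that back the hugepd tables.
 * Which cache is needed depends on whether the directory slot covers more
 * address space than the huge page (pdshift > shift, so a table of huge PTEs
 * is required) or not (the hugepd points at a single huge PTE).
 */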
static int __init hugetlbpage_init(void)
{
        int psize;

        if (hugetlb_disabled) {
                pr_info("HugeTLB support is disabled!\n");
                return 0;
        }

        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
            !mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
                if (shift > PGDIR_SHIFT)
                        continue;
                else if (shift > PUD_SHIFT)
                        pdshift = PGDIR_SHIFT;
                else if (shift > PMD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PMD_SHIFT;
#else
                if (shift < PUD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PGDIR_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;
#endif

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;
                /*
                 * If pdshift and shift are the same, we don't use a
                 * pgtable cache for the hugepd.
                 */
                if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
                        pgtable_cache_add(PTE_INDEX_SIZE);
                else if (pdshift > shift)
                        pgtable_cache_add(pdshift - shift);
                else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
                        pgtable_cache_add(PTE_T_ORDER);
        }

        if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
                hugetlbpage_init_default();

        return 0;
}

arch_initcall(hugetlbpage_init);

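/*
 * A compound huge page is flushed one small page at a time because
 * __flush_dcache_icache() works on a single kernel-mapped page; highmem
 * subpages have no permanent kernel mapping and need a temporary kmap first.
 */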
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
                        start = kmap_atomic(page+i);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start);
                }
        }
}