// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
 * this does -not- include the 603, however, which shares the implementation
 * with hash based processors)
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/hugetlb.h>

#include <mm/mmu_decl.h>
/*
 * This struct lists the sw-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#elif defined(CONFIG_PPC_8xx)
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
	},
	[MMU_PAGE_512K] = {
		.shift	= 19,
	},
	[MMU_PAGE_8M] = {
		.shift	= 23,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_PPC_FSL_BOOK3E */
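
/*
 * Reading the table above (illustrative): on generic Book3E the 4K entry
 * has .shift = 12 (a 2^12 byte page), .enc = BOOK3E_PAGESZ_4K (the
 * hardware TSIZE encoding that mmu_get_tsize() below hands to the
 * invalidation helpers), and .ind = 20, meaning PTE pages for the 4K
 * size are mapped by 1MB (2^20) indirect entries.
 */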
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */
/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
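
/*
 * Typical caller pattern (illustrative only; "vma", "uaddr", "ptep",
 * "entry" and "dirty" are hypothetical, and this shows generic mm usage
 * rather than an API defined here): after updating a PTE, the stale
 * translation for that one page is shot down, while tearing down a
 * whole context flushes the mm instead:
 *
 *	ptep_set_access_flags(vma, uaddr, ptep, entry, dirty);
 *	flush_tlb_page(vma, uaddr);
 *
 *	flush_tlb_mm(vma->vm_mm);
 */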
#ifndef CONFIG_PPC_8xx
/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}
/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
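
/*
 * Concretely, the snapshot pattern the note above describes, and which
 * the flush routines below all follow, looks like this (an illustrative
 * restatement of the code that follows, not an additional helper):
 *
 *	preempt_disable();
 *	pid = mm->context.id;	// snapshot; may go stale concurrently
 *	if (pid != MMU_NO_CONTEXT)
 *		_tlbil_pid(pid);	// flushing a stale PID is harmless
 *	preempt_enable();
 */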
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (WARN_ON(!mm))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */
/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
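
/*
 * A sketch of the threshold heuristic suggested above (not wired up;
 * the cutoff name and value are hypothetical): flush page by page for
 * small ranges, and fall back to the full mm flush otherwise.
 *
 *	#define TLB_RANGE_FLUSH_MAX_PAGES 16
 *
 *	if ((end - start) >> PAGE_SHIFT <= TLB_RANGE_FLUSH_MAX_PAGES) {
 *		unsigned long addr;
 *
 *		for (addr = start; addr < end; addr += PAGE_SIZE)
 *			flush_tlb_page(vma, addr);
 *	} else {
 *		flush_tlb_mm(vma->vm_mm);
 *	}
 */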
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */
#ifdef CONFIG_PPC64
/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI,
		 * but it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}
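
/*
 * Worked example for the VPTE math above, assuming 4K pages
 * (PAGE_SHIFT = 12) and address = 0xc000000012340000 (values are
 * illustrative):
 *
 *	rid  = (address & rmask) | 0x1000... = 0xd000000000000000
 *	vpte = address & ~rmask              = 0x0000000012340000
 *	vpte = (vpte >> 9) & ~0xfff          = 0x0000000000091000
 *	vpte |= rid                          = 0xd000000000091000
 *
 * i.e. the address, within the region-0x1 virtual linear page table,
 * of the PTE page covering "address", which is then invalidated as a
 * normal (direct) entry.
 */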
static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}
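
	/*
	 * Worked example for the 4^shift KB encoding above: a 4K page has
	 * def->shift = 12, so (12 - 10) >> 1 = 1, i.e. 4^1 KB = 4KB; a 16M
	 * page has shift 24, giving (24 - 10) >> 1 = 7, i.e. 4^7 KB = 16MB.
	 * Odd shifts (8M, shift 23, for instance) are not powers of 4 and
	 * were skipped by the "shift & 1" test.
	 */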
	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;
		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (!def->shift)
				continue;

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif
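
	/*
	 * Example of the TLBnPS decoding used above and below: TLBnPS is a
	 * bitmap of supported sizes in which bit n stands for a page of
	 * 1KB << n. A 4K page has def->shift = 12 and is supported when
	 * bit (12 - 10) = 2 is set; 1M (shift 20) maps to bit 10, and so on.
	 */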
	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;
	book3e_htw_mode = PPC_HTW_IBM;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;

		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}
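
	/*
	 * Example of one EPTCFG (ps, sps) pair decoded above (illustrative
	 * values): ps = 10 and sps = 2 describe 1M (2^(10+10)) indirect
	 * entries holding PTEs that map 4K (2^(2+10)) pages, so the 1M
	 * entry gets MMU_PAGE_SIZE_INDIRECT and the 4K entry records
	 * .ind = ps + 10 = 20.
	 */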
out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}
static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */
	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}
/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;
	/* Set MAS4 based on page table setting */

	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
		break;

	case PPC_HTW_NONE:
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);
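
	/*
	 * For example, in the PPC_HTW_NONE case the value written above
	 * makes the hardware preload the MAS registers on a TLB miss with
	 * a 4K, memory-coherent default (0x4 in the WIMGE field is the M
	 * bit), which is what the software miss handlers expect to
	 * complete.
	 */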
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
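
		/*
		 * Example: on a core whose TLB1CFG advertises 64 TLBCAM
		 * entries (a common e6500 configuration), num_cams = 16
		 * entries are reserved here for the bolted linear mapping,
		 * leaving the rest to the round-robin next_tlbcam_idx
		 * allocator above.
		 */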
		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false, true);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}
static void __init early_init_mmu_global(void)
{
	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;
	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 * check what page size combos are supported by the HW. It
	 * also doesn't handle the case where a separate array holds
	 * the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

	ioremap_bot = IOREMAP_BASE;
}
static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux. We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}
/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit). We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one. Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped. We still limit the max to 1G even if we'll eventually map
	 * more. This is due to what the early init code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true, true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
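
/*
 * Example: with a first memblock of 2GB, ppc64_rma_size is cropped to
 * 0x40000000 (1GB) by the min_t() above, so early allocations stay
 * within the first gigabyte above first_memblock_base even though more
 * RAM may eventually get mapped.
 */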
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif

#ifdef CONFIG_PPC_MM_SLICES
	mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
#endif
}
#endif /* CONFIG_PPC64 */