/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2
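/*
 * RIC (Radix Invalidation Control) selects what a tlbie/tlbiel
 * invalidates: TLB entries only (0), the Page Walk Cache only (1), or
 * everything the instruction can reach, including cached table
 * entries (2).
 */
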
/*
 * tlbiel instruction for radix, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
{
	unsigned long rb;
	unsigned long rs;

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
		     : "memory");
}

static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and the entire Page Walk Cache
	 * and partition table entries. Then flush the remaining sets of the
	 * TLB.
	 */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
	for (set = 1; set < num_sets; set++)
		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);

	/* Do the same for process scoped entries. */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
	for (set = 1; set < num_sets; set++)
		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

	asm volatile("ptesync": : :"memory");
}

void radix__tlbiel_all(unsigned int action)
{
	unsigned int is;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		is = 3;
		break;
	case TLB_INVAL_SCOPE_LPID:
		is = 2;
		break;
	default:
		BUG();
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
	else
		WARN(1, "%s called on pre-POWER9 CPU\n", __func__);

	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

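/*
 * The __tlbie*()/__tlbiel*() helpers below each encode a single tlbie
 * or tlbiel instruction; callers are responsible for the surrounding
 * synchronisation (ptesync before, and ptesync or eieio; tlbsync;
 * ptesync after).
 */
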
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

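/*
 * Register layout used by these helpers, as implied by the shifts
 * (IBM bit numbering, PPC_BITLSHIFT(n) == 63 - n): the IS field sits
 * at rb bits 52:53, the congruence-class ("set") number ends at bit
 * 51, the AP page-size code ends at bit 58 in the VA-based forms
 * below, and the PID occupies the upper word of rs.
 */
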
static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbiel_lpid(unsigned long lpid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(52); /* IS = 2 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = 0;  /* LPID comes from LPIDR */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
}

static inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(52); /* IS = 2 */
	rs = lpid;
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(52); /* IS = 2 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = 0;  /* LPID comes from LPIDR */
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
				   unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid;
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

static inline void fixup_tlbie(void)
{
	unsigned long pid = 0;
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

static inline void fixup_tlbie_lpid(unsigned long lpid)
{
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

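/*
 * On CPUs with CPU_FTR_P9_TLBIE_BUG, the fixup helpers above chase the
 * real invalidation with a ptesync and a dummy tlbie to a high, unused
 * address, working around a POWER9 ordering erratum between stores and
 * broadcast invalidations.
 */
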
/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

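/*
 * tlbiel invalidates a single congruence class per execution, so a
 * full local flush has to walk every set as above. The broadcast
 * tlbie form used below covers the whole TLB in one operation, at the
 * cost of going out on the interconnect.
 */
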
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Work around the fact that the "ric" argument to __tlbie_pid
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_pid(pid, RIC_FLUSH_TLB);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_pid(pid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_pid(pid, RIC_FLUSH_ALL);
	}
	fixup_tlbie();
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbiel_lpid(unsigned long lpid, unsigned long ric)
{
	int set;

	VM_BUG_ON(mfspr(SPRN_LPID) != lpid);

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_lpid(lpid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
		__tlbiel_lpid(lpid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Work around the fact that the "ric" argument to __tlbie_lpid
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_lpid(lpid, RIC_FLUSH_TLB);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_lpid(lpid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_lpid(lpid, RIC_FLUSH_ALL);
	}
	fixup_tlbie_lpid(lpid);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
{
	int set;

	VM_BUG_ON(mfspr(SPRN_LPID) != lpid);

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_lpid_guest(lpid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
		__tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
	__tlbiel_va_range(start, end, pid, page_size, psize);
	asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	fixup_tlbie();
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
				  unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_lpid_va(va, lpid, ap, ric);
	fixup_tlbie_lpid(lpid);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				   unsigned long pid, unsigned long page_size,
				   unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbie_pid(pid, RIC_FLUSH_PWC);
	__tlbie_va_range(start, end, pid, page_size, psize);
	fixup_tlbie();
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
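/*
 * An mm is treated as effectively single-threaded once only one user
 * remains and we are running in its context. Coprocessor (copros)
 * users keep translations live independently of CPU threads, so they
 * rule this optimisation out.
 */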
static bool mm_is_singlethreaded(struct mm_struct *mm)
{
	if (atomic_read(&mm->context.copros) > 0)
		return false;
	if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
		return true;
	return false;
}

static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
	/*
	 * P9 nest MMU has issues with the page walk cache
	 * caching PTEs and not flushing them properly when
	 * RIC = 0 for a PID/LPID invalidate
	 */
	if (atomic_read(&mm->context.copros) > 0)
		return true;
	return false;
}

static void do_exit_flush_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;
	unsigned long pid = mm->context.id;

	if (current->mm == mm)
		return; /* Local CPU */

	if (current->active_mm == mm) {
		/*
		 * Must be a kernel thread because sender is single-threaded.
		 */
		BUG_ON(current->mm);
		mmgrab(&init_mm);
		switch_mm(mm, &init_mm, current);
		current->active_mm = &init_mm;
		mmdrop(mm);
	}
	_tlbiel_pid(pid, RIC_FLUSH_ALL);
}

static void exit_flush_lazy_tlbs(struct mm_struct *mm)
{
	/*
	 * Would be nice if this was async so it could be run in
	 * parallel with our local flush, but generic code does not
	 * give a good API for it. Could extend the generic code or
	 * make a special powerpc IPI for flushing TLBs.
	 * For now it's not too performance critical.
	 */
	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
				(void *)mm, 1);
	mm_reset_thread_local(mm);
}

void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	/*
	 * Order loads of mm_cpumask vs previous stores to clear ptes before
	 * the invalidate. See barrier in switch_mm_irqs_off().
	 */
	smp_mb();
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}

		if (mm_needs_flush_escalation(mm))
			_tlbie_pid(pid, RIC_FLUSH_ALL);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
local:
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	}
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (!fullmm) {
				exit_flush_lazy_tlbs(mm);
				goto local;
			}
		}
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	} else {
local:
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
	preempt_enable();
}

void radix__flush_all_mm(struct mm_struct *mm)
{
	__flush_all_mm(mm, false);
}
EXPORT_SYMBOL(radix__flush_all_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	} else {
local:
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	}
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;

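/*
 * Worked example: a flush covering 64 pages stays below the local
 * ceiling (POWER9_TLB_SETS_RADIX * 2 == 256), so a local flush issues
 * 64 individual tlbiels; a global flush of the same range exceeds the
 * tlbie ceiling of 33 and escalates to a single full-PID invalidation.
 */
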
static inline void __radix__flush_tlb_range(struct mm_struct *mm,
					unsigned long start, unsigned long end,
					bool flush_all_sizes)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (end != TLB_FLUSH_ALL) {
				exit_flush_lazy_tlbs(mm);
				goto is_local;
			}
		}
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	} else {
is_local:
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	}

	if (full) {
		if (local) {
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		} else {
			if (mm_needs_flush_escalation(mm))
				_tlbie_pid(pid, RIC_FLUSH_ALL);
			else
				_tlbie_pid(pid, RIC_FLUSH_TLB);
		}
	} else {
		bool hflush = flush_all_sizes;
		bool gflush = flush_all_sizes;
		unsigned long hstart, hend;
		unsigned long gstart, gend;

		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
			hflush = true;

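		/*
		 * Round the 2M (and, when enabled, 1G) sub-ranges inward:
		 * only huge entries that lie wholly inside [start, end)
		 * can exist for this flush, so the rounded ranges are
		 * sufficient and may turn out to be empty.
		 */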
		if (hflush) {
			hstart = (start + PMD_SIZE - 1) & PMD_MASK;
			hend = end & PMD_MASK;
			if (hstart == hend)
				hflush = false;
		}

		if (gflush) {
			gstart = (start + PUD_SIZE - 1) & PUD_MASK;
			gend = end & PUD_MASK;
			if (gstart == gend)
				gflush = false;
		}

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						PMD_SIZE, MMU_PAGE_2M);
			if (gflush)
				__tlbiel_va_range(gstart, gend, pid,
						PUD_SIZE, MMU_PAGE_1G);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						PMD_SIZE, MMU_PAGE_2M);
			if (gflush)
				__tlbie_va_range(gstart, gend, pid,
						PUD_SIZE, MMU_PAGE_1G);
			fixup_tlbie();
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}

void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	__radix__flush_tlb_range(vma->vm_mm, start, end, false);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

/*
 * Flush partition scoped LPID address translation for all CPUs.
 */
void radix__flush_tlb_lpid_page(unsigned int lpid,
				unsigned long addr,
				unsigned long page_size)
{
	int psize = radix_get_mmu_psize(page_size);

	_tlbie_lpid_va(addr, lpid, psize, RIC_FLUSH_TLB);
}
EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid_page);

/*
 * Flush partition scoped PWC from LPID for all CPUs.
 */
void radix__flush_pwc_lpid(unsigned int lpid)
{
	_tlbie_lpid(lpid, RIC_FLUSH_PWC);
}
EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);

/*
 * Flush partition scoped translations from LPID (=LPIDR).
 */
void radix__local_flush_tlb_lpid(unsigned int lpid)
{
	_tlbiel_lpid(lpid, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid);

/*
 * Flush process scoped translations from LPID (=LPIDR).
 * The important difference is that the guest normally manages its own
 * translations, but some cases, e.g. vCPU migration, require KVM to flush.
 */
void radix__local_flush_tlb_lpid_guest(unsigned int lpid)
{
	_tlbiel_lpid_guest(lpid, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid_guest);

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize);

void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;
	unsigned long start = tlb->start;
	unsigned long end = tlb->end;

	/*
	 * If the page size is not something we understand, do a full mm flush.
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (tlb->fullmm) {
		__flush_all_mm(mm, true);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
	} else if (mm_tlb_flush_nested(mm)) {
		/*
		 * If there is a concurrent invalidation that is clearing ptes,
		 * then it's possible this invalidation will miss one of those
		 * cleared ptes and miss flushing the TLB. If this invalidate
		 * returns before the other one flushes TLBs, that can result
		 * in it returning while there are still valid TLBs inside the
		 * range to be invalidated.
		 *
		 * See mm/memory.c:tlb_finish_mmu() for more details.
		 *
		 * The solution to this is to ensure the entire range is always
		 * flushed here. The problem for powerpc is that the flushes
		 * are page size specific, so this "forced flush" would not
		 * do the right thing if there is a mix of page sizes in
		 * the range to be invalidated. So use __flush_tlb_range
		 * which invalidates all possible page sizes in the range.
		 *
		 * A PWC flush is probably not required because the core code
		 * shouldn't free page tables in this path, but accounting
		 * for the possibility makes us a bit more robust.
		 *
		 * need_flush_all is an uncommon case because page table
		 * teardown should be done with exclusive locks held (but
		 * after locks are dropped another invalidate could come
		 * in), it could be optimized further if necessary.
		 */
		if (!tlb->need_flush_all)
			__radix__flush_tlb_range(mm, start, end, true);
		else
			radix__flush_all_mm(mm);
#endif
	} else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
		if (!tlb->need_flush_all)
			radix__flush_tlb_mm(mm);
		else
			radix__flush_all_mm(mm);
	} else {
		if (!tlb->need_flush_all)
			radix__flush_tlb_range_psize(mm, start, end, psize);
		else
			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
	}
	tlb->need_flush_all = 0;
}

static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				int psize, bool also_pwc)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (end != TLB_FLUSH_ALL) {
				exit_flush_lazy_tlbs(mm);
				goto is_local;
			}
		}
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	} else {
is_local:
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	}

	if (full) {
		if (local) {
			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
		} else {
			if (mm_needs_flush_escalation(mm))
				also_pwc = true;

			_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
		}
	} else {
		if (local)
			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
		else
			_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
	}
	preempt_enable();
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize)
{
	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
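/*
 * Collapsing a range of small-page PTEs into one huge-page PMD frees
 * the page table page that backed them, so the stale small-page
 * translations and the page walk cache entry for that table must both
 * be flushed: hence also_pwc == true in the range flushes below.
 */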
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pid, end;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	end = addr + HPAGE_PMD_SIZE;

	/* Otherwise first do the PWC, then iterate the pages. */
	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}
		_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	} else {
local:
		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	}

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

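/*
 * Flush everything: IS = 3 selects all entries, and the two tlbies
 * below cover both guest translations (PRS = 1, non-zero LPID) and
 * host translations (PRS = 0, LPID 0).
 */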
void radix__flush_tlb_all(void)
{
	unsigned long rb,prs,r,rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * now flush guest entries by passing PRS = 1 and LPID != 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * now flush host entries by passing PRS = 0 and LPID == 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned long pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought in obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (!cpu_possible(sib))
				continue;
			if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */