1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * TLB flush routines for radix kernels.
4 *
5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
6 */
9 #include <linux/hugetlb.h>
10 #include <linux/memblock.h>
11 #include <linux/mmu_context.h>
12 #include <linux/sched/mm.h>
14 #include <asm/ppc-opcode.h>
16 #include <asm/tlbflush.h>
17 #include <asm/trace.h>
18 #include <asm/cputhreads.h>
19 #include <asm/plpar_wrappers.h>
21 #define RIC_FLUSH_TLB 0
22 #define RIC_FLUSH_PWC 1
23 #define RIC_FLUSH_ALL 2
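/*
 * RIC is the "radix invalidation control" field of tlbie/tlbiel. Roughly,
 * per ISA 3.0: 0 invalidates TLB entries only, 1 invalidates only the page
 * walk cache (PWC), and 2 invalidates everything for the addressed scope,
 * including any cached process/partition table entries.
 */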
25 /*
26 * tlbiel instruction for radix, set invalidation
27 * i.e., r=1 and is=01 or is=10 or is=11
28 */
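/*
 * Rough sketch of the operand layout assumed by the helpers below (ISA
 * big-endian bit numbering): RB carries the IS field in bits 52:53 plus
 * either a set number (for the tlbiel loops) or the effective page number
 * and AP (actual page size) for per-VA flushes; RS carries the PID in
 * bits 0:31 and, for partition-scoped forms, the LPID in bits 32:63.
 */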
29 static __always_inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
30 unsigned int pid,
31 unsigned int ric, unsigned int prs)
36 rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
37 rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
39 asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
40 : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
41 : "memory");
44 static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
48 asm volatile("ptesync": : :"memory");
50 /*
51 * Flush the first set of the TLB, and the entire Page Walk Cache
52 * and partition table entries. Then flush the remaining sets of the
53 * TLB.
54 */
56 if (early_cpu_has_feature(CPU_FTR_HVMODE)) {
57 /* MSR[HV] should flush partition scope translations first. */
58 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
59 for (set = 1; set < num_sets; set++)
60 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
63 /* Flush process scoped entries. */
64 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
65 for (set = 1; set < num_sets; set++)
66 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
68 ppc_after_tlbiel_barrier();
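/*
 * ppc_after_tlbiel_barrier() (typically a ptesync) only orders the local
 * tlbiel sequence against later accesses; no tlbsync is needed here since
 * tlbiel never touches other CPUs' translations.
 */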
71 void radix__tlbiel_all(unsigned int action)
76 case TLB_INVAL_SCOPE_GLOBAL:
77 is = 3;
78 break;
79 case TLB_INVAL_SCOPE_LPID:
80 is = 2;
81 break;
86 if (early_cpu_has_feature(CPU_FTR_ARCH_300))
87 tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
88 else
89 WARN(1, "%s called on pre-POWER9 CPU\n", __func__);
91 asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
94 static __always_inline void __tlbiel_pid(unsigned long pid, int set,
95 unsigned long ric)
97 unsigned long rb,rs,prs,r;
99 rb = PPC_BIT(53); /* IS = 1 */
100 rb |= set << PPC_BITLSHIFT(51);
101 rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
102 prs = 1; /* process scoped */
103 r = 1; /* radix format */
105 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
106 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
107 trace_tlbie(0, 1, rb, rs, ric, prs, r);
110 static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
112 unsigned long rb,rs,prs,r;
114 rb = PPC_BIT(53); /* IS = 1 */
115 rs = pid << PPC_BITLSHIFT(31);
116 prs = 1; /* process scoped */
117 r = 1; /* radix format */
119 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
120 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
121 trace_tlbie(0, 0, rb, rs, ric, prs, r);
124 static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
126 unsigned long rb,rs,prs,r;
128 rb = PPC_BIT(52); /* IS = 2 */
130 prs = 0; /* partition scoped */
131 r = 1; /* radix format */
133 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
134 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
135 trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
138 static __always_inline void __tlbie_lpid_guest(unsigned long lpid, unsigned long ric)
140 unsigned long rb,rs,prs,r;
142 rb = PPC_BIT(52); /* IS = 2 */
144 prs = 1; /* process scoped */
145 r = 1; /* radix format */
147 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
148 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
149 trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
152 static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
153 unsigned long ap, unsigned long ric)
155 unsigned long rb,rs,prs,r;
157 rb = va & ~(PPC_BITMASK(52, 63));
158 rb |= ap << PPC_BITLSHIFT(58);
159 rs = pid << PPC_BITLSHIFT(31);
160 prs = 1; /* process scoped */
161 r = 1; /* radix format */
163 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
164 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
165 trace_tlbie(0, 1, rb, rs, ric, prs, r);
168 static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
169 unsigned long ap, unsigned long ric)
171 unsigned long rb,rs,prs,r;
173 rb = va & ~(PPC_BITMASK(52, 63));
174 rb |= ap << PPC_BITLSHIFT(58);
175 rs = pid << PPC_BITLSHIFT(31);
176 prs = 1; /* process scoped */
177 r = 1; /* radix format */
179 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
180 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
181 trace_tlbie(0, 0, rb, rs, ric, prs, r);
184 static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
185 unsigned long ap, unsigned long ric)
187 unsigned long rb,rs,prs,r;
189 rb = va & ~(PPC_BITMASK(52, 63));
190 rb |= ap << PPC_BITLSHIFT(58);
192 prs = 0; /* partition scoped */
193 r = 1; /* radix format */
195 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
196 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
197 trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
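/*
 * The fixup_tlbie_*() helpers below work around two POWER9 tlbie errata:
 * with CPU_FTR_P9_TLBIE_ERAT_BUG an extra flush targeting PID/LPID 0 is
 * issued first, and with CPU_FTR_P9_TLBIE_STQ_BUG the last tlbie of the
 * sequence is simply repeated, each preceded by a ptesync.
 */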
201 static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,
202 unsigned long ap)
204 if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
205 asm volatile("ptesync": : :"memory");
206 __tlbie_va(va, 0, ap, RIC_FLUSH_TLB);
209 if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
210 asm volatile("ptesync": : :"memory");
211 __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
215 static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
216 unsigned long ap)
218 if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
219 asm volatile("ptesync": : :"memory");
220 __tlbie_pid(0, RIC_FLUSH_TLB);
223 if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
224 asm volatile("ptesync": : :"memory");
225 __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
229 static inline void fixup_tlbie_pid(unsigned long pid)
231 /*
232 * We can use any address for the invalidation, pick one which is
233 * probably unused as an optimisation.
234 */
235 unsigned long va = ((1UL << 52) - 1);
237 if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
238 asm volatile("ptesync": : :"memory");
239 __tlbie_pid(0, RIC_FLUSH_TLB);
242 if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
243 asm volatile("ptesync": : :"memory");
244 __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
249 static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
250 unsigned long ap)
252 if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
253 asm volatile("ptesync": : :"memory");
254 __tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB);
257 if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
258 asm volatile("ptesync": : :"memory");
259 __tlbie_lpid_va(va, lpid, ap, RIC_FLUSH_TLB);
263 static inline void fixup_tlbie_lpid(unsigned long lpid)
265 /*
266 * We can use any address for the invalidation, pick one which is
267 * probably unused as an optimisation.
268 */
269 unsigned long va = ((1UL << 52) - 1);
271 if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
272 asm volatile("ptesync": : :"memory");
273 __tlbie_lpid(0, RIC_FLUSH_TLB);
276 if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
277 asm volatile("ptesync": : :"memory");
278 __tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
282 /*
283 * We use 128 sets in radix mode and 256 sets in hpt mode.
284 */
285 static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
289 asm volatile("ptesync": : :"memory");
291 /*
292 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
293 * also flush the entire Page Walk Cache.
294 */
295 __tlbiel_pid(pid, 0, ric);
297 /* For PWC, only one flush is needed */
298 if (ric == RIC_FLUSH_PWC) {
299 ppc_after_tlbiel_barrier();
300 return;
301 }
303 /* For the remaining sets, just flush the TLB */
304 for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
305 __tlbiel_pid(pid, set, RIC_FLUSH_TLB);
307 ppc_after_tlbiel_barrier();
308 asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory");
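/*
 * The local PID flush finishes by invalidating the user ERAT: translations
 * can be cached there independently of the TLB, so it is cleared once the
 * set-by-set tlbiel loop above has completed.
 */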
311 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
313 asm volatile("ptesync": : :"memory");
315 /*
316 * Workaround the fact that the "ric" argument to __tlbie_pid
317 * must be a compile-time constant to match the "i" constraint
318 * in the asm statement.
319 */
322 __tlbie_pid(pid, RIC_FLUSH_TLB);
323 fixup_tlbie_pid(pid);
326 __tlbie_pid(pid, RIC_FLUSH_PWC);
330 __tlbie_pid(pid, RIC_FLUSH_ALL);
331 fixup_tlbie_pid(pid);
333 asm volatile("eieio; tlbsync; ptesync": : :"memory");
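/*
 * Usual bracketing for a global flush: the leading ptesync makes prior PTE
 * updates visible before the tlbie goes out, and the trailing
 * eieio; tlbsync; ptesync waits for the invalidation to complete on all
 * processors before the caller proceeds.
 */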
341 static void do_tlbiel_pid(void *info)
343 struct tlbiel_pid *t = info;
345 if (t->ric == RIC_FLUSH_TLB)
346 _tlbiel_pid(t->pid, RIC_FLUSH_TLB);
347 else if (t->ric == RIC_FLUSH_PWC)
348 _tlbiel_pid(t->pid, RIC_FLUSH_PWC);
349 else
350 _tlbiel_pid(t->pid, RIC_FLUSH_ALL);
353 static inline void _tlbiel_pid_multicast(struct mm_struct *mm,
354 unsigned long pid, unsigned long ric)
356 struct cpumask *cpus = mm_cpumask(mm);
357 struct tlbiel_pid t = { .pid = pid, .ric = ric };
359 on_each_cpu_mask(cpus, do_tlbiel_pid, &t, 1);
360 /*
361 * Always want the CPU translations to be invalidated with tlbiel in
362 * these paths, so while coprocessors must use tlbie, we can not
363 * optimise away the tlbiel component.
364 */
365 if (atomic_read(&mm->context.copros) > 0)
366 _tlbie_pid(pid, RIC_FLUSH_ALL);
369 static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
371 asm volatile("ptesync": : :"memory");
373 /*
374 * Workaround the fact that the "ric" argument to __tlbie_lpid
375 * must be a compile-time constant to match the "i" constraint
376 * in the asm statement.
377 */
380 __tlbie_lpid(lpid, RIC_FLUSH_TLB);
381 fixup_tlbie_lpid(lpid);
384 __tlbie_lpid(lpid, RIC_FLUSH_PWC);
388 __tlbie_lpid(lpid, RIC_FLUSH_ALL);
389 fixup_tlbie_lpid(lpid);
391 asm volatile("eieio; tlbsync; ptesync": : :"memory");
394 static __always_inline void _tlbie_lpid_guest(unsigned long lpid, unsigned long ric)
396 /*
397 * Workaround the fact that the "ric" argument to __tlbie_lpid_guest
398 * must be a compile-time constant to match the "i" constraint
399 * in the asm statement.
400 */
403 __tlbie_lpid_guest(lpid, RIC_FLUSH_TLB);
406 __tlbie_lpid_guest(lpid, RIC_FLUSH_PWC);
410 __tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
412 fixup_tlbie_lpid(lpid);
413 asm volatile("eieio; tlbsync; ptesync": : :"memory");
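/*
 * As I read it, _tlbie_lpid_guest() invalidates the process-scoped (guest)
 * translations cached under the given LPID; radix__flush_all_lpid_guest()
 * below uses it when tearing down a guest's translations as a whole.
 */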
416 static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
417 unsigned long pid, unsigned long page_size,
418 unsigned long psize)
421 unsigned long ap = mmu_get_ap(psize);
423 for (addr = start; addr < end; addr += page_size)
424 __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
427 static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
428 unsigned long psize, unsigned long ric)
430 unsigned long ap = mmu_get_ap(psize);
432 asm volatile("ptesync": : :"memory");
433 __tlbiel_va(va, pid, ap, ric);
434 ppc_after_tlbiel_barrier();
437 static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
438 unsigned long pid, unsigned long page_size,
439 unsigned long psize, bool also_pwc)
441 asm volatile("ptesync": : :"memory");
442 if (also_pwc)
443 __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
444 __tlbiel_va_range(start, end, pid, page_size, psize);
445 ppc_after_tlbiel_barrier();
448 static inline void __tlbie_va_range(unsigned long start, unsigned long end,
449 unsigned long pid, unsigned long page_size,
450 unsigned long psize)
453 unsigned long ap = mmu_get_ap(psize);
455 for (addr = start; addr < end; addr += page_size)
456 __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
458 fixup_tlbie_va_range(addr - page_size, pid, ap);
461 static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
462 unsigned long psize, unsigned long ric)
464 unsigned long ap = mmu_get_ap(psize);
466 asm volatile("ptesync": : :"memory");
467 __tlbie_va(va, pid, ap, ric);
468 fixup_tlbie_va(va, pid, ap);
469 asm volatile("eieio; tlbsync; ptesync": : :"memory");
479 static void do_tlbiel_va(void *info)
481 struct tlbiel_va *t = info;
483 if (t->ric == RIC_FLUSH_TLB)
484 _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_TLB);
485 else if (t->ric == RIC_FLUSH_PWC)
486 _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_PWC);
487 else
488 _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_ALL);
491 static inline void _tlbiel_va_multicast(struct mm_struct *mm,
492 unsigned long va, unsigned long pid,
493 unsigned long psize, unsigned long ric)
495 struct cpumask *cpus = mm_cpumask(mm);
496 struct tlbiel_va t = { .va = va, .pid = pid, .psize = psize, .ric = ric };
497 on_each_cpu_mask(cpus, do_tlbiel_va, &t, 1);
498 if (atomic_read(&mm->context.copros) > 0)
499 _tlbie_va(va, pid, psize, RIC_FLUSH_TLB);
502 struct tlbiel_va_range {
503 unsigned long start;
504 unsigned long end;
505 unsigned long pid;
506 unsigned long page_size;
507 unsigned long psize;
508 bool also_pwc;
509 };
511 static void do_tlbiel_va_range(void *info)
513 struct tlbiel_va_range *t = info;
515 _tlbiel_va_range(t->start, t->end, t->pid, t->page_size,
516 t->psize, t->also_pwc);
519 static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
520 unsigned long psize, unsigned long ric)
522 unsigned long ap = mmu_get_ap(psize);
524 asm volatile("ptesync": : :"memory");
525 __tlbie_lpid_va(va, lpid, ap, ric);
526 fixup_tlbie_lpid_va(va, lpid, ap);
527 asm volatile("eieio; tlbsync; ptesync": : :"memory");
530 static inline void _tlbie_va_range(unsigned long start, unsigned long end,
531 unsigned long pid, unsigned long page_size,
532 unsigned long psize, bool also_pwc)
534 asm volatile("ptesync": : :"memory");
535 if (also_pwc)
536 __tlbie_pid(pid, RIC_FLUSH_PWC);
537 __tlbie_va_range(start, end, pid, page_size, psize);
538 asm volatile("eieio; tlbsync; ptesync": : :"memory");
541 static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
542 unsigned long start, unsigned long end,
543 unsigned long pid, unsigned long page_size,
544 unsigned long psize, bool also_pwc)
546 struct cpumask *cpus = mm_cpumask(mm);
547 struct tlbiel_va_range t = { .start = start, .end = end,
548 .pid = pid, .page_size = page_size,
549 .psize = psize, .also_pwc = also_pwc };
551 on_each_cpu_mask(cpus, do_tlbiel_va_range, &t, 1);
552 if (atomic_read(&mm->context.copros) > 0)
553 _tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
556 /*
557 * Base TLB flushing operations:
558 *
559 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
560 * - flush_tlb_page(vma, vmaddr) flushes one page
561 * - flush_tlb_range(vma, start, end) flushes a range of pages
562 * - flush_tlb_kernel_range(start, end) flushes kernel pages
563 *
564 * - local_* variants of page and mm only apply to the current
565 * processor
566 */
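/*
 * The entry points below pick between four mechanisms at runtime: plain
 * tlbiel when the mm is local to this CPU, broadcast tlbie when
 * cputlb_use_tlbie() allows it, IPI-driven tlbiel multicast over
 * mm_cpumask() otherwise, and the H_RPT_INVALIDATE hcall
 * (pseries_rpt_invalidate()) when running as a guest without GTSE.
 */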
567 void radix__local_flush_tlb_mm(struct mm_struct *mm)
572 pid = mm->context.id;
573 if (pid != MMU_NO_CONTEXT)
574 _tlbiel_pid(pid, RIC_FLUSH_TLB);
577 EXPORT_SYMBOL(radix__local_flush_tlb_mm);
580 void radix__local_flush_all_mm(struct mm_struct *mm)
585 pid = mm->context.id;
586 if (pid != MMU_NO_CONTEXT)
587 _tlbiel_pid(pid, RIC_FLUSH_ALL);
590 EXPORT_SYMBOL(radix__local_flush_all_mm);
592 static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
594 radix__local_flush_all_mm(mm);
596 #endif /* CONFIG_SMP */
598 void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
599 int psize)
604 pid = mm->context.id;
605 if (pid != MMU_NO_CONTEXT)
606 _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
610 void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
612 #ifdef CONFIG_HUGETLB_PAGE
613 /* need the return fix for nohash.c */
614 if (is_vm_hugetlb_page(vma))
615 return radix__local_flush_hugetlb_page(vma, vmaddr);
617 radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
619 EXPORT_SYMBOL(radix__local_flush_tlb_page);
621 static bool mm_is_singlethreaded(struct mm_struct *mm)
623 if (atomic_read(&mm->context.copros) > 0)
624 return false;
625 if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
626 return true;
627 return false;
630 static bool mm_needs_flush_escalation(struct mm_struct *mm)
632 /*
633 * P9 nest MMU has issues with the page walk cache
634 * caching PTEs and not flushing them properly when
635 * RIC = 0 for a PID/LPID invalidate
636 */
637 if (atomic_read(&mm->context.copros) > 0)
638 return true;
640 return false;
643 static void do_exit_flush_lazy_tlb(void *arg)
645 struct mm_struct *mm = arg;
646 unsigned long pid = mm->context.id;
648 /*
649 * A kthread could have done a mmget_not_zero() after the flushing CPU
650 * checked mm_is_singlethreaded, and be in the process of
651 * kthread_use_mm when interrupted here. In that case, current->mm will
652 * be set to mm, because kthread_use_mm() setting ->mm and switching to
653 * the mm is done with interrupts off.
654 */
655 if (current->mm == mm)
658 if (current->active_mm == mm) {
659 WARN_ON_ONCE(current->mm != NULL);
660 /* Is a kernel thread and is using mm as the lazy tlb */
662 current->active_mm = &init_mm;
663 switch_mm_irqs_off(mm, &init_mm, current);
667 atomic_dec(&mm->context.active_cpus);
668 cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
671 _tlbiel_pid(pid, RIC_FLUSH_ALL);
674 static void exit_flush_lazy_tlbs(struct mm_struct *mm)
676 /*
677 * Would be nice if this was async so it could be run in
678 * parallel with our local flush, but generic code does not
679 * give a good API for it. Could extend the generic code or
680 * make a special powerpc IPI for flushing TLBs.
681 * For now it's not too performance critical.
682 */
683 smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
684 (void *)mm, 1);
687 void radix__flush_tlb_mm(struct mm_struct *mm)
691 pid = mm->context.id;
692 if (unlikely(pid == MMU_NO_CONTEXT))
696 /*
697 * Order loads of mm_cpumask vs previous stores to clear ptes before
698 * the invalidate. See barrier in switch_mm_irqs_off
699 */
700 smp_mb();
701 if (!mm_is_thread_local(mm)) {
702 if (unlikely(mm_is_singlethreaded(mm))) {
703 exit_flush_lazy_tlbs(mm);
707 if (!mmu_has_feature(MMU_FTR_GTSE)) {
708 unsigned long tgt = H_RPTI_TARGET_CMMU;
710 if (atomic_read(&mm->context.copros) > 0)
711 tgt |= H_RPTI_TARGET_NMMU;
712 pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB,
713 H_RPTI_PAGE_ALL, 0, -1UL);
714 } else if (cputlb_use_tlbie()) {
715 if (mm_needs_flush_escalation(mm))
716 _tlbie_pid(pid, RIC_FLUSH_ALL);
717 else
718 _tlbie_pid(pid, RIC_FLUSH_TLB);
720 _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
724 _tlbiel_pid(pid, RIC_FLUSH_TLB);
728 EXPORT_SYMBOL(radix__flush_tlb_mm);
730 static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
734 pid = mm->context.id;
735 if (unlikely(pid == MMU_NO_CONTEXT))
739 smp_mb(); /* see radix__flush_tlb_mm */
740 if (!mm_is_thread_local(mm)) {
741 if (unlikely(mm_is_singlethreaded(mm))) {
743 exit_flush_lazy_tlbs(mm);
747 if (!mmu_has_feature(MMU_FTR_GTSE)) {
748 unsigned long tgt = H_RPTI_TARGET_CMMU;
749 unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
752 if (atomic_read(&mm->context.copros) > 0)
753 tgt |= H_RPTI_TARGET_NMMU;
754 pseries_rpt_invalidate(pid, tgt, type,
755 H_RPTI_PAGE_ALL, 0, -1UL);
756 } else if (cputlb_use_tlbie())
757 _tlbie_pid(pid, RIC_FLUSH_ALL);
758 else
759 _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
762 _tlbiel_pid(pid, RIC_FLUSH_ALL);
767 void radix__flush_all_mm(struct mm_struct *mm)
769 __flush_all_mm(mm, false);
771 EXPORT_SYMBOL(radix__flush_all_mm);
773 void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
774 int psize)
778 pid = mm->context.id;
779 if (unlikely(pid == MMU_NO_CONTEXT))
783 smp_mb(); /* see radix__flush_tlb_mm */
784 if (!mm_is_thread_local(mm)) {
785 if (unlikely(mm_is_singlethreaded(mm))) {
786 exit_flush_lazy_tlbs(mm);
789 if (!mmu_has_feature(MMU_FTR_GTSE)) {
790 unsigned long tgt, pg_sizes, size;
792 tgt = H_RPTI_TARGET_CMMU;
793 pg_sizes = psize_to_rpti_pgsize(psize);
794 size = 1UL << mmu_psize_to_shift(psize);
796 if (atomic_read(&mm->context.copros) > 0)
797 tgt |= H_RPTI_TARGET_NMMU;
798 pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB,
801 } else if (cputlb_use_tlbie())
802 _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
803 else
804 _tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB);
807 _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
812 void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
814 #ifdef CONFIG_HUGETLB_PAGE
815 if (is_vm_hugetlb_page(vma))
816 return radix__flush_hugetlb_page(vma, vmaddr);
818 radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
820 EXPORT_SYMBOL(radix__flush_tlb_page);
822 #else /* CONFIG_SMP */
823 static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { }
824 #endif /* CONFIG_SMP */
826 static void do_tlbiel_kernel(void *info)
828 _tlbiel_pid(0, RIC_FLUSH_ALL);
831 static inline void _tlbiel_kernel_broadcast(void)
833 on_each_cpu(do_tlbiel_kernel, NULL, 1);
835 /*
836 * Coherent accelerators don't refcount kernel memory mappings,
837 * so have to always issue a tlbie for them. This is quite a
838 * slow path anyway.
839 */
840 _tlbie_pid(0, RIC_FLUSH_ALL);
844 /*
845 * If kernel TLBIs ever become local rather than global, then
846 * drivers/misc/ocxl/link.c:ocxl_link_add_pe will need some work, as it
847 * assumes kernel TLBIs are global.
848 */
849 void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
851 if (!mmu_has_feature(MMU_FTR_GTSE)) {
852 unsigned long tgt = H_RPTI_TARGET_CMMU | H_RPTI_TARGET_NMMU;
853 unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
856 pseries_rpt_invalidate(0, tgt, type, H_RPTI_PAGE_ALL,
858 } else if (cputlb_use_tlbie())
859 _tlbie_pid(0, RIC_FLUSH_ALL);
860 else
861 _tlbiel_kernel_broadcast();
863 EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
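/*
 * Note that the kernel-range flush above never walks the range: kernel
 * translations are assumed to live under PID 0, so a PID 0 RIC_FLUSH_ALL
 * (or the equivalent rpt-invalidate) is taken to cover them.
 */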
865 #define TLB_FLUSH_ALL -1UL
867 /*
868 * Number of pages above which we invalidate the entire PID rather than
869 * flush individual pages, for local and global flushes respectively.
870 *
871 * tlbie goes out to the interconnect and individual ops are more costly.
872 * It also does not iterate over sets like the local tlbiel variant when
873 * invalidating a full PID, so it has a far lower threshold to change from
874 * individual page flushes to full-pid flushes.
875 */
876 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
877 static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
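/*
 * Worked example of the thresholds: with POWER9_TLB_SETS_RADIX = 128 the
 * local ceiling comes to 2 * 128 = 256 pages, since a full-PID tlbiel loop
 * already costs roughly one instruction per set; broadcast tlbie switches
 * to a single full-PID flush after only 33 pages because every per-page
 * tlbie goes out on the interconnect.
 */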
879 static inline void __radix__flush_tlb_range(struct mm_struct *mm,
880 unsigned long start, unsigned long end)
884 unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
885 unsigned long page_size = 1UL << page_shift;
886 unsigned long nr_pages = (end - start) >> page_shift;
889 pid = mm->context.id;
890 if (unlikely(pid == MMU_NO_CONTEXT))
894 smp_mb(); /* see radix__flush_tlb_mm */
895 if (!mm_is_thread_local(mm)) {
896 if (unlikely(mm_is_singlethreaded(mm))) {
897 if (end != TLB_FLUSH_ALL) {
898 exit_flush_lazy_tlbs(mm);
903 full = (end == TLB_FLUSH_ALL ||
904 nr_pages > tlb_single_page_flush_ceiling);
908 full = (end == TLB_FLUSH_ALL ||
909 nr_pages > tlb_local_single_page_flush_ceiling);
912 if (!mmu_has_feature(MMU_FTR_GTSE) && !local) {
913 unsigned long tgt = H_RPTI_TARGET_CMMU;
914 unsigned long pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
916 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
917 pg_sizes |= psize_to_rpti_pgsize(MMU_PAGE_2M);
918 if (atomic_read(&mm->context.copros) > 0)
919 tgt |= H_RPTI_TARGET_NMMU;
920 pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, pg_sizes,
924 _tlbiel_pid(pid, RIC_FLUSH_TLB);
926 if (cputlb_use_tlbie()) {
927 if (mm_needs_flush_escalation(mm))
928 _tlbie_pid(pid, RIC_FLUSH_ALL);
929 else
930 _tlbie_pid(pid, RIC_FLUSH_TLB);
932 _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
936 bool hflush = false;
937 unsigned long hstart, hend;
939 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
940 hstart = (start + PMD_SIZE - 1) & PMD_MASK;
941 hend = end & PMD_MASK;
947 asm volatile("ptesync": : :"memory");
948 __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
949 if (hflush)
950 __tlbiel_va_range(hstart, hend, pid,
951 PMD_SIZE, MMU_PAGE_2M);
952 ppc_after_tlbiel_barrier();
953 } else if (cputlb_use_tlbie()) {
954 asm volatile("ptesync": : :"memory");
955 __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
956 if (hflush)
957 __tlbie_va_range(hstart, hend, pid,
958 PMD_SIZE, MMU_PAGE_2M);
959 asm volatile("eieio; tlbsync; ptesync": : :"memory");
961 _tlbiel_va_range_multicast(mm,
962 start, end, pid, page_size, mmu_virtual_psize, false);
963 if (hflush)
964 _tlbiel_va_range_multicast(mm,
965 hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false);
971 void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
975 #ifdef CONFIG_HUGETLB_PAGE
976 if (is_vm_hugetlb_page(vma))
977 return radix__flush_hugetlb_tlb_range(vma, start, end);
980 __radix__flush_tlb_range(vma->vm_mm, start, end);
982 EXPORT_SYMBOL(radix__flush_tlb_range);
984 static int radix_get_mmu_psize(int page_size)
988 if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
989 psize = mmu_virtual_psize;
990 else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
991 psize = MMU_PAGE_2M;
992 else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
993 psize = MMU_PAGE_1G;
999 /*
1000 * Flush partition scoped LPID address translation for all CPUs.
1001 */
1002 void radix__flush_tlb_lpid_page(unsigned int lpid,
1003 unsigned long addr,
1004 unsigned long page_size)
1006 int psize = radix_get_mmu_psize(page_size);
1008 _tlbie_lpid_va(addr, lpid, psize, RIC_FLUSH_TLB);
1010 EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid_page);
1012 /*
1013 * Flush partition scoped PWC from LPID for all CPUs.
1014 */
1015 void radix__flush_pwc_lpid(unsigned int lpid)
1017 _tlbie_lpid(lpid, RIC_FLUSH_PWC);
1019 EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);
1021 /*
1022 * Flush partition scoped translations from LPID (=LPIDR)
1023 */
1024 void radix__flush_all_lpid(unsigned int lpid)
1026 _tlbie_lpid(lpid, RIC_FLUSH_ALL);
1028 EXPORT_SYMBOL_GPL(radix__flush_all_lpid);
1030 /*
1031 * Flush process scoped translations from LPID (=LPIDR)
1032 */
1033 void radix__flush_all_lpid_guest(unsigned int lpid)
1035 _tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
1038 static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
1039 unsigned long end, int psize);
1041 void radix__tlb_flush(struct mmu_gather *tlb)
1044 struct mm_struct *mm = tlb->mm;
1045 int page_size = tlb->page_size;
1046 unsigned long start = tlb->start;
1047 unsigned long end = tlb->end;
1049 /*
1050 * if page size is not something we understand, do a full mm flush
1051 *
1052 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
1053 * that flushes the process table entry cache upon process teardown.
1054 * See the comment for radix in arch_exit_mmap().
1055 */
1056 if (tlb->fullmm || tlb->need_flush_all) {
1057 __flush_all_mm(mm, true);
1058 } else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
1059 if (!tlb->freed_tables)
1060 radix__flush_tlb_mm(mm);
1061 else
1062 radix__flush_all_mm(mm);
1063 } else {
1064 if (!tlb->freed_tables)
1065 radix__flush_tlb_range_psize(mm, start, end, psize);
1066 else
1067 radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
1068 }
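/*
 * The tlb->freed_tables checks above decide whether the page walk cache
 * must be flushed as well: if the gather freed page tables, stale PWC
 * entries could still reference them, hence the _all/_pwc (RIC_FLUSH_ALL)
 * variants rather than plain TLB flushes.
 */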
1071 static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
1072 unsigned long start, unsigned long end,
1073 int psize, bool also_pwc)
1076 unsigned int page_shift = mmu_psize_defs[psize].shift;
1077 unsigned long page_size = 1UL << page_shift;
1078 unsigned long nr_pages = (end - start) >> page_shift;
1081 pid = mm->context.id;
1082 if (unlikely(pid == MMU_NO_CONTEXT))
1086 smp_mb(); /* see radix__flush_tlb_mm */
1087 if (!mm_is_thread_local(mm)) {
1088 if (unlikely(mm_is_singlethreaded(mm))) {
1089 if (end != TLB_FLUSH_ALL) {
1090 exit_flush_lazy_tlbs(mm);
1095 full = (end == TLB_FLUSH_ALL ||
1096 nr_pages > tlb_single_page_flush_ceiling);
1100 full = (end == TLB_FLUSH_ALL ||
1101 nr_pages > tlb_local_single_page_flush_ceiling);
1104 if (!mmu_has_feature(MMU_FTR_GTSE) && !local) {
1105 unsigned long tgt = H_RPTI_TARGET_CMMU;
1106 unsigned long type = H_RPTI_TYPE_TLB;
1107 unsigned long pg_sizes = psize_to_rpti_pgsize(psize);
1110 type |= H_RPTI_TYPE_PWC;
1111 if (atomic_read(&mm->context.copros) > 0)
1112 tgt |= H_RPTI_TARGET_NMMU;
1113 pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
1116 _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
1118 if (cputlb_use_tlbie()) {
1119 if (mm_needs_flush_escalation(mm))
1120 _tlbie_pid(pid, RIC_FLUSH_ALL);
1121 else
1122 _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
1125 _tlbiel_pid_multicast(mm, pid,
1126 also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
1132 _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
1133 else if (cputlb_use_tlbie())
1134 _tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
1135 else
1136 _tlbiel_va_range_multicast(mm,
1137 start, end, pid, page_size, psize, also_pwc);
1142 void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
1143 unsigned long end, int psize)
1145 return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
1148 static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
1149 unsigned long end, int psize)
1151 __radix__flush_tlb_range_psize(mm, start, end, psize, true);
1154 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1155 void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
1157 unsigned long pid, end;
1159 pid = mm->context.id;
1160 if (unlikely(pid == MMU_NO_CONTEXT))
1163 /* 4k page size, just blow the world */
1164 if (PAGE_SIZE == 0x1000) {
1165 radix__flush_all_mm(mm);
1166 return;
1167 }
1169 end = addr + HPAGE_PMD_SIZE;
1171 /* Otherwise first do the PWC, then iterate the pages. */
1173 smp_mb(); /* see radix__flush_tlb_mm */
1174 if (!mm_is_thread_local(mm)) {
1175 if (unlikely(mm_is_singlethreaded(mm))) {
1176 exit_flush_lazy_tlbs(mm);
1179 if (!mmu_has_feature(MMU_FTR_GTSE)) {
1180 unsigned long tgt, type, pg_sizes;
1182 tgt = H_RPTI_TARGET_CMMU;
1183 type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
1185 pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
1187 if (atomic_read(&mm->context.copros) > 0)
1188 tgt |= H_RPTI_TARGET_NMMU;
1189 pseries_rpt_invalidate(pid, tgt, type, pg_sizes,
1191 } else if (cputlb_use_tlbie())
1192 _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
1193 else
1194 _tlbiel_va_range_multicast(mm,
1195 addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
1198 _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
1203 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1205 void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
1206 unsigned long start, unsigned long end)
1208 radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
1210 EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
1212 void radix__flush_tlb_all(void)
1214 unsigned long rb,prs,r,rs;
1215 unsigned long ric = RIC_FLUSH_ALL;
1217 rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
1218 prs = 0; /* partition scoped */
1219 r = 1; /* radix format */
1220 rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */
1222 asm volatile("ptesync": : :"memory");
1223 /*
1224 * now flush guest entries by passing PRS = 1 and LPID != 0
1225 */
1226 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
1227 : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
1228 /*
1229 * now flush host entries by passing PRS = 0 and LPID == 0
1230 */
1231 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
1232 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
1233 asm volatile("eieio; tlbsync; ptesync": : :"memory");
1236 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1237 extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
1239 unsigned long pid = mm->context.id;
1241 if (unlikely(pid == MMU_NO_CONTEXT))
1244 if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
1247 /*
1248 * If this context hasn't run on that CPU before and KVM is
1249 * around, there's a slim chance that the guest on another
1250 * CPU just brought in obsolete translation into the TLB of
1251 * this CPU due to a bad prefetch using the guest PID on
1252 * the way into the hypervisor.
1253 *
1254 * We work around this here. If KVM is possible, we check if
1255 * any sibling thread is in KVM. If it is, the window may exist
1256 * and thus we flush that PID from the core.
1257 *
1258 * A potential future improvement would be to mark which PIDs
1259 * have never been used on the system and avoid it if the PID
1260 * is new and the process has no other cpumask bit set.
1261 */
1262 if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
1263 int cpu = smp_processor_id();
1264 int sib = cpu_first_thread_sibling(cpu);
1267 for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
1270 if (!cpu_possible(sib))
1271 continue;
1272 if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
1273 flush = true;
1275 if (flush)
1276 _tlbiel_pid(pid, RIC_FLUSH_ALL);
1279 EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
1280 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */