Merge tag 's390-5.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
[linux-2.6-microblaze.git] / arch / riscv / mm / tlbflush.c
index 720b443..64f8201 100644 (file)
@@ -4,36 +4,66 @@
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <asm/sbi.h>
+#include <asm/mmu_context.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+       __asm__ __volatile__ ("sfence.vma x0, %0"
+                       :
+                       : "r" (asid)
+                       : "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+               unsigned long asid)
+{
+       __asm__ __volatile__ ("sfence.vma %0, %1"
+                       :
+                       : "r" (addr), "r" (asid)
+                       : "memory");
+}
 
/*
 * Flush the entire TLB on all harts via the SBI remote-fence call.
 * A NULL hart mask selects every hart; start = 0 with size = -1
 * covers the whole address range.
 */
void flush_tlb_all(void)
{
	sbi_remote_sfence_vma(NULL, 0, -1);
}
 
/*
 * Flush [start, start + size) of @mm's address space, picking the
 * cheapest mechanism available:
 *   - local-only sfence.vma when no other CPU has used @mm,
 *   - an SBI remote fence when the flush must reach other harts,
 *   - ASID-qualified variants when the ASID allocator is active.
 *
 * @stride is the mapping granularity (PAGE_SIZE or PMD_SIZE): when the
 * range fits in one entry of that granularity a single-page flush is
 * enough, otherwise the whole (per-ASID) TLB is flushed.
 */
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
				  unsigned long size, unsigned long stride)
{
	struct cpumask *cmask = mm_cpumask(mm);
	struct cpumask hmask;
	unsigned int cpuid;
	bool broadcast;

	/* Nobody has this mm in their TLB: nothing to do. */
	if (cpumask_empty(cmask))
		return;

	/* get_cpu() disables preemption so cpuid and cmask stay stable. */
	cpuid = get_cpu();
	/* check if the tlbflush needs to be sent to other CPUs */
	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
	if (static_branch_unlikely(&use_asid_allocator)) {
		unsigned long asid = atomic_long_read(&mm->context.id);

		if (broadcast) {
			/* SBI wants hart ids, not Linux cpu ids. */
			riscv_cpuid_to_hartid_mask(cmask, &hmask);
			sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
						   start, size, asid);
		} else if (size <= stride) {
			/* Range fits in one entry: flush just that page. */
			local_flush_tlb_page_asid(start, asid);
		} else {
			local_flush_tlb_all_asid(asid);
		}
	} else {
		if (broadcast) {
			riscv_cpuid_to_hartid_mask(cmask, &hmask);
			sbi_remote_sfence_vma(cpumask_bits(&hmask),
					      start, size);
		} else if (size <= stride) {
			local_flush_tlb_page(start);
		} else {
			local_flush_tlb_all();
		}
	}

	put_cpu();
}
@@ -41,16 +71,23 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
 
/* Flush all of @mm's translations on every CPU that has used it
 * (size = -1 requests a full-address-range flush). */
void flush_tlb_mm(struct mm_struct *mm)
{
	__sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
}
 
/* Flush the single page mapping @addr in @vma's address space. */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}
 
/* Flush [start, end) of @vma's address space at base-page granularity. */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* As flush_tlb_range(), but for PMD-sized (huge page) mappings, so a
 * range covering one huge page still takes the single-entry path. */
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif