Merge tag 'x86-mm-2021-04-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 98f2695..7880468 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -24,7 +24,7 @@
 # define __flush_tlb_local             native_flush_tlb_local
 # define __flush_tlb_global            native_flush_tlb_global
 # define __flush_tlb_one_user(addr)    native_flush_tlb_one_user(addr)
-# define __flush_tlb_others(msk, info) native_flush_tlb_others(msk, info)
+# define __flush_tlb_multi(msk, info)  native_flush_tlb_multi(msk, info)
 #endif
 
 /*
@@ -300,7 +300,7 @@ void leave_mm(int cpu)
                return;
 
        /* Warn if we're not lazy. */
-       WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
+       WARN_ON(!this_cpu_read(cpu_tlbstate_shared.is_lazy));
 
        switch_mm(NULL, &init_mm, NULL);
 }
@@ -316,7 +316,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        local_irq_restore(flags);
 }
 
-static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+static unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
 {
        unsigned long next_tif = task_thread_info(next)->flags;
        unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
@@ -424,7 +424,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 {
        struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
        u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-       bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
+       bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy);
        unsigned cpu = smp_processor_id();
        u64 next_tlb_gen;
        bool need_flush;
@@ -439,7 +439,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
         */
 
-       /* We don't want flush_tlb_func_* to run concurrently with us. */
+       /* We don't want flush_tlb_func() to run concurrently with us. */
        if (IS_ENABLED(CONFIG_PROVE_LOCKING))
                WARN_ON_ONCE(!irqs_disabled());
 
@@ -469,7 +469,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                __flush_tlb_all();
        }
 #endif
-       this_cpu_write(cpu_tlbstate.is_lazy, false);
+       if (was_lazy)
+               this_cpu_write(cpu_tlbstate_shared.is_lazy, false);
 
        /*
         * The membarrier system call requires a full memory barrier and
@@ -490,7 +491,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                /*
                 * Even in lazy TLB mode, the CPU should stay set in the
                 * mm_cpumask. The TLB shootdown code can figure out from
-                * from cpu_tlbstate.is_lazy whether or not to send an IPI.
+                * cpu_tlbstate_shared.is_lazy whether or not to send an IPI.
                 */
                if (WARN_ON_ONCE(real_prev != &init_mm &&
                                 !cpumask_test_cpu(cpu, mm_cpumask(next))))
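
For illustration, a minimal user-space sketch of the contract described in the comment above: a lazy CPU keeps its bit set in mm_cpumask but publishes is_lazy in shared per-CPU state, and the flush initiator reads that flag to decide whether an IPI is needed, except when page tables were freed. This is a model under those assumptions, not kernel code; the cpumask machinery is reduced to a plain array and a bool.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Mirrors the role of cpu_tlbstate_shared.is_lazy in the diff. */
static bool is_lazy[NR_CPUS];

/* enter_lazy_tlb(): the CPU keeps its bit in mm_cpumask but marks itself lazy. */
static void enter_lazy(int cpu)
{
	is_lazy[cpu] = true;
}

/* switch_mm_irqs_off(): leaving lazy mode clears the flag, if it was set. */
static void leave_lazy(int cpu)
{
	if (is_lazy[cpu])
		is_lazy[cpu] = false;
}

/*
 * The initiator's decision: lazy CPUs are skipped for ordinary flushes, but
 * must still be interrupted when page-table pages were freed, because they
 * may speculatively walk the stale tables.
 */
static bool needs_ipi(int cpu, bool freed_tables)
{
	return freed_tables || !is_lazy[cpu];
}

int main(void)
{
	enter_lazy(2);
	printf("cpu2, plain flush:  %d\n", needs_ipi(2, false)); /* 0: skipped  */
	printf("cpu2, freed tables: %d\n", needs_ipi(2, true));  /* 1: IPI sent */
	leave_lazy(2);
	printf("cpu2, plain flush:  %d\n", needs_ipi(2, false)); /* 1: IPI sent */
	return 0;
}
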
@@ -598,7 +599,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
        if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
                return;
 
-       this_cpu_write(cpu_tlbstate.is_lazy, true);
+       this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
 }
 
 /*
@@ -647,14 +648,13 @@ void initialize_tlbstate_and_flush(void)
 }
 
 /*
- * flush_tlb_func_common()'s memory ordering requirement is that any
+ * flush_tlb_func()'s memory ordering requirement is that any
  * TLB fills that happen after we flush the TLB are ordered after we
  * read active_mm's tlb_gen.  We don't need any explicit barriers
  * because all x86 flush operations are serializing and the
  * atomic64_read operation won't be reordered by the compiler.
  */
-static void flush_tlb_func_common(const struct flush_tlb_info *f,
-                                 bool local, enum tlb_flush_reason reason)
+static void flush_tlb_func(void *info)
 {
        /*
         * We have three different tlb_gen values in here.  They are:
@@ -665,28 +665,40 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
         * - f->new_tlb_gen: the generation that the requester of the flush
         *                   wants us to catch up to.
         */
+       const struct flush_tlb_info *f = info;
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
        u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
        u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+       bool local = smp_processor_id() == f->initiating_cpu;
+       unsigned long nr_invalidate = 0;
 
        /* This code cannot presently handle being reentered. */
        VM_WARN_ON(!irqs_disabled());
 
+       if (!local) {
+               inc_irq_stat(irq_tlb_count);
+               count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+
+               /* Can only happen on remote CPUs */
+               if (f->mm && f->mm != loaded_mm)
+                       return;
+       }
+
        if (unlikely(loaded_mm == &init_mm))
                return;
 
        VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
                   loaded_mm->context.ctx_id);
 
-       if (this_cpu_read(cpu_tlbstate.is_lazy)) {
+       if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) {
                /*
                 * We're in lazy mode.  We need to at least flush our
                 * paging-structure cache to avoid speculatively reading
                 * garbage into our TLB.  Since switching to init_mm is barely
                 * slower than a minimal flush, just switch to init_mm.
                 *
-                * This should be rare, with native_flush_tlb_others skipping
+                * This should be rare, with native_flush_tlb_multi() skipping
                 * IPIs to lazy TLB mode CPUs.
                 */
                switch_mm_irqs_off(NULL, &init_mm, NULL);
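
To make the generation bookkeeping concrete, here is a simplified user-space sketch (plain variables instead of per-CPU data and atomics) of how flush_tlb_func() compares the mm's tlb_gen with the CPU's local_tlb_gen, and how the single callback now tells local from remote invocations via the initiating_cpu recorded in flush_tlb_info. The partial/full flush choice is elided; this is a model, not the kernel implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures used in the diff. */
struct mm {
	uint64_t tlb_gen;        /* the mm's current generation (mm_tlb_gen) */
};

struct cpu_ctx {
	uint64_t local_tlb_gen;  /* generation this CPU has flushed up to */
};

struct flush_info {
	uint64_t new_tlb_gen;    /* generation the requester wants us to reach
				  * (drives the partial-flush heuristic, elided) */
	int initiating_cpu;      /* set by the initiator; lets one callback
				  * distinguish local from remote invocation */
};

/* Roughly the decision flush_tlb_func() makes after its early exits. */
static void flush_one_cpu(int this_cpu, struct mm *mm, struct cpu_ctx *ctx,
			  const struct flush_info *f)
{
	bool local = (this_cpu == f->initiating_cpu);
	uint64_t mm_tlb_gen = mm->tlb_gen;

	if (ctx->local_tlb_gen == mm_tlb_gen)
		return;                 /* an earlier flush already caught us up */

	/* flush (partial or full, elided here), then publish the catch-up */
	ctx->local_tlb_gen = mm_tlb_gen;
	printf("cpu%d: %s flush, now at gen %llu\n", this_cpu,
	       local ? "local" : "remote", (unsigned long long)mm_tlb_gen);
}

int main(void)
{
	struct mm mm = { .tlb_gen = 3 };
	struct cpu_ctx cpu1 = { .local_tlb_gen = 1 };
	struct flush_info f = { .new_tlb_gen = 3, .initiating_cpu = 0 };

	flush_one_cpu(1, &mm, &cpu1, &f);   /* remote flush, catches up to 3 */
	flush_one_cpu(1, &mm, &cpu1, &f);   /* already at 3, nothing to do */
	return 0;
}
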
@@ -700,8 +712,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
                 * be handled can catch us all the way up, leaving no work for
                 * the second flush.
                 */
-               trace_tlb_flush(reason, 0);
-               return;
+               goto done;
        }
 
        WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
@@ -748,56 +759,54 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
            f->new_tlb_gen == local_tlb_gen + 1 &&
            f->new_tlb_gen == mm_tlb_gen) {
                /* Partial flush */
-               unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
                unsigned long addr = f->start;
 
+               nr_invalidate = (f->end - f->start) >> f->stride_shift;
+
                while (addr < f->end) {
                        flush_tlb_one_user(addr);
                        addr += 1UL << f->stride_shift;
                }
                if (local)
                        count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
-               trace_tlb_flush(reason, nr_invalidate);
        } else {
                /* Full flush. */
+               nr_invalidate = TLB_FLUSH_ALL;
+
                flush_tlb_local();
                if (local)
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-               trace_tlb_flush(reason, TLB_FLUSH_ALL);
        }
 
        /* Both paths above update our state to mm_tlb_gen. */
        this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
-}
-
-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
-{
-       const struct flush_tlb_info *f = info;
 
-       flush_tlb_func_common(f, true, reason);
+       /* Tracing is done in a unified manner to reduce the code size */
+done:
+       trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN :
+                               (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN :
+                                                 TLB_LOCAL_MM_SHOOTDOWN,
+                       nr_invalidate);
 }
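
The partial-flush path above walks the range one stride at a time and reports the number of invalidated entries to the tracepoint; the full-flush path reports TLB_FLUSH_ALL instead. A small stand-alone sketch of that arithmetic, with the actual invalidation replaced by a counter:

#include <stdio.h>

/*
 * Model of the partial-flush loop: walk [start, end) in 1 << stride_shift
 * steps. nr_invalidate is exactly the number of loop iterations.
 */
static unsigned long partial_flush(unsigned long start, unsigned long end,
				   unsigned int stride_shift)
{
	unsigned long nr_invalidate = (end - start) >> stride_shift;
	unsigned long addr = start;
	unsigned long flushed = 0;

	while (addr < end) {
		flushed++;                       /* flush_tlb_one_user(addr) */
		addr += 1UL << stride_shift;
	}

	printf("nr_invalidate=%lu flushed=%lu\n", nr_invalidate, flushed);
	return nr_invalidate;
}

int main(void)
{
	partial_flush(0x1000, 0x5000, 12);     /* four 4 KiB pages */
	partial_flush(0x200000, 0x600000, 21); /* two 2 MiB pages  */
	return 0;
}
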
 
-static void flush_tlb_func_remote(void *info)
+static bool tlb_is_not_lazy(int cpu)
 {
-       const struct flush_tlb_info *f = info;
-
-       inc_irq_stat(irq_tlb_count);
-
-       if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
-               return;
-
-       count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-       flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
+       return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
 }
 
-static bool tlb_is_not_lazy(int cpu, void *data)
-{
-       return !per_cpu(cpu_tlbstate.is_lazy, cpu);
-}
+static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
+EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared);
 
-STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
+STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
                                         const struct flush_tlb_info *info)
 {
+       /*
+        * Do accounting and tracing. Note that there are (and have always been)
+        * cases in which a remote TLB flush is traced but ultimately does not
+        * take place.
+        */
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (info->end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
@@ -815,18 +824,42 @@ STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
         * up on the new contents of what used to be page tables, while
         * doing a speculative memory access.
         */
-       if (info->freed_tables)
-               smp_call_function_many(cpumask, flush_tlb_func_remote,
-                              (void *)info, 1);
-       else
-               on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
-                               (void *)info, 1, cpumask);
+       if (info->freed_tables) {
+               on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
+       } else {
+               /*
+                * Although we could have used on_each_cpu_cond_mask(),
+                * open-coding it has performance advantages, as it eliminates
+                * the need for indirect calls or retpolines. In addition, it
+                * allows the use of a designated cpumask for evaluating the
+                * condition instead of allocating one.
+                *
+                * This code works under the assumption that there are no nested
+                * TLB flushes, an assumption that is already made in
+                * flush_tlb_mm_range().
+                *
+                * cond_cpumask is logically a stack-local variable, but it is
+                * more efficient to have it off the stack and not to allocate
+                * it on demand. Preemption is disabled and this code is
+                * non-reentrant.
+                */
+               struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask);
+               int cpu;
+
+               cpumask_clear(cond_cpumask);
+
+               for_each_cpu(cpu, cpumask) {
+                       if (tlb_is_not_lazy(cpu))
+                               __cpumask_set_cpu(cpu, cond_cpumask);
+               }
+               on_each_cpu_mask(cond_cpumask, flush_tlb_func, (void *)info, true);
+       }
 }
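
A user-space sketch of the open-coding described in the comment above: rather than passing a condition callback to a generic helper (an indirect call, and possibly a cpumask allocation), the initiator copies the non-lazy subset of the target mask into a preallocated scratch mask and sends only to that, unless page tables were freed. The cpumask is a plain bitmask and the IPI a printf; names like cond_cpumask mirror the diff, but this is a model, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static bool is_lazy[NR_CPUS];

/* One scratch mask per CPU in the kernel; a single one suffices in this model. */
static unsigned long flush_tlb_mask;

static bool tlb_is_not_lazy(int cpu)
{
	return !is_lazy[cpu];
}

/* Stand-in for on_each_cpu_mask(): "send" the flush to every CPU in the mask. */
static void on_each_cpu_mask(unsigned long mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1UL << cpu))
			printf("IPI -> cpu%d\n", cpu);
}

static void flush_tlb_multi(unsigned long cpumask, bool freed_tables)
{
	if (freed_tables) {
		/* Lazy CPUs must be flushed too: they may walk freed tables. */
		on_each_cpu_mask(cpumask);
		return;
	}

	/* Open-coded on_each_cpu_cond_mask(): build the filtered target set. */
	unsigned long *cond_cpumask = &flush_tlb_mask;

	*cond_cpumask = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if ((cpumask & (1UL << cpu)) && tlb_is_not_lazy(cpu))
			*cond_cpumask |= 1UL << cpu;

	on_each_cpu_mask(*cond_cpumask);
}

int main(void)
{
	is_lazy[2] = true;
	flush_tlb_multi(0x0e, false); /* cpus 1 and 3 get the IPI; cpu2 is lazy */
	flush_tlb_multi(0x0e, true);  /* freed tables: cpus 1, 2 and 3 all get it */
	return 0;
}
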
 
-void flush_tlb_others(const struct cpumask *cpumask,
+void flush_tlb_multi(const struct cpumask *cpumask,
                      const struct flush_tlb_info *info)
 {
-       __flush_tlb_others(cpumask, info);
+       __flush_tlb_multi(cpumask, info);
 }
 
 /*
@@ -847,7 +880,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
 static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
 #endif
 
-static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
+static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
                        unsigned long start, unsigned long end,
                        unsigned int stride_shift, bool freed_tables,
                        u64 new_tlb_gen)
@@ -869,11 +902,12 @@ static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
        info->stride_shift      = stride_shift;
        info->freed_tables      = freed_tables;
        info->new_tlb_gen       = new_tlb_gen;
+       info->initiating_cpu    = smp_processor_id();
 
        return info;
 }
 
-static inline void put_flush_tlb_info(void)
+static void put_flush_tlb_info(void)
 {
 #ifdef CONFIG_DEBUG_VM
        /* Complete reentrancy prevention checks */
@@ -905,16 +939,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
        info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
                                  new_tlb_gen);
 
-       if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
+       /*
+        * flush_tlb_multi() is not optimized for the common case in which only
+        * a local TLB flush is needed. Optimize that case by calling
+        * flush_tlb_func() directly.
+        */
+       if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
+               flush_tlb_multi(mm_cpumask(mm), info);
+       } else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
                lockdep_assert_irqs_enabled();
                local_irq_disable();
-               flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
+               flush_tlb_func(info);
                local_irq_enable();
        }
 
-       if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
-               flush_tlb_others(mm_cpumask(mm), info);
-
        put_flush_tlb_info();
        put_cpu();
 }
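
A sketch of the dispatch flush_tlb_mm_range() now performs: if any CPU besides the current one has the mm in its cpumask, a single flush_tlb_multi() call covers the remote CPUs and the local one concurrently; if only the local CPU is affected, the flush function is invoked directly (with interrupts disabled in the real code). The cpumask_any_but() test is modeled on a plain bitmask; everything else is a hedged stand-in.

#include <stdbool.h>
#include <stdio.h>

/* Model of the cpumask_any_but() test: is any bit other than 'cpu' set? */
static bool any_other_cpu(unsigned long mask, int cpu)
{
	return (mask & ~(1UL << cpu)) != 0;
}

static void flush_tlb_multi(unsigned long mask)  { printf("multi: %#lx\n", mask); }
static void flush_tlb_func_locally(void)         { printf("local only\n"); }

static void flush_tlb_mm_range(unsigned long mm_cpumask, int this_cpu,
			       bool mm_is_loaded_here)
{
	if (any_other_cpu(mm_cpumask, this_cpu)) {
		/* Remote CPUs involved: one call flushes them and us concurrently. */
		flush_tlb_multi(mm_cpumask);
	} else if (mm_is_loaded_here) {
		/* Only we run this mm: skip the IPI machinery entirely. */
		/* (the kernel brackets this with local_irq_disable/enable) */
		flush_tlb_func_locally();
	}
	/* else: no CPU needs it now; later switches to this mm catch up via tlb_gen */
}

int main(void)
{
	flush_tlb_mm_range(0x05, 0, true);  /* cpus 0 and 2 -> flush_tlb_multi() */
	flush_tlb_mm_range(0x01, 0, true);  /* only cpu 0   -> direct local call */
	flush_tlb_mm_range(0x00, 0, false); /* nothing to do */
	return 0;
}
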
@@ -1119,34 +1157,30 @@ void __flush_tlb_all(void)
 }
 EXPORT_SYMBOL_GPL(__flush_tlb_all);
 
-/*
- * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
- * This means that the 'struct flush_tlb_info' that describes which mappings to
- * flush is actually fixed. We therefore set a single fixed struct and use it in
- * arch_tlbbatch_flush().
- */
-static const struct flush_tlb_info full_flush_tlb_info = {
-       .mm = NULL,
-       .start = 0,
-       .end = TLB_FLUSH_ALL,
-};
-
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
+       struct flush_tlb_info *info;
+
        int cpu = get_cpu();
 
-       if (cpumask_test_cpu(cpu, &batch->cpumask)) {
+       info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false, 0);
+       /*
+        * flush_tlb_multi() is not optimized for the common case in which only
+        * a local TLB flush is needed. Optimize that case by calling
+        * flush_tlb_func() directly.
+        */
+       if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
+               flush_tlb_multi(&batch->cpumask, info);
+       } else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
                lockdep_assert_irqs_enabled();
                local_irq_disable();
-               flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
+               flush_tlb_func(info);
                local_irq_enable();
        }
 
-       if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
-               flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
-
        cpumask_clear(&batch->cpumask);
 
+       put_flush_tlb_info();
        put_cpu();
 }
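
arch_tlbbatch_flush() now builds its flush_tlb_info via get_flush_tlb_info() instead of the removed static full_flush_tlb_info, chiefly so that initiating_cpu is recorded for the unified flush_tlb_func(). Below is a user-space sketch of the get/put pairing around a preallocated slot, with a reentrancy counter in the spirit of the CONFIG_DEBUG_VM check; the structure is trimmed to what this model needs and is not the kernel definition.

#include <assert.h>
#include <stdio.h>

#define TLB_FLUSH_ALL (~0UL)

struct flush_tlb_info {
	unsigned long start, end;
	int initiating_cpu;
};

/* One preallocated slot (per CPU in the kernel; preemption is disabled there). */
static struct flush_tlb_info flush_tlb_info_slot;
static unsigned int flush_tlb_info_idx;   /* reentrancy check, as under DEBUG_VM */

static int smp_processor_id(void) { return 0; }   /* single "CPU" in this model */

static struct flush_tlb_info *get_flush_tlb_info(unsigned long start,
						 unsigned long end)
{
	flush_tlb_info_idx++;
	assert(flush_tlb_info_idx == 1);  /* no nested users allowed */

	flush_tlb_info_slot.start = start;
	flush_tlb_info_slot.end = end;
	flush_tlb_info_slot.initiating_cpu = smp_processor_id();
	return &flush_tlb_info_slot;
}

static void put_flush_tlb_info(void)
{
	flush_tlb_info_idx--;
	assert(flush_tlb_info_idx == 0);  /* balanced with get_flush_tlb_info() */
}

int main(void)
{
	struct flush_tlb_info *info = get_flush_tlb_info(0, TLB_FLUSH_ALL);

	printf("full flush requested by cpu%d\n", info->initiating_cpu);
	put_flush_tlb_info();
	return 0;
}
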