return atomic64_inc_return(&mm->context.tlb_gen);
}
-static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
- struct mm_struct *mm)
+static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm,
+ unsigned long uaddr)
{
inc_mm_tlb_gen(mm);
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
* The arch code makes the following promise: generic code can modify a
- * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
- * needed barriers), then call arch_tlbbatch_flush(), and the entries
+ * PTE, then call arch_tlbbatch_add_pending() (which internally provides
+ * all needed barriers), then call arch_tlbbatch_flush(), and the entries
* will be flushed on all CPUs by the time that arch_tlbbatch_flush()
* returns.
*/
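Note that the x86 implementation above still ignores uaddr: it only records the mm's cpumask and bumps the TLB generation, and the real invalidation happens later in arch_tlbbatch_flush(). The extra argument exists so that an architecture which invalidates by virtual address (arm64 is the case this interface change appears to target) can issue the per-page invalidation already at add-pending time and reduce arch_tlbbatch_flush() to a single wait. A minimal sketch of that shape, with hypothetical helper names (arch_issue_tlbi_page_nosync() and arch_wait_for_pending_tlbi() are illustrative, not upstream code):

/* Hypothetical arch side for an architecture that invalidates by VA (sketch only). */
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
					     struct mm_struct *mm,
					     unsigned long uaddr)
{
	/* Broadcast a per-page invalidation for uaddr now, without waiting for it. */
	arch_issue_tlbi_page_nosync(mm, uaddr);
}

static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	/* Wait for every invalidation issued above to complete. */
	arch_wait_for_pending_tlbi();
}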
#define TLB_FLUSH_BATCH_PENDING_LARGE \
(TLB_FLUSH_BATCH_PENDING_MASK / 2)
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
+ unsigned long uaddr)
{
 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
int batch;
if (!pte_accessible(mm, pteval))
return;
- arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
+ arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
tlb_ubc->flush_required = true;
/*
}
}
#else
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
+ unsigned long uaddr)
{
}
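For context, the flush_required flag set above is consumed when the batch is drained: try_to_unmap_flush() hands the accumulated batch to arch_tlbbatch_flush(), relying on the promise quoted earlier. Roughly, as a simplified sketch of the existing mm/rmap.c drain path (not part of this diff):

void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	/* Everything batched via arch_tlbbatch_add_pending() is flushed here. */
	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}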
*/
pteval = ptep_get_and_clear(mm, address, pvmw.pte);
- set_tlb_ubc_flush_pending(mm, pteval);
+ set_tlb_ubc_flush_pending(mm, pteval, address);
} else {
pteval = ptep_clear_flush(vma, address, pvmw.pte);
}
*/
pteval = ptep_get_and_clear(mm, address, pvmw.pte);
- set_tlb_ubc_flush_pending(mm, pteval);
+ set_tlb_ubc_flush_pending(mm, pteval, address);
} else {
pteval = ptep_clear_flush(vma, address, pvmw.pte);
}
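Both call sites take the batched path only when should_defer_flush() says deferral is worthwhile; otherwise they fall back to ptep_clear_flush(), which flushes immediately. Broadly, deferral requires that the caller asked for batching (TTU_BATCH_FLUSH) and that a remote CPU may still be caching the entry. A simplified sketch of that check (the exact test lives in mm/rmap.c and was later moved into arch code, so details may differ):

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* Only defer if a remote CPU might still hold a stale TLB entry. */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}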