x86/mm/tlb: Add freed_tables element to flush_tlb_info
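(No commit message body was captured with this page; the summary below is reconstructed from the diff itself.) The patch widens the x86 TLB-flush interface: struct flush_tlb_info gains a stride_shift field (log2 of the distance between flushed addresses, so hugetlb ranges can be flushed in huge-page-sized steps instead of 4 KiB steps) and a freed_tables flag (set when page-table pages were freed, so paging-structure caches must be invalidated as well). flush_tlb_mm_range() takes the two new parameters, the flush_tlb_mm(), flush_tlb_range(), and flush_tlb_page() wrappers are updated to pass them, and the tlb_defer_switch_to_init_mm() heuristic is removed.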
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 58ce528..323a313 100644
@@ -148,22 +148,6 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 #define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
 #endif
 
-static inline bool tlb_defer_switch_to_init_mm(void)
-{
-       /*
-        * If we have PCID, then switching to init_mm is reasonably
-        * fast.  If we don't have PCID, then switching to init_mm is
-        * quite slow, so we try to defer it in the hopes that we can
-        * avoid it entirely.  The latter approach runs the risk of
-        * receiving otherwise unnecessary IPIs.
-        *
-        * This choice is just a heuristic.  The tlb code can handle this
-        * function returning true or false regardless of whether we have
-        * PCID.
-        */
-       return !static_cpu_has(X86_FEATURE_PCID);
-}
-
 struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
@@ -547,23 +531,30 @@ struct flush_tlb_info {
        unsigned long           start;
        unsigned long           end;
        u64                     new_tlb_gen;
+       unsigned int            stride_shift;
+       bool                    freed_tables;
 };
 
 #define local_flush_tlb() __flush_tlb()
 
-#define flush_tlb_mm(mm)       flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+#define flush_tlb_mm(mm)                                               \
+               flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
 
-#define flush_tlb_range(vma, start, end)       \
-               flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+#define flush_tlb_range(vma, start, end)                               \
+       flush_tlb_mm_range((vma)->vm_mm, start, end,                    \
+                          ((vma)->vm_flags & VM_HUGETLB)               \
+                               ? huge_page_shift(hstate_vma(vma))      \
+                               : PAGE_SHIFT, false)
 
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-                               unsigned long end, unsigned long vmflag);
+                               unsigned long end, unsigned int stride_shift,
+                               bool freed_tables);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 {
-       flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
+       flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
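
For illustration, here is a minimal sketch of how a caller uses the widened interface. The function example_unmap_flush() and its arguments are hypothetical; flush_tlb_mm_range() and PAGE_SHIFT match the declarations in the hunk above.

#include <asm/tlbflush.h>	/* flush_tlb_mm_range() */

/*
 * Hypothetical caller, for illustration only. It flushes a range that
 * was just unmapped, using the parameters added by this patch:
 *
 *   - stride_shift: log2 of the distance between flushed addresses;
 *     PAGE_SHIFT for 4 KiB mappings, huge_page_shift() for hugetlb
 *     ranges (as flush_tlb_range() now does above).
 *   - freed_tables: true when page-table pages were freed, so the
 *     flush must also invalidate paging-structure caches on any CPU
 *     that might hold them.
 */
static void example_unmap_flush(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				bool tables_freed)
{
	flush_tlb_mm_range(mm, start, end, PAGE_SHIFT, tables_freed);
}

Note how the updated wrappers choose these values: flush_tlb_mm() passes true for freed_tables, presumably because a full-mm flush must assume page tables may have been freed, while flush_tlb_range() and flush_tlb_page() pass false, since they tear down only leaf mappings.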