#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush; /* Really unmapped some ptes? */
	unsigned long		start;
	unsigned long		end;
	unsigned int		fullmm; /* non-zero means full mm flush */
};
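/*
 * Illustrative sketch, not part of this header: assuming the generic
 * tlb_gather_mmu()/tlb_finish_mmu() wrappers in the core mm code, an
 * unmap operation drives the hooks below roughly as follows:
 *
 *	struct mmu_gather tlb;
 *
 *	arch_tlb_gather_mmu(&tlb, mm, start, end);
 *	for each pte being torn down:
 *		tlb_remove_tlb_entry(&tlb, ptep, address);
 *		tlb_remove_page(&tlb, page);
 *	arch_tlb_finish_mmu(&tlb, start, end, false);
 */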
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}
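/*
 * Worked example of the range accumulation above: with 4 KiB pages,
 * recording ptes at 0x3000 and then 0x1000 leaves start == 0x1000 and
 * end == 0x4000, so a single ranged flush covers both entries.
 */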
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	/* Start with an empty range: start above end until a pte is seen. */
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end + 1));

	init_tlb_gather(tlb);
}
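/*
 * Worked example of the fullmm test above: a full address space
 * teardown is requested as start == 0, end == -1UL, so
 * start | (end + 1) == 0 | 0 == 0 and fullmm == 1; any bounded range
 * leaves a bit set and yields fullmm == 0.
 */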
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);
static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}
static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	init_tlb_gather(tlb);
}
static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}
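/*
 * Pages themselves are freed eagerly by __tlb_remove_page() below, so
 * tlb_flush_mmu() reduces to one ranged invalidate followed by a reset
 * of the gather; if nothing was really unmapped, need_flush stays
 * clear and the invalidate is skipped entirely.
 */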
/* arch_tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
		tlb->need_flush = 1;
	}
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
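/*
 * Hypothetical caller sketch: a path that must guarantee invalidation
 * even though no pte was recorded would end with
 *
 *	arch_tlb_finish_mmu(&tlb, start, end, true);
 *
 * The force case reinstates the full range and sets need_flush before
 * the final tlb_flush_mmu().
 */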
/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}
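/*
 * The return value of __tlb_remove_page() is a contract with the
 * generic mm code: returning true would mean "batch full, flush before
 * gathering more pages". Pages are freed eagerly above, so false is
 * always returned and tlb_remove_page() can safely ignore the result.
 */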
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}
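/*
 * The page_size argument is deliberately unused in both _size variants:
 * invalidation here is done by mm range, so a 4 KiB page and a huge
 * page are flushed the same way.
 */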
/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)
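/*
 * Likewise, huge pages need no extra bookkeeping: the hstate argument
 * 'h' is dropped and the entry is recorded like any other pte, since
 * __tlb_remove_tlb_entry() only widens the pending flush range.
 */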
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
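/*
 * The three macros above simply forward to the __*_free_tlb() helpers
 * from asm/pgalloc.h; the gathered range is invalidated by the final
 * tlb_flush_mmu() in arch_tlb_finish_mmu().
 */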
#define tlb_migrate_finish(mm) do {} while (0)

#endif