// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

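/* Drain this CPU's pending TLB batch.  The TSB entries for the queued
 * addresses are scrubbed first, then the TLB entries are shot down:
 * a single queued address gets a one-page flush, anything more goes
 * through the pending-flush path (a cross-call on SMP).  The TLB flush
 * is only issued while the mm still owns a valid hardware context.
 */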
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

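/* Lazy MMU mode brackets a run of page table updates so that the
 * resulting TLB flushes can be accumulated in the per-cpu tlb_batch
 * and issued together.  Entering the mode marks the batch active;
 * leaving it drains anything still pending and clears the flag.
 */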
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

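/* Queue one virtual address for a deferred TLB flush.  Bit 0 of the
 * stored address records whether the old mapping was executable so
 * the eventual flush can hit the I-TLB too.  Outside of lazy MMU mode
 * the address is flushed immediately; a change of mm or of page size
 * forces the current batch out first, and a full batch is drained as
 * soon as it fills up.
 */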
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

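/* Entry point used when a previously valid PTE is being replaced; orig
 * is the old PTE value.  On non-hypervisor (pre-sun4v) chips a dirty
 * file-backed page whose user mapping differs from its kernel mapping
 * in bit 13 (the D-cache aliasing bit) is flushed from the D-caches
 * before the translation itself is queued.  For a full-mm teardown the
 * per-page TLB work is skipped, since the caller is tearing down the
 * whole address space.
 */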
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping_file(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
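/* Walk the PTE page that a non-huge PMD points to and queue a TLB
 * flush, with the exec bit preserved, for every valid entry in it.
 */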
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

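/* Accounting and TLB batching common to all PMD updates: keep the
 * per-mm THP and huge-zero-page counters in sync when a mapping
 * changes between huge and non-huge, and queue flushes for whatever
 * the old PMD mapped.  A transparent huge page covers two
 * REAL_HPAGE_SIZE hardware mappings, hence the pair of flushes.
 */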
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

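/* Install a new PMD value and account for the one it replaces. */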
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}

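/* Atomically exchange *pmdp for a new value with cmpxchg64 and return
 * the old PMD, performing the same accounting as set_pmd_at().
 */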
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old, entry;

	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}

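/* Stash a preallocated PTE page behind a THP mapping.  Deposited pages
 * are chained on a list threaded through the pages themselves, under
 * mm->page_table_lock, so they can be handed back by
 * pgtable_trans_huge_withdraw() when the huge mapping goes away.
 */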
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

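/* Take back a previously deposited PTE page.  The two PTE slots that
 * were used as the list_head are cleared so the page looks like an
 * empty page table again.
 */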
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */