Merge tag 'ata-5.17-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemo...
[linux-2.6-microblaze.git] / mm / page_table_check.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4  * Copyright (c) 2021, Google LLC.
5  * Pasha Tatashin <pasha.tatashin@soleen.com>
6  */
7 #include <linux/mm.h>
8 #include <linux/page_table_check.h>
9
10 #undef pr_fmt
11 #define pr_fmt(fmt)     "page_table_check: " fmt
12
/*
 * Per-physical-page state, stored in page_ext: how many userspace page
 * table entries currently map the page as anonymous vs file-backed.
 * The checks below enforce that the two counts are never non-zero at
 * the same time.
 */
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};
17
/*
 * Boot-time default, possibly overridden by the "page_table_check" early
 * parameter below. __initdata: only consulted during early init.
 */
static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

/*
 * Starts true (checks patched out); init_page_table_check() disables the
 * key when the feature is enabled, turning the checks on.
 */
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);
23
24 static int __init early_page_table_check_param(char *buf)
25 {
26         if (!buf)
27                 return -EINVAL;
28
29         if (strcmp(buf, "on") == 0)
30                 __page_table_check_enabled = true;
31         else if (strcmp(buf, "off") == 0)
32                 __page_table_check_enabled = false;
33
34         return 0;
35 }
36
37 early_param("page_table_check", early_page_table_check_param);
38
/*
 * page_ext "need" callback: per-page counter space is only reserved when
 * the feature was enabled on the command line or enforced at build time.
 */
static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}
43
44 static void __init init_page_table_check(void)
45 {
46         if (!__page_table_check_enabled)
47                 return;
48         static_branch_disable(&page_table_check_disabled);
49 }
50
/*
 * Registration with the page_ext allocator: reserve
 * sizeof(struct page_table_check) bytes per page, and hook boot-time
 * opt-in/initialization.
 */
struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};
56
57 static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
58 {
59         BUG_ON(!page_ext);
60         return (void *)(page_ext) + page_table_check_ops.offset;
61 }
62
63 static inline bool pte_user_accessible_page(pte_t pte)
64 {
65         return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
66 }
67
68 static inline bool pmd_user_accessible_page(pmd_t pmd)
69 {
70         return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) &&
71                 (pmd_val(pmd) & _PAGE_USER);
72 }
73
74 static inline bool pud_user_accessible_page(pud_t pud)
75 {
76         return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) &&
77                 (pud_val(pud) & _PAGE_USER);
78 }
79
80 /*
81  * An enty is removed from the page table, decrement the counters for that page
82  * verify that it is of correct type and counters do not become negative.
83  */
84 static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
85                                    unsigned long pfn, unsigned long pgcnt)
86 {
87         struct page_ext *page_ext;
88         struct page *page;
89         bool anon;
90         int i;
91
92         if (!pfn_valid(pfn))
93                 return;
94
95         page = pfn_to_page(pfn);
96         page_ext = lookup_page_ext(page);
97         anon = PageAnon(page);
98
99         for (i = 0; i < pgcnt; i++) {
100                 struct page_table_check *ptc = get_page_table_check(page_ext);
101
102                 if (anon) {
103                         BUG_ON(atomic_read(&ptc->file_map_count));
104                         BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
105                 } else {
106                         BUG_ON(atomic_read(&ptc->anon_map_count));
107                         BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
108                 }
109                 page_ext = page_ext_next(page_ext);
110         }
111 }
112
113 /*
114  * A new enty is added to the page table, increment the counters for that page
115  * verify that it is of correct type and is not being mapped with a different
116  * type to a different process.
117  */
118 static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
119                                  unsigned long pfn, unsigned long pgcnt,
120                                  bool rw)
121 {
122         struct page_ext *page_ext;
123         struct page *page;
124         bool anon;
125         int i;
126
127         if (!pfn_valid(pfn))
128                 return;
129
130         page = pfn_to_page(pfn);
131         page_ext = lookup_page_ext(page);
132         anon = PageAnon(page);
133
134         for (i = 0; i < pgcnt; i++) {
135                 struct page_table_check *ptc = get_page_table_check(page_ext);
136
137                 if (anon) {
138                         BUG_ON(atomic_read(&ptc->file_map_count));
139                         BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
140                 } else {
141                         BUG_ON(atomic_read(&ptc->anon_map_count));
142                         BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
143                 }
144                 page_ext = page_ext_next(page_ext);
145         }
146 }
147
148 /*
149  * page is on free list, or is being allocated, verify that counters are zeroes
150  * crash if they are not.
151  */
152 void __page_table_check_zero(struct page *page, unsigned int order)
153 {
154         struct page_ext *page_ext = lookup_page_ext(page);
155         int i;
156
157         BUG_ON(!page_ext);
158         for (i = 0; i < (1 << order); i++) {
159                 struct page_table_check *ptc = get_page_table_check(page_ext);
160
161                 BUG_ON(atomic_read(&ptc->anon_map_count));
162                 BUG_ON(atomic_read(&ptc->file_map_count));
163                 page_ext = page_ext_next(page_ext);
164         }
165 }
166
167 void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
168                                   pte_t pte)
169 {
170         if (&init_mm == mm)
171                 return;
172
173         if (pte_user_accessible_page(pte)) {
174                 page_table_check_clear(mm, addr, pte_pfn(pte),
175                                        PAGE_SIZE >> PAGE_SHIFT);
176         }
177 }
178 EXPORT_SYMBOL(__page_table_check_pte_clear);
179
180 void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
181                                   pmd_t pmd)
182 {
183         if (&init_mm == mm)
184                 return;
185
186         if (pmd_user_accessible_page(pmd)) {
187                 page_table_check_clear(mm, addr, pmd_pfn(pmd),
188                                        PMD_PAGE_SIZE >> PAGE_SHIFT);
189         }
190 }
191 EXPORT_SYMBOL(__page_table_check_pmd_clear);
192
193 void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
194                                   pud_t pud)
195 {
196         if (&init_mm == mm)
197                 return;
198
199         if (pud_user_accessible_page(pud)) {
200                 page_table_check_clear(mm, addr, pud_pfn(pud),
201                                        PUD_PAGE_SIZE >> PAGE_SHIFT);
202         }
203 }
204 EXPORT_SYMBOL(__page_table_check_pud_clear);
205
206 void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
207                                 pte_t *ptep, pte_t pte)
208 {
209         pte_t old_pte;
210
211         if (&init_mm == mm)
212                 return;
213
214         old_pte = *ptep;
215         if (pte_user_accessible_page(old_pte)) {
216                 page_table_check_clear(mm, addr, pte_pfn(old_pte),
217                                        PAGE_SIZE >> PAGE_SHIFT);
218         }
219
220         if (pte_user_accessible_page(pte)) {
221                 page_table_check_set(mm, addr, pte_pfn(pte),
222                                      PAGE_SIZE >> PAGE_SHIFT,
223                                      pte_write(pte));
224         }
225 }
226 EXPORT_SYMBOL(__page_table_check_pte_set);
227
228 void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
229                                 pmd_t *pmdp, pmd_t pmd)
230 {
231         pmd_t old_pmd;
232
233         if (&init_mm == mm)
234                 return;
235
236         old_pmd = *pmdp;
237         if (pmd_user_accessible_page(old_pmd)) {
238                 page_table_check_clear(mm, addr, pmd_pfn(old_pmd),
239                                        PMD_PAGE_SIZE >> PAGE_SHIFT);
240         }
241
242         if (pmd_user_accessible_page(pmd)) {
243                 page_table_check_set(mm, addr, pmd_pfn(pmd),
244                                      PMD_PAGE_SIZE >> PAGE_SHIFT,
245                                      pmd_write(pmd));
246         }
247 }
248 EXPORT_SYMBOL(__page_table_check_pmd_set);
249
250 void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
251                                 pud_t *pudp, pud_t pud)
252 {
253         pud_t old_pud;
254
255         if (&init_mm == mm)
256                 return;
257
258         old_pud = *pudp;
259         if (pud_user_accessible_page(old_pud)) {
260                 page_table_check_clear(mm, addr, pud_pfn(old_pud),
261                                        PUD_PAGE_SIZE >> PAGE_SHIFT);
262         }
263
264         if (pud_user_accessible_page(pud)) {
265                 page_table_check_set(mm, addr, pud_pfn(pud),
266                                      PUD_PAGE_SIZE >> PAGE_SHIFT,
267                                      pud_write(pud));
268         }
269 }
270 EXPORT_SYMBOL(__page_table_check_pud_set);