// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

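/*
 * One page_table_check instance is kept in the page_ext of every base page.
 * The counters record how many times the page is currently mapped into user
 * page tables as anonymous vs. file-backed memory; the checks below enforce
 * that a page is never counted as both at the same time.
 */
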
static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

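/*
 * The static key lets the inline wrappers in <linux/page_table_check.h> bail
 * out cheaply when checking is off; the wrapper pattern is roughly:
 *
 *	static inline void page_table_check_pte_set(struct mm_struct *mm,
 *						    unsigned long addr,
 *						    pte_t *ptep, pte_t pte)
 *	{
 *		if (static_branch_likely(&page_table_check_disabled))
 *			return;
 *		__page_table_check_pte_set(mm, addr, ptep, pte);
 *	}
 */
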
static int __init early_page_table_check_param(char *buf)
{
	return strtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;

	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};

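/*
 * Checking is compiled in with CONFIG_PAGE_TABLE_CHECK and activated either by
 * CONFIG_PAGE_TABLE_CHECK_ENFORCED or by booting with "page_table_check=on".
 * When active, .need reserves space in page_ext for the counters and .init
 * turns the page_table_check_disabled key off so the checks start running.
 */
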
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	return (void *)(page_ext) + page_table_check_ops.offset;
}

static inline bool pte_user_accessible_page(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) &&
		(pmd_val(pmd) & _PAGE_USER);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) &&
		(pud_val(pud) & _PAGE_USER);
}

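/*
 * These helpers test the present and user bits of the hardware entry
 * (_PAGE_PRESENT/_PAGE_USER), so only entries that userspace can actually
 * reach are tracked; kernel-only mappings are ignored by the callers below.
 */
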
/*
 * An entry is removed from the page table. Decrement the counters for that
 * page and verify that it is of the correct type and that the counters do
 * not become negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	bool anon;
	unsigned long i;

	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}

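/*
 * A BUG here means the accounting went inconsistent: the page is being removed
 * as one type while still counted as the other, or it is being removed more
 * times than it was added.
 */
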
/*
 * A new entry is added to the page table. Increment the counters for that
 * page and verify that it is of the correct type and is not being mapped
 * with a different type into a different process.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	bool anon;
	unsigned long i;

	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}

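/*
 * The anonymous case is the interesting one: an anon page may be mapped
 * read-only into several address spaces (e.g. after fork()), but mapping it
 * writable while it is already mapped elsewhere trips the BUG_ON above, since
 * that would let one process corrupt another's memory.
 */
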
/*
 * The page is on the free list or is being allocated. Verify that the
 * counters are zero; crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	unsigned long i;

	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
}

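/*
 * Called from the allocator paths (through the page_table_check_alloc() and
 * page_table_check_free() wrappers), so a page cannot be freed or handed out
 * while it is still mapped into a user page table.
 */
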
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	__page_table_check_pte_clear(mm, addr, *ptep);
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_PAGE_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_PAGE_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

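/*
 * Architectures that support page table check are expected to call the hooks
 * above from their page table setters; on x86 this looks roughly like:
 *
 *	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *				      pte_t *ptep, pte_t pte)
 *	{
 *		page_table_check_pte_set(mm, addr, ptep, pte);
 *		set_pte(ptep, pte);
 *	}
 */
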
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, ptep[i]);
			addr += PAGE_SIZE;
		}
		pte_unmap(ptep);
	}
}