arch/riscv/mm/pageattr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>

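/*
 * One walk's worth of attribute changes: bits in clear_mask are removed from
 * each page table entry, then bits in set_mask are OR'd in.
 */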
struct pageattr_masks {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};

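/* Apply the caller's clear/set masks (walk->private) to a raw page table value. */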
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
        struct pageattr_masks *masks = walk->private;
        unsigned long new_val = val;

        new_val &= ~(pgprot_val(masks->clear_mask));
        new_val |= (pgprot_val(masks->set_mask));

        return new_val;
}

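/*
 * Entry callbacks for the upper levels: only leaf (huge page) mappings are
 * rewritten here; non-leaf entries are left untouched so the walk descends
 * to the next level.
 */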
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pgd_t val = READ_ONCE(*pgd);

        if (pgd_leaf(val)) {
                val = __pgd(set_pageattr_masks(pgd_val(val), walk));
                set_pgd(pgd, val);
        }

        return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        p4d_t val = READ_ONCE(*p4d);

        if (p4d_leaf(val)) {
                val = __p4d(set_pageattr_masks(p4d_val(val), walk));
                set_p4d(p4d, val);
        }

        return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pud_t val = READ_ONCE(*pud);

        if (pud_leaf(val)) {
                val = __pud(set_pageattr_masks(pud_val(val), walk));
                set_pud(pud, val);
        }

        return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pmd_t val = READ_ONCE(*pmd);

        if (pmd_leaf(val)) {
                val = __pmd(set_pageattr_masks(pmd_val(val), walk));
                set_pmd(pmd, val);
        }

        return 0;
}

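/* PTEs are always leaves, so every entry gets the new attributes. */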
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pte_t val = READ_ONCE(*pte);

        val = __pte(set_pageattr_masks(pte_val(val), walk));
        set_pte(pte, val);

        return 0;
}

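/* Unmapped ranges have no attributes to change. */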
static int pageattr_pte_hole(unsigned long addr, unsigned long next,
                             int depth, struct mm_walk *walk)
{
        /* Nothing to do here */
        return 0;
}

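/* Shared callback table for all attribute updates in this file. */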
static const struct mm_walk_ops pageattr_ops = {
        .pgd_entry = pageattr_pgd_entry,
        .p4d_entry = pageattr_p4d_entry,
        .pud_entry = pageattr_pud_entry,
        .pmd_entry = pageattr_pmd_entry,
        .pte_entry = pageattr_pte_entry,
        .pte_hole = pageattr_pte_hole,
};

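/*
 * Apply set_mask/clear_mask to every mapping in [addr, addr + numpages *
 * PAGE_SIZE) of the kernel page tables, then flush the TLB for that range.
 */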
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
                        pgprot_t clear_mask)
{
        int ret;
        unsigned long start = addr;
        unsigned long end = start + PAGE_SIZE * numpages;
        struct pageattr_masks masks = {
                .set_mask = set_mask,
                .clear_mask = clear_mask
        };

        if (!numpages)
                return 0;

        mmap_read_lock(&init_mm);
        ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                    &masks);
        mmap_read_unlock(&init_mm);

        flush_tlb_kernel_range(start, end);

        return ret;
}

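/* set_memory_*(): change R/W/X permissions on a range of kernel pages. */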
int set_memory_ro(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
                            __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
                            __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

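/*
 * set_direct_map_{invalid,default}_noflush(): unmap or restore a single page
 * in the kernel linear mapping. init_mm has no VMAs for kernel mappings, so
 * the VMA-less walk is used, as in __set_memory(); callers are responsible
 * for any TLB flushing.
 */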
int set_direct_map_invalid_noflush(struct page *page)
{
        int ret;
        unsigned long start = (unsigned long)page_address(page);
        unsigned long end = start + PAGE_SIZE;
        struct pageattr_masks masks = {
                .set_mask = __pgprot(0),
                .clear_mask = __pgprot(_PAGE_PRESENT)
        };

        mmap_read_lock(&init_mm);
        ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                    &masks);
        mmap_read_unlock(&init_mm);

        return ret;
}

int set_direct_map_default_noflush(struct page *page)
{
        int ret;
        unsigned long start = (unsigned long)page_address(page);
        unsigned long end = start + PAGE_SIZE;
        struct pageattr_masks masks = {
                .set_mask = PAGE_KERNEL,
                .clear_mask = __pgprot(0)
        };

        mmap_read_lock(&init_mm);
        ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                    &masks);
        mmap_read_unlock(&init_mm);

        return ret;
}

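/*
 * DEBUG_PAGEALLOC hook: map or unmap pages in the linear mapping by toggling
 * _PAGE_PRESENT so stray accesses to freed pages fault immediately.
 */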
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (!debug_pagealloc_enabled())
                return;

        if (enable)
                __set_memory((unsigned long)page_address(page), numpages,
                             __pgprot(_PAGE_PRESENT), __pgprot(0));
        else
                __set_memory((unsigned long)page_address(page), numpages,
                             __pgprot(0), __pgprot(_PAGE_PRESENT));
}