arch/riscv/mm/pageattr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>

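/*
 * Bits to set and bits to clear in every page table entry the walker
 * visits, passed through the mm_walk private pointer.
 */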
struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

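/* Apply the clear mask first, then the set mask, to a raw entry value. */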
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

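/*
 * The pgd/p4d/pud/pmd callbacks below only rewrite leaf entries, i.e.
 * entries that map a block of memory directly at that level; table
 * entries are left untouched so the walk descends to the next level.
 */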
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = READ_ONCE(*pgd);

	if (pgd_leaf(val)) {
		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
		set_pgd(pgd, val);
	}

	return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = READ_ONCE(*p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = READ_ONCE(*pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = READ_ONCE(*pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

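/* PTEs are always leaf entries, so every one is updated unconditionally. */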
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = READ_ONCE(*pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}

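/* One callback per page table level, plus a no-op for unmapped holes. */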
static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
};

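/*
 * Apply the given set/clear masks to every leaf entry mapping
 * [addr, addr + numpages * PAGE_SIZE) in the kernel page tables,
 * then flush the TLB for that range.
 */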
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	down_read(&init_mm.mmap_sem);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);
	up_read(&init_mm.mmap_sem);

	flush_tlb_kernel_range(start, end);

	return ret;
}

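/*
 * Permission helpers: each picks the set/clear masks for a common
 * protection change, e.g. set_memory_ro() sets _PAGE_READ and clears
 * _PAGE_WRITE on every page in the range.
 */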
int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

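/*
 * Clear _PAGE_PRESENT so the page disappears from the kernel direct map.
 * As the _noflush suffix says, no TLB flush is done here; that is the
 * caller's responsibility.
 */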
int set_direct_map_invalid_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(_PAGE_PRESENT)
	};

	/* The page walker requires mmap_sem; take it for read as __set_memory() does. */
	down_read(&init_mm.mmap_sem);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	up_read(&init_mm.mmap_sem);

	return ret;
}

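/* Set the default kernel protection bits (PAGE_KERNEL) on a direct-map page. */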
int set_direct_map_default_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = PAGE_KERNEL,
		.clear_mask = __pgprot(0)
	};

	/* Same locking requirement as set_direct_map_invalid_noflush(). */
	down_read(&init_mm.mmap_sem);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	up_read(&init_mm.mmap_sem);

	return ret;
}

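/*
 * DEBUG_PAGEALLOC hook: map or unmap pages in the direct map by toggling
 * _PAGE_PRESENT, so stray accesses to freed pages fault immediately.
 */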
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	if (enable)
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(_PAGE_PRESENT), __pgprot(0));
	else
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(0), __pgprot(_PAGE_PRESENT));
}
#endif