// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/page-states.h>
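
/*
 * cmma_flag records the detected level of CMMA support; the values are
 * inferred from cmma_init() and arch_alloc_page() below:
 *   0 - ESSA not available, page hinting disabled
 *   1 - ESSA available, pages are set stable via ESSA_SET_STABLE
 *   2 - ESSA plus facility 147 (presumably the ESSA no-DAT states
 *       facility) available, pages can be set stable/no-DAT
 */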
static int cmma_flag = 1;

static int __init cmma(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		cmma_flag = enabled;
	return 1;
}
__setup("cmma=", cmma);
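
/*
 * Probe for ESSA by executing an ESSA_GET_STATE request. If the
 * instruction is not installed it generates a program check; the
 * exception table fixup then skips the "la" that would clear rc,
 * so rc stays at -EOPNOTSUPP.
 */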
static inline int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = -EOPNOTSUPP;

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0:	la	%[rc],0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [rc] "+&d" (rc), [tmp] "+&d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE));
	return rc;
}

void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}
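
/*
 * The three helpers below issue ESSA ("extract and set storage
 * attributes", opcode 0xb9ab, emitted via .insn presumably to avoid
 * depending on assembler support for the mnemonic) once for each 4K
 * page of the 2^order block. The previous page state returned by the
 * instruction is discarded.
 */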
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = phys_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = phys_to_page(pud_val(*pud));
			/* a region/segment table occupies four 4K pages */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = phys_to_page(p4d_val(*p4d));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = phys_to_page(pgd_val(*pgd));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}
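
/*
 * Second initialization stage, used when the no-DAT facility is
 * available: all kernel pages that are neither page tables nor sitting
 * on a free list are moved to the stable/no-DAT state.
 */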
void __init cmma_init_nodat(void)
{
	struct page *page;
	unsigned long start, end, ix;
	int i;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}
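
/*
 * Hooks called by the page allocator: a page being freed is hinted to
 * the hypervisor as unused, a page being allocated is made stable
 * again (stable/no-DAT when the facility is available).
 */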
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}