// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

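/*
 * When the "allocate_pgste" sysctl below is set, new page tables are
 * allocated as full 4K pages that also carry PGSTEs (needed by KVM for
 * guest handling); see page_table_alloc().
 */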
static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

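/*
 * CRST (region and segment) tables are four pages long (order 2) and hold
 * 2048 eight-byte entries each.
 */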
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

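/*
 * Add one or two higher translation levels on top of mm->pgd so that the
 * address space can grow beyond the current asce limit, and make every CPU
 * that is running this mm load the new ASCE.
 */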
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_sem lock held and there is
	 * no reason to optimize for the case of otherwise. However, if
	 * that would ever change, the below check will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = -PAGE_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

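/*
 * Downgrade from three to two translation levels for 31-bit compat tasks:
 * mm->pgd is replaced by the segment table below the old top-level table,
 * which is then freed.
 */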
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm_dec_nr_pmds(mm);
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

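/*
 * A 4K page holds two 2K pte tables. The upper byte of page->_refcount is
 * used as a small bitmap for the page: bits 24-25 mark a 2K half as in use,
 * bits 28-29 mark a half as pending removal in __tlb_remove_table().
 * atomic_xor_bits() toggles those bits atomically.
 */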
#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

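/*
 * page_table_alloc() and page_table_free() back the pte_alloc_one() and
 * pte_free() helpers in asm/pgalloc.h.
 */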
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_pte_page_dtor(page);
	__free_page(page);
}

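/*
 * Deferred variant used while tearing down an address space: the table is
 * queued with tlb_remove_table() and only released by __tlb_remove_table()
 * after the TLB has been flushed. For PGSTE mms, gmap structures that still
 * link to the table are unlinked first.
 */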
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

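/*
 * The lower two bits of the table pointer queued above encode what has to
 * be released: 0 = a full CRST table, 1 or 2 = one of the two 2K pte
 * fragments, 3 = a 4K pte table with PGSTEs.
 */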
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_pte_page_dtor(page);
		__free_page(page);
		break;
	}
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

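/*
 * The segment and region walkers below serve two purposes: called with
 * alloc=1 they allocate and populate any missing lower level tables, called
 * with alloc=0 they free those tables again.
 */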
static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, -_PAGE_SIZE, 0);
		break;
	}
	base_crst_free(table);
}

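/*
 * Base pte tables are 2K (_PAGE_TABLE_SIZE) and therefore come from a
 * dedicated, 2K-aligned kmem cache instead of the per-mm fragment allocator
 * above.
 */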
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}

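/*
 * Example usage (an illustrative sketch only, not taken from an in-tree
 * caller): map a buffer, hand the resulting asce to the request that needs
 * it, and release the tables afterwards:
 *
 *	asce = base_asce_alloc((unsigned long) buf, nr_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... issue the I/O or service call request that takes an asce ...
 *	base_asce_free(asce);
 */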