/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *	       Ulrich Weigand (weigand@de.ibm.com)
 *	       Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern unsigned long s390_invalid_asce;
extern unsigned long mio_wb_bit_mask;

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

void arch_report_meminfo(struct seq_file *m);

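/*
 * Illustrative sketch, not part of this header's API: how direct-mapping
 * code might account for splitting one 1 MB mapping into 256 4 KB
 * mappings. The function name is hypothetical; the real callers live in
 * arch/s390/mm.
 */
static inline void example_account_1m_split(void)
{
	update_page_count(PG_DIRECT_MAP_1M, -1);	/* one 1 MB mapping gone */
	update_page_count(PG_DIRECT_MAP_4K, 256);	/* replaced by 256 4 KB ones */
}
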
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

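/*
 * Illustrative sketch (hypothetical helper, not used anywhere): ZERO_PAGE()
 * does not always return the same struct page - zero_page_mask selects one
 * of several prefilled zero pages so that read-only zero mappings are
 * spread across cache colors.
 */
static inline struct page *example_colored_zero_page(unsigned long vaddr)
{
	return ZERO_PAGE(vaddr);	/* same contents, color depends on vaddr */
}
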
/* TODO: s390 cannot support io_remap_pfn_range... */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. 512GB are reserved for vmalloc by default.
 * At the top of the vmalloc area a 2GB area is reserved where modules
 * will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a
 * 2GB frame is branch prediction unit friendly.
 */
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit pagetable entry of S390 has following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : change bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

#define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE	/* SW pte exclusive swap bit */

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

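/*
 * Illustrative sketch of the encodings above (hypothetical helpers that
 * mirror pte_none()/pte_swap() as defined further down): the three pte
 * classes can be distinguished from the low hardware/software bits alone.
 */
static inline int example_pte_is_none(pte_t pte)
{
	return pte_val(pte) == _PAGE_INVALID;		/* .10.00000000 */
}

static inline int example_pte_is_swap(pte_t pte)
{
	/* invalid + protect set, present clear: .11..ttttt.0 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT;
}
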
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	    */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL	/* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */

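/*
 * Illustrative sketch (hypothetical helper): "read implies execute" is
 * encoded by leaving _PAGE_NOEXEC clear in PAGE_RX/PAGE_RWX while setting
 * it in PAGE_RO/PAGE_RW, so executability of a protection can be tested
 * with a single bit.
 */
static inline int example_prot_allows_exec(pgprot_t prot)
{
	return !(pgprot_val(prot) & _PAGE_NOEXEC);
}
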
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY |	\
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.protected_count)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) & ~pgprot_val(prot));
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) & ~pgprot_val(prot));
}

static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) | pgprot_val(prot));
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	cspg	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long *table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	union register_pair r1 = { .even = old, .odd = new, };
	union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };

	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
		     : [r1] "+&d" (r1.pair)
		     : [r2] "d" (r2.pair), [asce] "a" (asce)
		     : "memory", "cc");
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}

/*
 * pgd/p4d/pud/pmd/pte modification functions
 */

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pgd);
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	WRITE_ONCE(*p4dp, p4d);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, __pte(_PAGE_INVALID));
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
	pte = set_pte_bit(pte, newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

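/*
 * Illustrative sketch (hypothetical helper): generic code such as mprotect
 * ends up calling pte_modify() like this - the young/dirty and soft-dirty
 * state of the old pte survives via _PAGE_CHG_MASK, only the protection
 * changes.
 */
static inline pte_t example_make_readonly(pte_t old)
{
	return pte_modify(old, PAGE_RO);
}
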
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
	if (pte_val(pte) & _PAGE_DIRTY)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
	if (pte_val(pte) & _PAGE_WRITE)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	if (pte_val(pte) & _PAGE_READ)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
}
#endif

#define IPTE_GLOBAL	0
#define	IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = __pa(ptep);

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	ipte	%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	ipte	%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = __pa(ptep);

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	ipte	%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

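/*
 * Illustrative sketch of the sequence described above (hypothetical
 * function; the real callers are in generic mm code): the exchange
 * primitive replaces the pte and flushes the TLB in one step, so the
 * generic three-step sequence collapses on s390.
 */
static inline void example_change_active_pte(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t newpte)
{
	pte_t old;

	/* steps 1+2+3 in one go: clear, install newpte, flush the TLB */
	old = ptep_xchg_direct(vma->vm_mm, addr, ptep, newpte);
	(void)old;	/* a real caller would inspect the old pte here */
}
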
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		set_pte(ptep, __pte(_PAGE_INVALID));
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	/* Nothing to do */
	if (!mm_is_protected(mm) || !pte_present(res))
		return res;
	/*
	 * At this point the reference through the mapping is still present.
	 * The notifier should have destroyed all protected vCPUs at this
	 * point, so the destroy should be successful.
	 */
	if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK))
		return res;
	/*
	 * If something went wrong and the page could not be destroyed, or
	 * if this is not a mm teardown, the slower export is used as
	 * fallback instead.
	 */
	uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
	if (mm_has_pgste(mm)) {
		ptep_set_pte_at(mm, addr, ptep, entry);
		return;
	}
	set_pte(ptep, entry);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	__pte = __pte(physpage | pgprot_val(pgprot));
	if (!MACHINE_HAS_NX)
		__pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(p4d) ((unsigned long)__va(p4d_val(p4d) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long) pmd_deref(pmd);
}

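/*
 * Illustrative sketch (hypothetical helper): a complete walk from the mm
 * down to the pte using the offset functions above. Only pgd_offset()
 * always indexes the top-level table; the lower levels add an index only
 * if their level is not folded. Large pmds are ignored here for
 * simplicity.
 */
static inline pte_t *example_walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	pud_t *pudp = pud_offset(p4dp, addr);
	pmd_t *pmdp = pmd_offset(pudp, addr);

	return (pte_t *)pmd_page_vaddr(*pmdp) +
	       ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
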
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)	mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud)		pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d)		pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd)		pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long mask;

	mask  = _SEGMENT_ENTRY_ORIGIN_LARGE;
	mask |= _SEGMENT_ENTRY_DIRTY;
	mask |= _SEGMENT_ENTRY_YOUNG;
	mask |=	_SEGMENT_ENTRY_LARGE;
	mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd = __pmd(pmd_val(pmd) & mask);
	pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	return __pmd(physpage + massage_pgprot_pmd(pgprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	idte	%[r1],0,%[r2],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	idte	%[r1],%[r3],%[r2],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	idte	%[r1],0,%[r2],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	idte	%[r1],%[r3],%[r2],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
	set_pmd(pmdp, entry);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte
 * as invalid.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * |			  offset			|E11XX|type |S0|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 *
 * Bits 0-51 store the offset.
 * Bit 52 (E) is used to remember PG_anon_exclusive.
 * Bits 57-61 store the type.
 * Bit 62 (S) is used for softdirty tracking.
 * Bits 55 and 56 (X) are unused.
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	unsigned long pteval;

	pteval = _PAGE_INVALID | _PAGE_PROTECT;
	pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return __pte(pteval);
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

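/*
 * Illustrative sketch (hypothetical helper): round-tripping a (type,
 * offset) pair through the swap pte encoding above. The resulting pte has
 * _PAGE_INVALID and _PAGE_PROTECT set, i.e. (pte & 0x201) == 0x200, which
 * is exactly what pte_swap() tests for.
 */
static inline int example_swap_entry_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x12345);

	return __swp_type(entry) == 3 && __swp_offset(entry) == 0x12345;
}
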
#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pmd_pgtable(pmd) \
	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))

#endif /* _ASM_S390_PGTABLE_H */