/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>
8 * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
9 * table containing PTEs, together with a set of 16 segment registers,
10 * to define the virtual to physical address mapping.
12 * We use the hash table as an extended TLB, i.e. a cache of currently
13 * active mappings. We maintain a two-level page table tree, much
14 * like that used by the i386, for the sake of the Linux memory
15 * management code. Low-level assembler code in hash_low_32.S
16 * (procedure hash_page) is responsible for extracting ptes from the
17 * tree and putting them into the hash table when necessary, and
18 * updating the accessed and modified bits in the page table tree.
#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
#define _PAGE_USER	0x004	/* usermode access allowed */
#define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
#define _PAGE_DIRTY	0x080	/* C: page changed */
#define _PAGE_ACCESSED	0x100	/* R: page referenced */
#define _PAGE_EXEC	0x200	/* software: exec allowed */
#define _PAGE_RW	0x400	/* software: user write access allowed */
#define _PAGE_SPECIAL	0x800	/* software: Special page */
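
/*
 * Illustrative sketch (not from the original source): a typical dirty,
 * user-visible, read-write mapping combines the software bits with the
 * hardware-maintained R and C bits, e.g.:
 *
 *	pte_basic_t val = _PAGE_PRESENT | _PAGE_USER | _PAGE_RW |
 *			  _PAGE_ACCESSED | _PAGE_DIRTY;	-> 0x585
 *
 * hash_page() later folds these Linux bits into a hardware HPTE.
 */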

#ifdef CONFIG_PTE_64BIT
/* We never clear the high word of the pte */
#define _PTE_NONE_MASK	(0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK	_PAGE_HASHPTE
#endif
#define _PMD_PRESENT	0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD	(~PAGE_MASK)

/* And here we include common definitions */

#define _PAGE_KERNEL_RO		0
#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

#ifndef __ASSEMBLY__

static inline bool pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}
#endif /* __ASSEMBLY__ */

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't just pre-define the value, so we don't override
 * it here.
 */
#define PTE_RPN_SHIFT	(PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
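
/*
 * Worked example (illustrative, assuming 4k pages so PTE_RPN_SHIFT == 12):
 * with CONFIG_PTE_64BIT, a 36-bit physical address such as 0xf00001000
 * gives pfn 0xf00001, stored above bit 12 of the 64-bit PTE, while bits
 * 0-11 remain available for the flag bits defined above.
 */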

/*
 * _PAGE_CHG_MASK is the mask of bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)

/*
 * Permission masks used to generate the __P and __S table.
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now.
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
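
/*
 * Expansion sketch (illustrative): PAGE_KERNEL therefore evaluates to
 * _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT | _PAGE_DIRTY |
 * _PAGE_RW, i.e. a cacheable, kernel-only, read-write mapping with no
 * _PAGE_USER and no _PAGE_EXEC.
 */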

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make modules code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		(PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
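
/*
 * Worked example (illustrative, assuming 4k pages and 32-bit PTEs so
 * PTE_INDEX_SIZE == 10): PGDIR_SHIFT is 22, each pgd entry maps a 4MB
 * PGDIR_SIZE region, and the pgd slot for an address is:
 *
 *	unsigned int idx = addr >> PGDIR_SHIFT;	(0..1023)
 */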

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be at least a 16MB "hole"
 * after the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */

#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
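
/*
 * Worked example (hypothetical values): with 256MB of lowmem, high_memory
 * is 0xd0000000, so VMALLOC_START becomes (0xd0000000 + 0x1000000) rounded
 * down to a 16MB boundary, i.e. 0xd1000000, leaving the hole described
 * above.
 */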

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif

#define MODULES_END	ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR	(MODULES_END - SZ_256M)

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;

		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old;
	unsigned long tmp;

	__asm__ __volatile__(
#ifndef CONFIG_PTE_64BIT
"1:	lwarx	%0, 0, %3\n"
"	andc	%1, %0, %4\n"
#else
"1:	lwarx	%L0, 0, %3\n"
"	lwz	%0, -4(%3)\n"
"	andc	%1, %L0, %4\n"
#endif
"	or	%1, %1, %5\n"
"	stwcx.	%1, 0, %3\n"
"	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
	: "r" (p),
#else
	: "b" ((unsigned long)(p) + 4),
#endif
	  "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	if (old & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);

	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	pte_update(vma->vm_mm, address, ptep, 0, set, 0);

	flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_page(pmd) \
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
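
/*
 * Layout sketch (illustrative): because __swp_entry_to_pte() shifts the
 * value left by 3, PTE bits 0-2 stay clear, keeping _PAGE_PRESENT (0x001)
 * and _PAGE_HASHPTE (0x002) out of swap entries, e.g.:
 *
 *	swp_entry_t e = __swp_entry(2, 0x1234);	(type 2, offset 0x1234)
 *
 * e.val is stored in the PTE as e.val << 3 and recovered by
 * pte_val(pte) >> 3.
 */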

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find page table entries in the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_USER bit alone;
	 * pte_read() is always true here, so only a write access needs
	 * the extra _PAGE_RW check.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
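
/*
 * Usage sketch (illustrative): lockless fast paths such as GUP use this
 * helper to validate access rights before touching the page, e.g.:
 *
 *	if (!pte_access_permitted(pte, write))
 *		return 0;	(fall back to the slow path)
 */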

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for physical addresses bigger than 32 bits.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
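
/*
 * Round-trip sketch (illustrative): as long as the protection bits stay
 * below PTE_RPN_SHIFT, pte_pfn() undoes pfn_pte():
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	WARN_ON(pte_pfn(pte) != pfn);
 */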

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}

static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
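
/*
 * Usage sketch (illustrative): an mprotect()-style permission change goes
 * through pte_modify(); _PAGE_CHG_MASK keeps the PFN and the hash/R/C
 * state while the new pgprot supplies the access bits:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);	(drops _PAGE_RW, keeps PFN)
 */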

/* This low level function performs the actual PTE insertion
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);

#elif defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%X0 %2,%0\n\
		eieio\n\
		stw%X1 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#else
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
	 * so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}
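
/*
 * Usage sketch (illustrative): generic code reaches this helper through
 * set_pte_at() with percpu == 0; percpu == 1 is reserved for PTEs that
 * are never hashed concurrently, such as kmap_atomic() slots:
 *
 *	__set_pte_at(mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL), 0);
 */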

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
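
/*
 * Usage sketch (illustrative): a driver mapping device memory into
 * userspace would typically apply one of these helpers to the VMA's
 * protection before remapping, e.g.:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 */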

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */