// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/memory.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>

#include <trace/events/thp.h>
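/*
 * mmu_pid_bits/mmu_base_pid describe the PID (process ID) space used for
 * radix translation: the number of PID bits the MMU supports and the lowest
 * PID available for allocation. radix_mem_block_size is the largest page
 * size used for the kernel linear mapping, capped to the memory block size
 * so that huge mappings do not straddle a hotpluggable block.
 */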
unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;
unsigned long radix_mem_block_size __ro_after_init;
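/*
 * Boot-time page table allocator. The slab allocator is not up yet, so page
 * table pages are taken straight from memblock: naturally aligned
 * (align == size), zero-filled, with nid/region_start/region_end acting only
 * as placement hints.
 */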
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
	void *ptr;

	if (region_start)
		min_addr = region_start;
	if (region_end)
		max_addr = region_end;

	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
		      __func__, size, size, nid, &min_addr, &max_addr);

	return ptr;
}
/*
 * When allocating pud or pmd pointers, we allocate a complete page
 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
 * is to ensure that the page obtained from the memblock allocator
 * can be completely used as a page table page and can be freed
 * correctly when the page table entries are removed.
 */
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	if (p4d_none(*p4dp)) {
		pudp = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		p4d_populate(&init_mm, p4dp, pudp);
	}
	pudp = pud_offset(p4dp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
					   region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	asm volatile("ptesync": : :"memory");
	return 0;
}
/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

#ifdef CONFIG_PPC_64K_PAGES
	BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
#endif

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
					     nid, region_start, region_end);

	/*
	 * Should make page table allocation functions be able to take a
	 * node, so we can place kernel page tables on the right nodes after
	 * boot.
	 */
	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	pudp = pud_alloc(&init_mm, p4dp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	asm volatile("ptesync": : :"memory");
	return 0;
}
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			 pgprot_t flags,
			 unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
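/*
 * Walk an existing kernel mapping page by page and clear the given
 * protection bits (e.g. _PAGE_WRITE or _PAGE_EXEC) from whatever leaf entry
 * maps each address, then flush the TLB for the whole range. Used below to
 * make rodata read-only and to strip exec permission from freed init text.
 */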
static void radix__change_memory_range(unsigned long start, unsigned long end,
				       unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		p4dp = p4d_offset(pgdp, idx);
		pudp = pud_alloc(&init_mm, p4dp, idx);
		if (!pudp)
			continue;
		if (pud_is_leaf(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_is_leaf(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;

update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}
void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}
void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
		exec ? " (exec)" : "");
}
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
	if (addr < __pa_symbol(__init_begin))
		return __pa_symbol(__init_begin);
#endif
	return end;
}
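/*
 * Map a physical range into the kernel linear mapping, using the largest
 * page size that fits: 1G (PUD) or 2M (PMD) leaves when the address is
 * suitably aligned, the gap to the next protection boundary is big enough
 * and the MMU supports that page size, otherwise base pages. Ranges that
 * overlap kernel or interrupt vector text are mapped executable.
 */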
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     unsigned long max_mapping_size,
					     int nid, pgprot_t _prot)
{
	unsigned long vaddr, addr, mapping_size = 0;
	bool prev_exec, exec = false;
	pgprot_t prot;
	int psize;

	start = ALIGN(start, PAGE_SIZE);
	end = ALIGN_DOWN(end, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = next_boundary(addr, end) - addr;
		if (gap > max_mapping_size)
			gap = max_mapping_size;
		previous_size = mapping_size;
		prev_exec = exec;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
			prot = PAGE_KERNEL_X;
			exec = true;
		} else {
			prot = _prot;
			exec = false;
		}

		if (mapping_size != previous_size || exec != prev_exec) {
			print_mapping(start, addr, previous_size, prev_exec);
			start = addr;
		}

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size, exec);
	return 0;
}
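/*
 * Set up the radix kernel page tables at boot: build the linear mapping for
 * every memblock range, allocate the process table, and point entry 0 (the
 * kernel's) at init_mm's page table.
 */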
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	phys_addr_t start, end;
	u64 i;

	/* We don't support slb for radix */
	mmu_slb_size = 0;

	/*
	 * Create the linear mapping
	 */
	for_each_mem_range(i, &start, &end) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */

		if (end >= RADIX_VMALLOC_START) {
			pr_warn("Outside the supported range\n");
			continue;
		}

		WARN_ON(create_physical_mapping(start, end,
						radix_mem_block_size,
						-1, PAGE_KERNEL));
	}

	/* Find out how many PID bits are supported */
	if (!cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
		/*
		 * Older versions of KVM on these machines prefer if the
		 * guest only uses the low 19 PID bits.
		 */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
	} else {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
	}
	mmu_base_pid = 1;

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}
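/*
 * Install partition table entry 0 for the host itself: the first doubleword
 * points at the kernel's radix tree (with the host-radix bit set), the
 * second points at the process table allocated above.
 */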
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0, dw1;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
	mmu_partition_table_set_entry(0, dw0, dw1, false);

	pr_info("Initializing Radix MMU\n");
}
static int __init get_idx_from_shift(unsigned int shift)
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {
		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
		def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
	}

	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static int __init probe_memory_block_size(unsigned long node, const char *uname,
					  int depth, void *data)
{
	unsigned long *mem_block_size = (unsigned long *)data;
	const __be32 *prop;
	int len;

	if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);

	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		/*
		 * Nothing in the device tree
		 */
		*mem_block_size = MIN_MEMORY_BLOCK_SIZE;
	else
		*mem_block_size = of_read_number(prop, dt_root_size_cells);
	return 1;
}
static unsigned long radix_memory_block_size(void)
{
	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;

	/*
	 * The OPAL firmware feature is set by now, so it is safe to test
	 * for OPAL here.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL))
		mem_block_size = 1UL * 1024 * 1024 * 1024;
	else
		of_scan_flat_dt(probe_memory_block_size, &mem_block_size);

	return mem_block_size;
}

#else /* CONFIG_MEMORY_HOTPLUG */

static unsigned long radix_memory_block_size(void)
{
	return 1UL * 1024 * 1024 * 1024;
}

#endif /* CONFIG_MEMORY_HOTPLUG */
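/*
 * Parse MMU related properties from the flattened device tree: the radix
 * page size (AP) encodings the CPU supports and, for memory hotplug, the
 * memory block size used to cap linear-mapping page sizes.
 */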
void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (!rc) {
		/*
		 * No page size details found in device tree.
		 * Let's assume we have page 4k and 64k support
		 */
		mmu_psize_defs[MMU_PAGE_4K].shift = 12;
		mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
		mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
			psize_to_rpti_pgsize(MMU_PAGE_4K);

		mmu_psize_defs[MMU_PAGE_64K].shift = 16;
		mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
		mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
			psize_to_rpti_pgsize(MMU_PAGE_64K);
	}

	/*
	 * Max mapping size used when mapping pages. We don't use
	 * ppc_md.memory_block_size() here because this gets called early,
	 * before the machine has been probed. Also, the pseries
	 * implementation only checks for ibm,lmb-size. All hypervisors
	 * supporting radix do expose that device tree node.
	 */
	radix_mem_block_size = radix_memory_block_size();
}
static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100....0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	} else
		mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	__kernel_io_end = RADIX_KERN_IO_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_START;
	ioremap_bot = IOREMAP_BASE;

	pci_io_base = ISA_IO_BASE;

	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	radix_init_pgtable();

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();
}
void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;

	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		set_ptcr_when_no_uv(__pa(partition_tb) |
				    (PATB_SIZE_SHIFT - 12));

		radix_init_amor();
	}

	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();

	/* Make sure userspace can't change the AMR */
	mtspr(SPRN_UAMOR, 0);
}
/* Called during kexec sequence with MMU off */
notrace void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		set_ptcr_when_no_uv(0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}
#ifdef CONFIG_MEMORY_HOTPLUG
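/*
 * Hotplug teardown helpers. remove_pagetable() walks the kernel page tables
 * for the range being removed, clears leaf entries at whatever level they
 * were mapped, and uses the free_*_table() helpers to hand a page table
 * page back once every entry in it has been cleared.
 */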
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, pud_start);
	p4d_clear(p4d);
}
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}
static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
				       unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_is_leaf(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}
static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
				       unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_is_leaf(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = pud_pgtable(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;
	p4d_t *p4d;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		p4d = p4d_offset(pgd, addr);
		if (!p4d_present(*p4d))
			continue;

		if (p4d_is_leaf(*p4d)) {
			if (!IS_ALIGNED(addr, P4D_SIZE) ||
			    !IS_ALIGNED(next, P4D_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = p4d_pgtable(*p4d);
		remove_pud_table(pud_base, addr, next);
		free_pud_table(pud_base, p4d);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}
int __meminit radix__create_section_mapping(unsigned long start,
					    unsigned long end, int nid,
					    pgprot_t prot)
{
	if (end >= RADIX_VMALLOC_START) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	return create_physical_mapping(__pa(start), __pa(end),
				       radix_mem_block_size, nid, prot);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	if ((start + page_size) >= RADIX_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
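/*
 * Transparent huge page helpers. radix__pmd_hugepage_update() clears and/or
 * sets bits in a huge-page PMD under the PMD lock and emits a
 * hugepage_update trace event; radix__pmdp_huge_get_and_clear() below uses
 * it with clr = ~0UL to read and clear a whole entry.
 */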
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}
pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*
	 * pmdp_collapse_flush needs to ensure that there are no parallel GUP
	 * walks after this call. This is needed so that we can have a stable
	 * page ref count when collapsing a page. We don't allow collapsing a
	 * page if GUP has taken a reference on it. We can ensure that by
	 * sending an IPI, because a GUP walk happens with IRQs disabled.
	 */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}
/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	/* Zero out the list_head area that was used above. */
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}
pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	return old_pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
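/*
 * radix__ptep_set_access_flags() relaxes access permissions on a PTE. When a
 * nest MMU (NMMU, used by coprocessors and tracked via context.copros) is
 * attached to the address space, the PTE has to be made invalid and the TLB
 * flushed before the new value is written, because the NMMU does not reload
 * the PTE on an access fault the way the core MMU does.
 */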
void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
	/*
	 * To avoid NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}
void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid NMMU hang while relaxing access we need to flush the tlb
	 * before we set the new value. We need to do this only for radix,
	 * because hash translation does flush when updating the linux pte.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_tlb_page(vma, addr);

	set_pte_at(mm, addr, ptep, pte);
}
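/*
 * Huge vmap support: install or remove 1G (PUD) and 2M (PMD) leaf entries in
 * the kernel page tables for ioremap/vmalloc, and free the lower-level
 * tables that a huge mapping replaces (with a TLB flush for the range).
 */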
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pud;
	pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);

	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (pud_huge(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd;
	int i;

	pmd = pud_pgtable(*pud);
	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte;

			pte = (pte_t *)pmd_page_vaddr(pmd[i]);
			pte_free_kernel(&init_mm, pte);
		}
	}

	pmd_free(&init_mm, pmd);

	return 1;
}
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	pte_t *ptep = (pte_t *)pmd;
	pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);

	if (!radix_enabled())
		return 0;

	set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);

	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_huge(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);

	pte_free_kernel(&init_mm, pte);

	return 1;
}