/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
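
/*
 * Illustrative sketch (not part of the original file): with the block of
 * coloured zero pages set up above, a caller picks the copy whose cache
 * colour matches a given user virtual address; this is essentially what
 * the MIPS ZERO_PAGE() macro boils down to.  The helper name below is
 * hypothetical.
 */
static inline struct page *example_coloured_zero_page(unsigned long vaddr)
{
	/* zero_page_mask keeps only the colour-selecting address bits */
	return virt_to_page((void *)(empty_zero_page +
				     (vaddr & zero_page_mask)));
}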

static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned int old_mmid;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(folio_test_dcache_dirty(page_folio(page)));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	local_irq_restore(flags);

	return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}
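
/*
 * Illustrative sketch (not part of the original file): kmap_coherent() and
 * kunmap_coherent() are always used as a short, non-blocking bracket; the
 * unmap pops the wired TLB entry installed by __kmap_pgprot().  The helper
 * name below is hypothetical.
 */
static inline void example_clear_user_view(struct page *page,
					   unsigned long uvaddr)
{
	void *kaddr = kmap_coherent(page, uvaddr);	/* colour-matched alias */

	memset(kaddr, 0, PAGE_SIZE);
	kunmap_coherent();	/* unwires the temporary mapping */
}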

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *src = page_folio(from);
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    folio_mapped(src) && !folio_test_dcache_dirty(src)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
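
/*
 * Note (illustrative, not part of the original file): two addresses alias
 * in a virtually-indexed dcache when they differ in the index bits above
 * PAGE_SHIFT, which is roughly what pages_do_alias() tests:
 *
 *	pages_do_alias(a, b) ~= ((a) ^ (b)) & shm_align_mask
 */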

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (cpu_has_dc_aliases &&
	    folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			folio_set_dcache_dirty(folio);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (cpu_has_dc_aliases &&
	    folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		/*
		 * This path only reads from the page, so unlike
		 * copy_to_user_page() there is no dirty state to mark.
		 */
		memcpy(dst, src, len);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
									   PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}
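
/*
 * Illustrative sketch (not part of the original file): pagetable_init()
 * typically calls fixrange_init() to pre-allocate the page tables backing
 * the fixmap (and kmap) region, roughly as below.  The helper name is
 * hypothetical.
 */
static inline void __init example_fixmap_pagetables(pgd_t *pgd_base)
{
	unsigned long vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);

	/* Round out to whole PMDs so every fixmap slot gets a PTE page */
	fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
}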

struct maar_walk_info {
	struct maar_config cfg[16];
	unsigned int num_cfg;
};

static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
			 void *data)
{
	struct maar_walk_info *wi = data;
	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
	unsigned int maar_align;

	/* MAAR registers hold physical addresses right shifted by 4 bits */
	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);

	/* Fill in the MAAR config entry */
	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
	cfg->attrs = MIPS_MAAR_S;

	/* Ensure we don't overflow the cfg array */
	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
		wi->num_cfg++;

	return 0;
}
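
/*
 * Worked example (illustrative): assuming MIPS_MAAR_ADDR_SHIFT == 12, the
 * MAAR granule is BIT(16) = 64KiB.  A RAM range 0x00001000..0x1ffff000 is
 * then shrunk inwards to lower = 0x00010000, upper = 0x1ffeffff, so
 * speculation is never enabled for bytes outside the RAM region.
 */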

unsigned __weak platform_maar_init(unsigned num_pairs)
{
	unsigned int num_configured;
	struct maar_walk_info wi;

	wi.num_cfg = 0;
	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);

	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
	if (num_configured < wi.num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
			num_pairs, wi.num_cfg);

	return num_configured;
}
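
/*
 * Illustrative sketch (not part of the original file): since
 * platform_maar_init() is weak, a platform may override it and describe
 * its speculation-safe RAM by hand.  The region below is hypothetical.
 */
#if 0	/* example only, never compiled */
unsigned platform_maar_init(unsigned num_pairs)
{
	struct maar_config cfg[] = {
		{
			.lower = 0,			/* first 256MiB of RAM */
			.upper = 0x10000000 - 1,
			.attrs = MIPS_MAAR_S,		/* allow speculation */
		},
	};

	return maar_config(cfg, ARRAY_SIZE(cfg), num_pairs);
}
#endif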

void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);

		/* Disable any further MAARs */
		for (i = (used * 2); i < num_maars; i++) {
			write_c0_maari(i);
			back_to_back_c0_hazard();
			write_c0_maar(0);
			back_to_back_c0_hazard();
		}
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();
#ifdef CONFIG_XPA
		upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();
#ifdef CONFIG_XPA
		lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if (!(attr & MIPS_MAAR_VL)) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	pagetable_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
	}
#endif

	free_area_init(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void __init mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
	/*
	 * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
	 * bits to hold a full 32b physical address on MIPS32 systems.
	 */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));

#ifdef CONFIG_HIGHMEM
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NUMA */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
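
/*
 * Illustrative sketch (not part of the original file): free_init_pages()
 * takes physical addresses, so callers convert virtual bounds first, e.g.
 * to release an initrd image (as older MIPS kernels did):
 */
#if 0	/* example only, never compiled */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif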

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __weak __init prom_free_prom_memory(void)
{
	/* nothing to do */
}

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(cpu_to_node(from), cpu_to_node(to));
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return cpu_to_node(cpu);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif
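
/*
 * Note (illustrative, not part of the original file): once
 * __per_cpu_offset[] is populated above, per_cpu(var, cpu) conceptually
 * resolves to:
 *
 *	*(typeof(var) *)((unsigned long)&var + __per_cpu_offset[cpu])
 */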

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
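
/*
 * Illustrative note (not part of the original file): with 64K alignment,
 * the low 16 bits of swapper_pg_dir's address are zero, so %hi() needs no
 * carry correction and a TLB refill handler can load the base with a
 * single instruction:
 *
 *	lui	k1, %hi(swapper_pg_dir)
 *
 * rather than the lui/addiu pair an arbitrary address would need.
 */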
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);