/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/dmi.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/debug.h>
#include <asm/dma-coherence.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif
/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);
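
/*
 * arcs_cmdline holds the command line handed over by the firmware or
 * bootloader (plat_mem_setup() is expected to fill it in); command_line is
 * the working copy that arch_mem_init() hands to the core kernel via
 * *cmdline_p.
 */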
static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
#else
static const char builtin_cmdline[] __initconst = "";
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);
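
/*
 * Resources describing the kernel image itself; resource_init() requests
 * them beneath the matching "System RAM" region.
 */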
static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

static void *detect_magic __initdata = detect_memory_region;
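
/*
 * With CONFIG_MIPS_AUTO_PFN_OFFSET the PFN offset is not a compile-time
 * constant; bootmem_init() derives it from the start of RAM.
 */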
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	/*
	 * Note: This function only exists for historical reasons;
	 * new code should use memblock_add or memblock_add_node instead.
	 *
	 * If the region reaches the top of the physical address space, adjust
	 * the size slightly so that (start + size) doesn't overflow.
	 */
	if (start + size - 1 == PHYS_ADDR_MAX)
		--size;

	/* Sanity check: reject a region whose end would wrap around zero. */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/* Regions below the kernel's physical offset are not usable. */
	if (start < PHYS_OFFSET)
		return;

	memblock_add(start, size);
	/* Reserve any memory except the ordinary RAM ranges. */
	switch (type) {
	case BOOT_MEM_RAM:
		break;
	case BOOT_MEM_NOMAP: /* Discard the range from the system. */
		memblock_remove(start, size);
		break;
	default: /* Reserve the rest of the memory types at boot time */
		memblock_reserve(start, size);
		break;
	}
}

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	/* Double the probed size until the magic word aliases back onto itself. */
	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		 ((unsigned long long) size) / SZ_1M,
		 (unsigned long long) start,
		 ((unsigned long long) sz_min) / SZ_1M,
		 ((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}
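
/*
 * Initial ramdisk handling: "rd_start=" and "rd_size=" on the command line
 * describe a ramdisk that the bootloader has already placed in memory.
 */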
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by the bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);

/* Returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware can't guess whether
	 * it needs to pass them as 64-bit values when the kernel has been
	 * built as a pure 32-bit image. We also need to switch from KSEG0 to
	 * XKPHYS addresses now, so that the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/*
 * In some conditions (e.g. big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped. Try to detect this and
 * byte swap it if needed.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd() do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
	struct memblock_region *mem;
	phys_addr_t ramstart, ramend;

	ramstart = memblock_start_of_DRAM();
	ramend = memblock_end_of_DRAM();

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, and rely on the end of the kernel
	 * code as our memory range starting point. Once bootmem is inited
	 * we will reserve the area used for the initrd.
	 */
	init_initrd();

	/* Reserve memory occupied by kernel. */
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&_end) - __pa_symbol(&_text));

	/* max_low_pfn is not a number of pages but the end pfn of low mem */

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
	ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
	/*
	 * Reserve any memory between the start of RAM and PHYS_OFFSET
	 */
	if (ramstart > PHYS_OFFSET)
		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);

	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
	}
#endif

	min_low_pfn = ARCH_PFN_OFFSET;
	max_pfn = PFN_DOWN(ramend);
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_pfn excludes the highmem portion.
		 */
		if (memblock_is_nomap(mem))
			continue;
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
		if (end > max_low_pfn)
			max_low_pfn = end;
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");

	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_pfn;
#else
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
		max_pfn = max_low_pfn;
#endif
	}

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif /* CONFIG_SGI_IP27 */
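
/* Set once the user overrides the firmware memory map via "mem=" or "memmap=". */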
static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);

	return 0;
}
early_param("mem", early_parse_mem);
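
/*
 * "memmap=nn[KMG]@ss[KMG]" adds a RAM region and "memmap=nn[KMG]$ss[KMG]"
 * reserves one; the x86-only exactmap and ACPI-data forms are rejected.
 */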
static int __init early_parse_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
		return -EINVAL;
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
	} else {
		pr_err("\"memmap\" invalid format!\n");
		return -EINVAL;
	}

	if (*p == '\0') {
		usermem = 1;
		return 0;
	} else
		return -EINVAL;
}
early_param("memmap", early_parse_memmap);
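
/*
 * "elfcorehdr=" is passed to a kdump capture kernel and gives the physical
 * address of the ELF core header that describes the old kernel's memory.
 */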
#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
	struct memblock_region *mem;

	setup_elfcorehdr = memparse(p, &p);

	for_each_memblock(memory, mem) {
		unsigned long start = mem->base;
		unsigned long end = start + mem->size;
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment; that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif
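
/* Parse "crashkernel=" and reserve the requested region for a kexec crash kernel. */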
#ifdef CONFIG_KEXEC
static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = memblock_phys_mem_size();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) {
		pr_warn("Invalid memory region reserved for crash kernel\n");
		return;
	}

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)(resource_size(&crashk_res) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}

#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */
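
/*
 * Some firmware hands over a memory map that does not cover the kernel
 * image itself; add the kernel sections to memblock so they are accounted
 * for.
 */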
static void __init check_kernel_sections_mem(void)
{
	phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text)));
	phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start;

	if (!memblock_is_region_memory(start, size)) {
		pr_info("Kernel sections are not in the memory maps\n");
		memblock_add(start, size);
	}
}
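
/* Append s to boot_command_line, inserting a separating space if needed. */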
static void __init bootcmdline_append(const char *s, size_t max)
{
	if (!s[0] || !max)
		return;

	if (boot_command_line[0])
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);

	strlcat(boot_command_line, s, max);
}

#ifdef CONFIG_OF_EARLY_FLATTREE

static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
					  int depth, void *data)
{
	bool *dt_bootargs = data;
	const char *p;
	int l;

	if (depth != 1 || !data ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0) {
		bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
		*dt_bootargs = true;
	}

	return 1;
}

#endif /* CONFIG_OF_EARLY_FLATTREE */

static void __init bootcmdline_init(void)
{
	bool dt_bootargs = false;

	/*
	 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
	 * trivial - we simply use the built-in command line unconditionally &
	 * unmodified.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
		return;
	}

	/*
	 * If the user specified a built-in command line &
	 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
	 * prepended to arguments from the bootloader or DT so we'll copy them
	 * to the start of boot_command_line here. Otherwise, empty
	 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	else
		boot_command_line[0] = 0;

#ifdef CONFIG_OF_EARLY_FLATTREE
	/*
	 * If we're configured to take boot arguments from DT, look for those
	 * now.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
	    IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
		of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
#endif

	/*
	 * If we didn't get any arguments from DT (regardless of whether that's
	 * because we weren't configured to look for them, or because we looked
	 * & found none) then we'll take arguments from the bootloader.
	 * plat_mem_setup() should have filled arcs_cmdline with arguments from
	 * the bootloader.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
		bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);

	/*
	 * If the user specified a built-in command line & we didn't already
	 * prepend it, we append it to boot_command_line here.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
	    !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
}

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 *  At this stage the memory configuration of the system is known to the
 *  kernel but the generic memory management system is still entirely
 *  uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 *  At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get by without any kind of memory allocator. To keep old code from
 *	 breaking, plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */
static void __init arch_mem_init(char **cmdline_p)
{
	/* call board setup routine */
	plat_mem_setup();
	memblock_set_bottom_up(true);

	bootcmdline_init();
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	if (usermem)
		pr_info("User-defined physical RAM map overwrite\n");

	check_kernel_sections_mem();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

#ifndef CONFIG_NUMA
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
#endif
	bootmem_init();

	/*
	 * Prevent memblock from allocating high memory.
	 * This cannot be done before max_low_pfn is detected, so up to this
	 * point it is only possible to reserve physical memory with
	 * memblock_reserve; memblock_alloc* can be used only after this point.
	 */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));

#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
#endif

	device_tree_init();

	/*
	 * In order to reduce the possibility of a kernel panic when we fail
	 * to get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
	 * as little low memory as possible before plat_swiotlb_setup(), so
	 * make sparse_init() use top-down allocation.
	 */
	memblock_set_bottom_up(false);
	sparse_init();
	memblock_set_bottom_up(true);

	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	/* Reserve for hibernation. */
	memblock_reserve(__pa_symbol(&__nosave_begin),
			 __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));

	fdt_init_reserved_mem();

	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}
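
/*
 * Register RAM regions and the kernel's code/data/bss with the resource
 * tree so they show up in /proc/iomem.
 */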
static void __init resource_init(void)
{
	struct memblock_region *region;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for_each_memblock(memory, region) {
		phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region));
		phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		res->name = "System RAM";

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}
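
/*
 * Mark CPUs 0..possible-1 as possible so per-CPU data is set up for them;
 * CPUs beyond nr_cpu_ids can never be brought online.
 */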
#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#endif
#endif

	arch_mem_init(cmdline_p);
	dmi_setup();

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();
}
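
/*
 * kernelsp[] holds each CPU's kernel stack pointer for the low-level
 * exception entry code; fw_arg0..fw_arg3 are the raw $a0..$a3 values the
 * firmware passed at kernel entry, saved by head.S.
 */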
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
unsigned long fw_passed_dtb;
#endif

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
	return 0;
}
arch_initcall(debugfs_mips);
#endif

#ifdef CONFIG_DMA_MAYBE_COHERENT
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio; /* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = IO_COHERENCE_ENABLED;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = IO_COHERENCE_DISABLED;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif