// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */
#include <linux/export.h>
#include <linux/panic_notifier.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include <asm/fadump.h>
#include <asm/udbg.h>
#include <asm/hugetlb.h>
#include <asm/livepatch.h>
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
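
/*
 * DBG() output goes to the low-level udbg console, so it is usable long
 * before the regular printk console has been registered.
 */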

/* The main machine-dep calls structure */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;

/*
 * This still seems to be needed... -- paulus
 */
struct screen_info screen_info = {
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};
#if defined(CONFIG_FB_VGA16_MODULE)
EXPORT_SYMBOL(screen_info);
#endif

/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
int of_i8042_aux_irq;
EXPORT_SYMBOL_GPL(of_i8042_aux_irq);

#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

#ifdef CONFIG_CRASH_CORE
/* This keeps track of which cpu is the crashing one. */
int crashing_cpu = -1;
#endif

/* also used by kexec */
void machine_shutdown(void)
{
	/*
	 * if fadump is active, cleanup the fadump registration before we
	 * shut down.
	 */
	fadump_cleanup();

	if (ppc_md.machine_shutdown)
		ppc_md.machine_shutdown();
}

static void machine_hang(void)
{
	pr_emerg("System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1)
		;
}

void machine_restart(char *cmd)
{
	machine_shutdown();
	if (ppc_md.restart)
		ppc_md.restart(cmd);

	smp_send_stop();

	do_kernel_restart(cmd);
	mdelay(1000);

	machine_hang();
}

void machine_power_off(void)
{
	machine_shutdown();
	if (pm_power_off)
		pm_power_off();
	smp_send_stop();
	machine_hang();
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);

void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void machine_halt(void)
{
	machine_shutdown();
	if (ppc_md.halt)
		ppc_md.halt();

	smp_send_stop();
	machine_hang();
}
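
/*
 * cpu_pvr caches each CPU's processor version register, recorded as the
 * CPU comes online, so that show_cpuinfo() can report it without having
 * to interrogate the other CPU.
 */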
DEFINE_PER_CPU(unsigned int, cpu_pvr);

static void show_cpuinfo_summary(struct seq_file *m)
{
	struct device_node *root;
	const char *model = NULL;
	unsigned long bogosum = 0;
	int i;
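
	/*
	 * On SMP ppc32 the summary reports the sum of every online CPU's
	 * loops_per_jiffy, converted to BogoMIPS (two decimal places) by
	 * the fixed-point arithmetic below.
	 */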
	if (IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_PPC32)) {
		for_each_online_cpu(i)
			bogosum += loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum / (500000 / HZ), bogosum / (5000 / HZ) % 100);
	}
	seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
	if (ppc_md.name)
		seq_printf(m, "platform\t: %s\n", ppc_md.name);
	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	if (model)
		seq_printf(m, "model\t\t: %s\n", model);
	of_node_put(root);

	if (ppc_md.show_cpuinfo != NULL)
		ppc_md.show_cpuinfo(m);

	/* Display the amount of memory */
	if (IS_ENABLED(CONFIG_PPC32))
		seq_printf(m, "Memory\t\t: %d MB\n",
			   (unsigned int)(total_memory / (1024 * 1024)));
}
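
/*
 * Per-CPU body of /proc/cpuinfo; show_cpuinfo_summary() above is appended
 * once, after the entry for the last online CPU has been printed.
 */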
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	unsigned int pvr;
	unsigned long proc_freq;
	unsigned short maj;
	unsigned short min;

#ifdef CONFIG_SMP
	pvr = per_cpu(cpu_pvr, cpu_id);
#else
	pvr = mfspr(SPRN_PVR);
#endif
	maj = (pvr >> 8) & 0xFF;
	min = pvr & 0xFF;

	seq_printf(m, "processor\t: %lu\ncpu\t\t: ", cpu_id);

	if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
		seq_puts(m, cur_cpu_spec->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		seq_puts(m, ", altivec supported");

	seq_putc(m, '\n');

#ifdef CONFIG_TAU
	if (cpu_has_feature(CPU_FTR_TAU)) {
		if (IS_ENABLED(CONFIG_TAU_AVERAGE)) {
			/* more straightforward, but potentially misleading */
			seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
				   cpu_temp(cpu_id));
		} else {
			/* show the actual temp sensor range */
			u32 temp;

			temp = cpu_temp_both(cpu_id);
			seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
				   temp & 0xff, temp >> 16);
		}
	}
#endif /* CONFIG_TAU */

	/*
	 * Platforms that have variable clock rates should implement
	 * the method ppc_md.get_proc_freq() that reports the clock
	 * rate of a given cpu. The rest can use ppc_proc_freq to
	 * report the clock rate that is the same across all cpus.
	 */
	if (ppc_md.get_proc_freq)
		proc_freq = ppc_md.get_proc_freq(cpu_id);
	else
		proc_freq = ppc_proc_freq;

	if (proc_freq)
		seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
			   proc_freq / 1000000, proc_freq % 1000000);

	if (ppc_md.show_percpuinfo != NULL)
		ppc_md.show_percpuinfo(m, cpu_id);

	/* If we are a Freescale core do a simple check so
	 * we don't have to keep adding cases in the future */
	if (PVR_VER(pvr) & 0x8000) {
		switch (PVR_VER(pvr)) {
		case 0x8000:	/* 7441/7450/7451, Voyager */
		case 0x8001:	/* 7445/7455, Apollo 6 */
		case 0x8002:	/* 7447/7457, Apollo 7 */
		case 0x8003:	/* 7447A, Apollo 7 PM */
		case 0x8004:	/* 7448, Apollo 8 */
		case 0x800c:	/* 7410, Nitro */
			maj = ((pvr >> 8) & 0xF);
			min = PVR_MIN(pvr);
			break;
		default:	/* e500/book-e */
			maj = PVR_MAJ(pvr);
			min = PVR_MIN(pvr);
			break;
		}
	} else {
		switch (PVR_VER(pvr)) {
		case 0x1008:	/* 740P/750P ?? */
			maj = ((pvr >> 8) & 0xFF) - 1;
			min = PVR_MIN(pvr);
			break;
		case 0x004e: /* POWER9 bits 12-15 give chip type */
		case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
			maj = (pvr >> 8) & 0x0F;
			min = PVR_MIN(pvr);
			break;
		default:
			maj = (pvr >> 8) & 0xFF;
			min = PVR_MIN(pvr);
			break;
		}
	}

	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
		   maj, min, PVR_VER(pvr), PVR_REV(pvr));

	if (IS_ENABLED(CONFIG_PPC32))
		seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
			   (loops_per_jiffy / (5000 / HZ)) % 100);

	seq_putc(m, '\n');

	/* If this is the last cpu, print the summary */
	if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
		show_cpuinfo_summary(m);

	return 0;
}
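
/*
 * The seq_file iterator encodes "cpu N" as the non-NULL cookie N + 1,
 * since a NULL return from c_start()/c_next() means end of sequence;
 * show_cpuinfo() undoes the offset with its "- 1".
 */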
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = cpumask_first(cpu_online_mask);
	else
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	if ((*pos) < nr_cpu_ids)
		return (void *)(unsigned long)(*pos + 1);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};

void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	DBG(" -> check_for_initrd()  initrd_start=0x%lx  initrd_end=0x%lx\n",
	    initrd_start, initrd_end);

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear the initrd reference.
	 */
	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_SMP

int threads_per_core, threads_per_subcore, threads_shift __read_mostly;
cpumask_t threads_core_mask __read_mostly;
EXPORT_SYMBOL_GPL(threads_per_core);
EXPORT_SYMBOL_GPL(threads_per_subcore);
EXPORT_SYMBOL_GPL(threads_shift);
EXPORT_SYMBOL_GPL(threads_core_mask);

static void __init cpu_init_thread_core_maps(int tpc)
{
	int i;

	threads_per_core = tpc;
	threads_per_subcore = tpc;
	cpumask_clear(&threads_core_mask);

	/* This implementation only supports a power of 2 number of threads
	 * for simplicity and performance
	 */
	threads_shift = ilog2(tpc);
	BUG_ON(tpc != (1 << threads_shift));

	for (i = 0; i < tpc; i++)
		cpumask_set_cpu(i, &threads_core_mask);

	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
	       tpc, tpc > 1 ? "s" : "");
	printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}

u32 *cpu_to_phys_id = NULL;

/**
 * setup_cpu_maps - initialize the following cpu maps:
 *                  cpu_possible_mask
 *                  cpu_present_mask
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_mask as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
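/*
 * For illustration only (not taken from any particular machine): an SMT4
 * cpu node would typically carry
 *
 *	ibm,ppc-interrupt-server#s = <0x20 0x21 0x22 0x23>;
 *
 * one hardware thread id per cell; the number of threads is derived from
 * the property length below.
 */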
void __init smp_setup_cpu_maps(void)
{
	struct device_node *dn;
	int cpu = 0;
	int nthreads = 1;

	DBG("smp_setup_cpu_maps()\n");

	cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
					__alignof__(u32));
	if (!cpu_to_phys_id)
		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
		      __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));

	for_each_node_by_type(dn, "cpu") {
		const __be32 *intserv;
		__be32 cpu_be;
		int j, len;

		DBG("  * %pOF...\n", dn);

		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
					  &len);
		if (intserv) {
			DBG("    ibm,ppc-interrupt-server#s -> %d threads\n",
			    (len / sizeof(int)));
		} else {
			DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
			intserv = of_get_property(dn, "reg", &len);
			if (!intserv) {
				cpu_be = cpu_to_be32(cpu);
				/* XXX: what is this? uninitialized?? */
				intserv = &cpu_be;	/* assume logical == phys */
				len = 4;
			}
		}

		nthreads = len / sizeof(int);

		for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
			bool avail;

			DBG("    thread %d -> cpu %d (hard id %d)\n",
			    j, cpu, be32_to_cpu(intserv[j]));
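
			/*
			 * A cpu node that is not marked available may still be
			 * brought up later if firmware left it waiting in a
			 * spin-table, so treat such a cpu as present.
			 */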
			avail = of_device_is_available(dn);
			if (!avail)
				avail = !of_property_match_string(dn,
						"enable-method", "spin-table");

			set_cpu_present(cpu, avail);
			set_cpu_possible(cpu, true);
			cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
			cpu++;
		}

		if (cpu >= nr_cpu_ids) {
			of_node_put(dn);
			break;
		}
	}

	/* If no SMT supported, nthreads is forced to 1 */
	if (!cpu_has_feature(CPU_FTR_SMT)) {
		DBG("  SMT disabled ! nthreads forced to 1\n");
		nthreads = 1;
	}

#ifdef CONFIG_PPC64
	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		const __be32 *ireg;

		num_addr_cell = of_n_addr_cells(dn);
		num_size_cell = of_n_size_cells(dn);

		ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		if (!ireg)
			goto out;
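
		/*
		 * ibm,lrdr-capacity describes the maximum the partition may
		 * grow to: #address-cells + #size-cells cells of maximum
		 * memory, followed by the maximum number of processors,
		 * which is the cell read below.
		 */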
		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= nthreads;

		if (maxcpus > nr_cpu_ids) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %u.\n",
			       maxcpus, nr_cpu_ids);
			maxcpus = nr_cpu_ids;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			set_cpu_possible(cpu, true);
	out:
		of_node_put(dn);
	}
	vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */

	/* Initialize CPU <=> thread mapping
	 *
	 * WARNING: We assume that the number of threads is the same for
	 * every CPU in the system. If that is not the case, then some code
	 * here will have to be reworked
	 */
	cpu_init_thread_core_maps(nthreads);

	/* Now that possible cpus are set, set nr_cpu_ids for later use */
	setup_nr_cpu_ids();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PCSPKR_PLATFORM
static __init int add_pcspkr(void)
{
	struct device_node *np;
	struct platform_device *pd;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
	of_node_put(np);
	if (!np)
		return -ENODEV;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
#endif	/* CONFIG_PCSPKR_PLATFORM */

void probe_machine(void)
{
	extern struct machdep_calls __machine_desc_start;
	extern struct machdep_calls __machine_desc_end;
	unsigned int i;
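
	/*
	 * Each platform's define_machine() places its machdep_calls in a
	 * dedicated machine-description section; the linker provides the
	 * __machine_desc_start/end symbols bounding it, and probing simply
	 * walks that table.
	 */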

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type ...\n");

	/*
	 * Check ppc_md is empty, if not we have a bug, ie, we setup an
	 * entry before probe_machine() which will be overwritten
	 */
	for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) {
		if (((void **)&ppc_md)[i]) {
			printk(KERN_ERR "Entry %d in ppc_md non empty before"
			       " machine probe !\n", i);
		}
	}

	for (machine_id = &__machine_desc_start;
	     machine_id < &__machine_desc_end;
	     machine_id++) {
		DBG("  %s ...", machine_id->name);
		memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
		if (ppc_md.probe()) {
			DBG(" match !\n");
			break;
		}
		DBG("\n");
	}
	/* What can we do if we didn't find ? */
	if (machine_id >= &__machine_desc_end) {
		pr_err("No suitable machine description found !\n");
		for (;;);
	}

	printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
}

/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
	struct device_node *parent, *np = NULL;
	int ret = -ENODEV;

	switch (base_port) {
	case I8042_DATA_REG:
		if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
			np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
		if (np) {
			parent = of_get_parent(np);
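
			/*
			 * Fall back to the conventional ISA IRQs (1 for the
			 * keyboard, 12 for the aux/mouse port) when the device
			 * tree does not provide an interrupt mapping.
			 */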
			of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0);
			if (!of_i8042_kbd_irq)
				of_i8042_kbd_irq = 1;

			of_i8042_aux_irq = irq_of_parse_and_map(parent, 1);
			if (!of_i8042_aux_irq)
				of_i8042_aux_irq = 12;

			of_node_put(np);
			np = parent;
			break;
		}
		np = of_find_node_by_type(NULL, "8042");
		/* Pegasos has no device_type on its 8042 node, look for the
		 * name instead */
		if (!np)
			np = of_find_node_by_name(NULL, "8042");
		if (np) {
			of_i8042_kbd_irq = 1;
			of_i8042_aux_irq = 12;
		}
		break;
	case FDC_BASE: /* FDC1 */
		np = of_find_node_by_type(NULL, "fdc");
		break;
	default:
		/* ipmi is supposed to fail here */
		break;
	}
	if (!np)
		return ret;
	parent = of_get_parent(np);
	if (parent) {
		if (of_node_is_type(parent, "isa"))
			ret = 0;
		of_node_put(parent);
	}
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);

static int ppc_panic_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	/*
	 * panic does a local_irq_disable, but we really
	 * want interrupts to be hard disabled.
	 */
	hard_irq_disable();

	/*
	 * If firmware-assisted dump has been registered then trigger
	 * firmware-assisted dump and let firmware handle everything else.
	 */
	crash_fadump(NULL, ptr);
	if (ppc_md.panic)
		ppc_md.panic(ptr);	/* May not return */
	return NOTIFY_DONE;
}

static struct notifier_block ppc_panic_block = {
	.notifier_call = ppc_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};

/*
 * Dump out kernel offset information on panic.
 */
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
			      void *p)
{
	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
		 kaslr_offset(), KERNELBASE);

	return 0;
}

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

void __init setup_panic(void)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kernel_offset_notifier);

	/* PPC64 always does a hard irq disable in its panic handler */
	if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
		return;
	atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}

#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency.  This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree.  Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */

#define KERNEL_COHERENCY	(!IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))

static int __init check_cache_coherency(void)
{
	struct device_node *np;
	const void *prop;
	bool devtree_coherency;

	np = of_find_node_by_path("/");
	prop = of_get_property(np, "coherency-off", NULL);
	of_node_put(np);

	devtree_coherency = prop ? false : true;

	if (devtree_coherency != KERNEL_COHERENCY) {
		printk(KERN_ERR
			"kernel coherency:%s != device tree_coherency:%s\n",
			KERNEL_COHERENCY ? "on" : "off",
			devtree_coherency ? "on" : "off");
		BUG();
	}

	return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */

void ppc_printk_progress(char *s, unsigned short hex)
{
	pr_info("%s\n", s);
}

static __init void print_system_info(void)
{
	pr_info("-----------------------------------------------------\n");
	pr_info("phys_mem_size     = 0x%llx\n",
		(unsigned long long)memblock_phys_mem_size());

	pr_info("dcache_bsize      = 0x%x\n", dcache_bsize);
	pr_info("icache_bsize      = 0x%x\n", icache_bsize);

	pr_info("cpu_features      = 0x%016lx\n", cur_cpu_spec->cpu_features);
	pr_info("  possible        = 0x%016lx\n",
		(unsigned long)CPU_FTRS_POSSIBLE);
	pr_info("  always          = 0x%016lx\n",
		(unsigned long)CPU_FTRS_ALWAYS);
	pr_info("cpu_user_features = 0x%08x 0x%08x\n",
		cur_cpu_spec->cpu_user_features,
		cur_cpu_spec->cpu_user_features2);
	pr_info("mmu_features      = 0x%08x\n", cur_cpu_spec->mmu_features);
#ifdef CONFIG_PPC64
	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
#ifdef CONFIG_PPC_BOOK3S
	pr_info("vmalloc start     = 0x%lx\n", KERN_VIRT_START);
	pr_info("IO start          = 0x%lx\n", KERN_IO_START);
	pr_info("vmemmap start     = 0x%lx\n", (unsigned long)vmemmap);
#endif
#endif

	if (!early_radix_enabled())
		print_system_hash_info();

	if (PHYSICAL_START > 0)
		pr_info("physical_start    = 0x%llx\n",
			(unsigned long long)PHYSICAL_START);

	pr_info("-----------------------------------------------------\n");
}

static void __init smp_setup_pacas(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		allocate_paca(cpu);
		set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
	}

	memblock_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32));
	cpu_to_phys_id = NULL;
}

/*
 * Called from start_kernel(), this initializes memblock, which is used
 * to manage page allocation until mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	*cmdline_p = boot_command_line;

	/* Set a half-reasonable default so udelay does something sensible */
	loops_per_jiffy = 500000000 / HZ;

	/* Unflatten the device-tree passed by prom_init or kexec */
	unflatten_device_tree();

	/*
	 * Initialize cache line/block info from device-tree (on ppc64) or
	 * just cputable (on ppc32).
	 */
	initialize_cache_info();

	/* Initialize RTAS if available. */
	rtas_initialize();

	/* Check if we have an initrd provided via the device-tree. */
	check_for_initrd();

	/* Probe the machine type, establish ppc_md. */
	probe_machine();

	/* Setup panic notifier if requested by the platform. */
	setup_panic();

	/*
	 * Configure ppc_md.power_save (ppc32 only; 64-bit machines do
	 * it from their respective probe() function).
	 */
	setup_power_save();

	/* Discover standard serial ports. */
	find_legacy_serial_ports();

	/* Register early console with the printk subsystem. */
	register_early_udbg_console();

	/* Setup the various CPU maps based on the device-tree. */
	smp_setup_cpu_maps();

	/* Initialize xmon. */
	xmon_setup();

	/* Check the SMT related command line arguments (ppc64). */
	check_smt_enabled();

	/* Parse memory topology */
	mem_topology_setup();

	/*
	 * Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids.
	 *
	 * Freescale Book3e parts spin in a loop provided by firmware,
	 * so smp_release_cpus() does nothing for them.
	 */
	smp_setup_pacas();

	/* On BookE, setup per-core TLB data structures. */
	setup_tlb_core_data();

	/* Print various info about the machine that has been gathered so far. */
	print_system_info();

	/* Reserve large chunks of memory for use by CMA for KVM. */
	kvm_cma_reserve();

	/* Reserve large chunks of memory for use by CMA for hugetlb. */
	gigantic_hugetlb_cma_reserve();

	klp_init_thread_info(&init_task);

	setup_initial_init_mm(_stext, _etext, _edata, _end);

	mm_iommu_init(&init_mm);
	irqstack_early_init();
	exc_lvl_early_init();
	emergency_stack_init();

	smp_release_cpus();

	initmem_init();

	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);

	if (ppc_md.setup_arch)
		ppc_md.setup_arch();

	setup_barrier_nospec();

	paging_init();

	/* Initialize the MMU context management stuff. */
	mmu_context_init();

	/* Interrupt code needs to be 64K-aligned. */
	if (IS_ENABLED(CONFIG_PPC64) && (unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
		      (unsigned long)_stext);
}