// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include "compressed/decompressor.h"
#include "boot.h"
#include "uv.h"
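
/*
 * Both sections below are copied into the decompressed kernel by
 * copy_bootdata(): .boot.data is only needed during early boot,
 * while .boot.preserved.data is also kept around after boot.
 */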
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
int __bootdata(is_full_image) = 1;
struct initrd_data __bootdata(initrd_data);

u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
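
/*
 * Print an error message via the early SCLP console and stop the
 * machine in disabled wait; there is no way to recover at this
 * point of the boot process.
 */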
void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}
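
/*
 * Load the program parameter (LPP) if facility 40 (the
 * load-program-parameter facility) is available, so that hardware
 * measurement samples taken during boot can be attributed to the
 * current task (PID 0 at this point).
 */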
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}
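
/*
 * With an uncompressed kernel there is no decompressor to provide
 * mem_safe_offset(); the first address safe to use is right behind
 * the kernel image, including its .bss.
 */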
#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif
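
/*
 * Move the initrd up to the address that is safe to use, but only if
 * it currently sits below that address and would otherwise be
 * overwritten.
 */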
static void rescue_initrd(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!initrd_data.start || !initrd_data.size)
		return;
	if (addr <= initrd_data.start)
		return;
	memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
	initrd_data.start = addr;
}
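
/*
 * Copy the .boot.data and .boot.preserved.data sections to their
 * final locations inside the decompressed kernel image. The section
 * sizes must match what the linked kernel expects, otherwise the
 * boot data layouts of decompressor and kernel have diverged.
 */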
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}
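
/*
 * Apply the relocations from the .rela.dyn section so that the kernel
 * can run at the (possibly randomized) address it was moved to. Every
 * relocation target gets the KASLR offset added; entries referencing
 * a defined dynamic symbol get the symbol value added as well.
 */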
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not be all online memory, but also include standby (offline)
 * memory. "ident_map_size" could be lower than actual standby or even online
 * memory present, due to limiting factors. We should never go above this limit.
 * It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}
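
/*
 * Decide on the kernel virtual address space layout. Below vmax the
 * address space has to hold, from low to high: the identity mapping,
 * the vmemmap array, the vmalloc area and the module area, optionally
 * constrained further by the KASAN shadow and the ultravisor secure
 * storage limit. The three-level layout (vmax = _REGION2_SIZE) is
 * preferred; if everything does not fit, or KASAN is enabled, the
 * four-level layout (vmax = _REGION1_SIZE) is used instead.
 */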
static void setup_kernel_memory_layout(void)
{
	bool vmalloc_size_verified = false;
	unsigned long vmemmap_off;
	unsigned long vspace_left;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) ||
	    vmalloc_size > _REGION2_SIZE ||
	    vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
		vmax = _REGION1_SIZE;
	else
		vmax = _REGION2_SIZE;

	/* keep vmemmap_off aligned to a top level region table entry */
	rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
	MODULES_END = vmax;
	if (is_prot_virt_host()) {
		/*
		 * forcing modules and vmalloc area under the ultravisor
		 * secure storage limit, so that any vmalloc allocation
		 * we do could be used to back secure guest storage.
		 */
		adjust_to_uv_max(&MODULES_END);
	}
#ifdef CONFIG_KASAN
	if (MODULES_END < vmax) {
		/* force vmalloc and modules below kasan shadow */
		MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
	} else {
		/*
		 * leave vmalloc and modules above kasan shadow but make
		 * sure they don't overlap with it
		 */
		vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
		vmalloc_size_verified = true;
		vspace_left = KASAN_SHADOW_START;
	}
#endif
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	if (vmalloc_size_verified) {
		VMALLOC_START = VMALLOC_END - vmalloc_size;
	} else {
		vmemmap_off = round_up(ident_map_size, rte_size);

		if (vmemmap_off + vmemmap_size > VMALLOC_END ||
		    vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
			/*
			 * allow vmalloc area to occupy up to 1/2 of
			 * the rest virtual space left.
			 */
			vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
		}
		VMALLOC_START = VMALLOC_END - vmalloc_size;
		vspace_left = VMALLOC_START;
	}

	pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
	/* keep vmemmap left most starting from a fresh region table entry */
	vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
	/* take care that identity map is lower than vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_off);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_off;
}

/*
 * Clear the BSS section of the decompressed Linux kernel image,
 * not the decompressor's own BSS.
 */
static void clear_bss_section(void)
{
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}
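
/*
 * Shift all load-time addresses in the vmlinux_info descriptor by
 * the KASLR offset, so that later consumers see the relocated
 * addresses.
 */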
static void offset_vmlinux_info(unsigned long offset)
{
	vmlinux.default_lma += offset;
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
}
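
/*
 * Main entry point of the decompressor: detect memory, set up the
 * kernel virtual memory layout, pick a (possibly random) load
 * address, move the kernel image there, apply relocations and
 * finally jump to the decompressed kernel's entry point.
 */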
void startup_kernel(void)
{
	unsigned long random_lma;
	unsigned long safe_addr;
	void *img;

	initrd_data.start = parmarea.initrd_start;
	initrd_data.size = parmarea.initrd_size;
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	setup_lpp();
	store_ipl_parmblock();
	safe_addr = mem_safe_offset();
	safe_addr = read_ipl_report(safe_addr);
	uv_query_info();
	rescue_initrd(safe_addr);
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	sanitize_prot_virt_host();
	setup_ident_map_size(detect_memory());
	setup_vmalloc_size();
	setup_kernel_memory_layout();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
		random_lma = get_random_base(safe_addr);
		if (random_lma) {
			__kaslr_offset = random_lma - vmlinux.default_lma;
			img = (void *)vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
		}
	}

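	/*
	 * Move the kernel image to its final (possibly randomized) load
	 * address: decompress_kernel() leaves the image at the
	 * decompressor's output address, while an uncompressed kernel
	 * only needs the extra copy when a KASLR offset was applied.
	 */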
	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset)
		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);

	clear_bss_section();
	copy_bootdata();
	if (IS_ENABLED(CONFIG_RELOCATABLE))
		handle_relocs(__kaslr_offset);

	if (__kaslr_offset) {
		/*
		 * Save KASLR offset for early dumps, before vmcore_info is set.
		 * Mark it odd to distinguish it from a real vmcore_info pointer.
		 */
		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
		/* Clear non-relocated kernel */
		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
			memset(img, 0, vmlinux.image_size);
	}
	vmlinux.entry();
}