1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/errno.h>
3 #include <linux/init.h>
5 #include <asm/processor.h>
7 #include <asm/sections.h>
8 #include <asm/mem_detect.h>
9 #include <asm/sparsemem.h>
10 #include "compressed/decompressor.h"
13 struct mem_detect_info __bootdata(mem_detect);
15 /* up to 256 storage elements, 1020 subincrements each */
16 #define ENTRIES_EXTENDED_MAX \
17 (256 * (1020 / 2) * sizeof(struct mem_detect_block))
20 * To avoid corrupting old kernel memory during dump, find lowest memory
21 * chunk possible either right after the kernel end (decompressed kernel) or
22 * after initrd (if it is present and there is no hole between the kernel end
/*
 * mem_detect_alloc_extended() - pick a safe address for the extended
 * entries array (entries beyond the inline MEM_INLINED_ENTRIES slots).
 *
 * Starts at mem_safe_offset() rounded up to u64 alignment; if an initrd
 * is present and would overlap the candidate range of up to
 * ENTRIES_EXTENDED_MAX bytes, place the array just past the initrd
 * instead (see the comment fragment above about not corrupting old
 * kernel memory during dump).
 *
 * NOTE(review): listing is truncated here — original lines 26, 28, 32
 * and 34 (braces/blanks) are not visible in this view.
 */
25 static void *mem_detect_alloc_extended(void)
27 unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
/* avoid clobbering the initrd if it sits inside our candidate range */
29 if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
30 initrd_data.start < offset + ENTRIES_EXTENDED_MAX)
31 offset = ALIGN(initrd_data.start + initrd_data.size, sizeof(u64));
33 return (void *)offset;
/*
 * __get_mem_detect_block_ptr() - return a pointer to mem_detect entry n.
 *
 * The first MEM_INLINED_ENTRIES entries live inline in mem_detect
 * itself; any later entry comes from the extended array, which is
 * lazily placed on first use via mem_detect_alloc_extended().
 * No bounds check against ENTRIES_EXTENDED_MAX is visible here —
 * presumably callers guarantee n stays in range (TODO confirm).
 */
36 static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
38 if (n < MEM_INLINED_ENTRIES)
39 return &mem_detect.entries[n];
/* first access past the inline entries: set up the extended array */
40 if (unlikely(!mem_detect.entries_extended))
41 mem_detect.entries_extended = mem_detect_alloc_extended();
42 return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
46 * sequential calls to add_mem_detect_block with adjacent memory areas
47 * are merged together into single memory block.
/*
 * add_mem_detect_block() - record the memory range [start, end).
 *
 * If the new range begins exactly where the previous block ended, the
 * two are merged into one block instead of appending a new entry.
 *
 * NOTE(review): listing is truncated — the merge body (orig. lines
 * 56-60) and the append/count-increment tail (orig. lines 62-64) are
 * not visible here.
 */
49 void add_mem_detect_block(u64 start, u64 end)
51 struct mem_detect_block *block;
53 if (mem_detect.count) {
54 block = __get_mem_detect_block_ptr(mem_detect.count - 1);
/* adjacent to the last block: extend it rather than append */
55 if (block->end == start) {
61 block = __get_mem_detect_block_ptr(mem_detect.count);
/*
 * __diag260() - issue DIAGNOSE 0x260 with subcode 0x10 ("storage
 * configuration") under a temporary program-check handler.
 *
 * The inline asm saves the current program-new PSW, points it at a
 * local fixup label, executes the diagnose, and restores the PSW, so
 * that a machine which does not implement diag 0x260 takes the fixup
 * path instead of dying. Returns the ry response value on success,
 * -1 if a program check occurred (rc != 0).
 *
 * NOTE(review): listing is truncated — the rx register-pair setup,
 * parts of the asm body (incl. the rc assignment and fixup label
 * plumbing) and several operand lines are not visible here.
 */
67 static int __diag260(unsigned long rx1, unsigned long rx2)
69 unsigned long reg1, reg2, ry;
70 union register_pair rx;
76 ry = 0x10; /* storage configuration */
/* save the current program-new PSW, then redirect it to the fixup */
79 " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
80 " epsw %[reg1],%[reg2]\n"
81 " st %[reg1],0(%[psw_pgm])\n"
82 " st %[reg2],4(%[psw_pgm])\n"
84 " stg %[reg1],8(%[psw_pgm])\n"
85 " diag %[rx],%[ry],0x260\n"
/* fixup target: restore the original program-new PSW */
88 "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
89 : [reg1] "=&d" (reg1),
93 "+Q" (S390_lowcore.program_new_psw),
97 [psw_pgm] "a" (&S390_lowcore.program_new_psw)
99 return rc == 0 ? ry : -1;
/*
 * diag260() - detect memory layout via DIAGNOSE 0x260.
 *
 * Queries up to 8 storage extents (the z/VM limit, per the comment on
 * the array) and adds each one to mem_detect. The "+ 1" suggests the
 * extent's .end field is an inclusive last address being converted to
 * an exclusive end — TODO confirm against the diag 0x260 spec.
 *
 * NOTE(review): listing is truncated — the extent struct fields, the
 * rc/i declarations, the rc<0 early-exit and the return are not
 * visible here.
 */
102 static int diag260(void)
109 } storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
111 memset(storage_extents, 0, sizeof(storage_extents));
112 rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
/* rc is the number of extents reported; never read past the array */
116 for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
117 add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
/*
 * tprot() - probe whether the storage at addr is accessible.
 *
 * Executes TEST PROTECTION on addr under a temporary program-check
 * handler (same save/redirect/restore PSW dance as __diag260()), so
 * that probing a non-existent address does not kill the boot.
 * Presumably returns nonzero when the address is inaccessible — the
 * rc plumbing and return statement are not visible in this listing.
 */
121 static int tprot(unsigned long addr)
123 unsigned long reg1, reg2;
/* save the current program-new PSW, then redirect it to the fixup */
128 " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
129 " epsw %[reg1],%[reg2]\n"
130 " st %[reg1],0(%[psw_pgm])\n"
131 " st %[reg2],4(%[psw_pgm])\n"
133 " stg %[reg1],8(%[psw_pgm])\n"
134 " tprot 0(%[addr]),0\n"
/* fixup target: restore the original program-new PSW */
137 "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
138 : [reg1] "=&d" (reg1),
141 "=Q" (S390_lowcore.program_new_psw.addr),
143 : [psw_old] "a" (&old),
144 [psw_pgm] "a" (&S390_lowcore.program_new_psw),
/*
 * search_mem_end() - find the end of memory by probing with tprot().
 *
 * Searches in 1 MB granularity up to 2^MAX_PHYSMEM_BITS bytes: a pivot
 * address (in MB units, hence the << 20 shifts) is probed and the
 * search range narrowed accordingly — the loop and range-halving lines
 * are not visible in this listing, but the pivot/offset usage indicates
 * a binary search. Records a single block [0, end) when done.
 */
150 static void search_mem_end(void)
152 unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
153 unsigned long offset = 0;
158 pivot = offset + range;
/* tprot() == 0 means the 1 MB block at pivot is accessible */
159 if (!tprot(pivot << 20))
/* offset is the last accessible 1 MB block, so the end is offset + 1 */
163 add_mem_detect_block(0, (offset + 1) << 20);
/*
 * detect_memory() - populate mem_detect using the best available source.
 *
 * Sources are tried in order of preference, and mem_detect.info_source
 * records which one succeeded:
 *   1. SCLP storage info (sclp_early_read_storage_info()),
 *   2. DIAGNOSE 0x260 (condition line not visible in this listing,
 *      presumably "if (!diag260())" — TODO confirm),
 *   3. the SCLP-reported maximum as one flat block [0, max_physmem_end),
 *   4. brute-force binary search via search_mem_end().
 *
 * Returns the detected maximum physical memory end address.
 */
166 unsigned long detect_memory(void)
168 unsigned long max_physmem_end;
170 sclp_early_get_memsize(&max_physmem_end);
/* preferred source: SCLP storage info */
172 if (!sclp_early_read_storage_info()) {
173 mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
174 return max_physmem_end;
/* second choice: diag 0x260 (the guarding condition is not visible) */
178 mem_detect.info_source = MEM_DETECT_DIAG260;
179 return max_physmem_end;
/* third choice: trust the SCLP-reported size as one contiguous block */
182 if (max_physmem_end) {
183 add_mem_detect_block(0, max_physmem_end);
184 mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
185 return max_physmem_end;
/* last resort: probe memory directly */
189 mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
190 return get_mem_detect_end();