// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Firmware Assisted dump: A robust mechanism to get reliable kernel crash
 * dump with assistance from firmware. This approach does not use kexec;
 * instead, firmware assists in booting the kdump kernel while preserving
 * memory contents. Most of the code implementation has been adapted
 * from the phyp assisted dump implementation written by Linas Vepstas and
 * Manish Ahuja.
 *
 * Copyright 2011 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) "fadump: " fmt

#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/cma.h>
#include <linux/hugetlb.h>
#include <linux/debugfs.h>

#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>
#include <asm/interrupt.h>

/*
 * The CPU that acquired the lock to trigger the fadump crash should
 * wait for other CPUs to enter.
 *
 * The timeout is in milliseconds.
 */
#define CRASH_TIMEOUT		500

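/*
 * Illustrative sketch (not additional upstream text): crash_fadump() below
 * consumes this timeout when entered via system reset, busy-waiting roughly
 * like:
 *
 *	msecs = CRASH_TIMEOUT;
 *	while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0))
 *		mdelay(1);
 *
 * i.e. up to ~500ms for secondary CPUs to enter before triggering the dump.
 */
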
static struct fw_dump fw_dump;

static void __init fadump_reserve_crash_area(u64 base);

#ifndef CONFIG_PRESERVE_FA_DUMP

static struct kobject *fadump_kobj;

static atomic_t cpus_in_fadump;
static DEFINE_MUTEX(fadump_mutex);

static struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };

#define RESERVED_RNGS_SZ	16384 /* 16K - 128 entries */
#define RESERVED_RNGS_CNT	(RESERVED_RNGS_SZ / \
				 sizeof(struct fadump_memory_range))
static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
static struct fadump_mrange_info
reserved_mrange_info = { "reserved", rngs, RESERVED_RNGS_SZ, 0, RESERVED_RNGS_CNT, true };

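/*
 * Note on the initializers above (field names assumed from the accessors
 * used later in this file): the six values are name, mem_ranges,
 * mem_ranges_sz, mem_range_cnt, max_mem_ranges and is_static.
 * crash_mrange_info starts empty and grows dynamically via krealloc();
 * reserved_mrange_info is backed by the static rngs[] array, so is_static
 * is true and it can never grow beyond RESERVED_RNGS_CNT entries.
 */
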
static void __init early_init_dt_scan_reserved_ranges(unsigned long node);

#ifdef CONFIG_CMA
static struct cma *fadump_cma;

/*
 * fadump_cma_init() - Initialize CMA area from a fadump reserved memory
 *
 * This function initializes CMA area from fadump reserved memory.
 * The total size of fadump reserved memory covers the boot memory size
 * + cpu data size + hpte size and metadata.
 * Initialize only the area equivalent to boot memory size for CMA use.
 * The remaining portion of fadump reserved memory is not given to CMA
 * and pages for those stay reserved. The boot memory size is aligned per
 * CMA requirements to satisfy the cma_init_reserved_mem() call.
 * Even if that call fails, we still hold the memory reservation and can
 * continue doing fadump.
 */
static int __init fadump_cma_init(void)
{
	unsigned long long base, size;
	int rc;

	if (!fw_dump.fadump_enabled)
		return 0;

	/*
	 * Do not use CMA if user has provided fadump=nocma kernel parameter.
	 * Return 1 to continue with fadump old behaviour.
	 */
	if (fw_dump.nocma)
		return 1;

	base = fw_dump.reserve_dump_area_start;
	size = fw_dump.boot_memory_size;

	if (!size)
		return 0;

	rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
	if (rc) {
		pr_err("Failed to init cma area for firmware-assisted dump, %d\n", rc);
		/*
		 * Though the CMA init has failed we still have memory
		 * reservation with us. The reserved memory will be
		 * blocked from production system usage. Hence return 1,
		 * so that we can continue with fadump.
		 */
		return 1;
	}

	/*
	 * If CMA activation fails, keep the pages reserved, instead of
	 * exposing them to buddy allocator. Same as 'fadump=nocma' case.
	 */
	cma_reserve_pages_on_error(fadump_cma);

	/* We have now successfully initialized the cma area for fadump. */
	pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx bytes of memory reserved for firmware-assisted dump\n",
		cma_get_size(fadump_cma),
		(unsigned long)cma_get_base(fadump_cma) >> 20,
		fw_dump.reserve_dump_area_size);

	return 1;
}
#else
static int __init fadump_cma_init(void) { return 1; }
#endif /* CONFIG_CMA */

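/*
 * Design note (summary of the comments above, not additional upstream
 * text): without CMA backing, the boot-memory-sized part of the fadump
 * reservation is unusable by the production kernel for its entire uptime.
 * With CMA, that part can still serve movable page allocations at runtime,
 * while remaining reclaimable for firmware to save the old kernel's memory
 * contents at crash time.
 */
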
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
				      int depth, void *data)
{
	if (depth == 0) {
		early_init_dt_scan_reserved_ranges(node);
		return 0;
	}

	if (depth != 1)
		return 0;

	if (strcmp(uname, "rtas") == 0) {
		rtas_fadump_dt_scan(&fw_dump, node);
		return 1;
	}

	if (strcmp(uname, "ibm,opal") == 0) {
		opal_fadump_dt_scan(&fw_dump, node);
		return 1;
	}

	return 0;
}

/*
 * If fadump is registered, check if the memory provided
 * falls within boot memory area and reserved memory area.
 */
int is_fadump_memory_area(u64 addr, unsigned long size)
{
	u64 d_start, d_end;

	if (!fw_dump.dump_registered)
		return 0;

	if (!size)
		return 0;

	d_start = fw_dump.reserve_dump_area_start;
	d_end = d_start + fw_dump.reserve_dump_area_size;
	if (((addr + size) > d_start) && (addr <= d_end))
		return 1;

	return (addr <= fw_dump.boot_mem_top);
}

int should_fadump_crash(void)
{
	if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
		return 0;
	return 1;
}

int is_fadump_active(void)
{
	return fw_dump.dump_active;
}

/*
 * Returns true, if there are no holes in memory area between d_start and
 * d_end, false otherwise.
 */
static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
{
	phys_addr_t reg_start, reg_end;
	bool ret = false;
	u64 i, start, end;

	for_each_mem_range(i, &reg_start, &reg_end) {
		start = max_t(u64, d_start, reg_start);
		end = min_t(u64, d_end, reg_end);
		if (d_start < end) {
			/* Memory hole from d_start to start */
			if (start > d_start)
				break;

			if (end == d_end) {
				ret = true;
				break;
			}

			d_start = end + 1;
		}
	}

	return ret;
}

/*
 * Returns true, if there are no holes in boot memory area,
 * false otherwise.
 */
bool is_fadump_boot_mem_contiguous(void)
{
	unsigned long d_start, d_end;
	bool ret = false;
	int i;

	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		d_start = fw_dump.boot_mem_addr[i];
		d_end = d_start + fw_dump.boot_mem_sz[i];

		ret = is_fadump_mem_area_contiguous(d_start, d_end);
		if (!ret)
			break;
	}

	return ret;
}

/*
 * Returns true, if there are no holes in reserved memory area,
 * false otherwise.
 */
bool is_fadump_reserved_mem_contiguous(void)
{
	u64 d_start, d_end;

	d_start = fw_dump.reserve_dump_area_start;
	d_end = d_start + fw_dump.reserve_dump_area_size;
	return is_fadump_mem_area_contiguous(d_start, d_end);
}

/* Print firmware assisted dump configurations for debugging purpose. */
static void __init fadump_show_config(void)
{
	int i;

	pr_debug("Support for firmware-assisted dump (fadump): %s\n",
		 (fw_dump.fadump_supported ? "present" : "no support"));

	if (!fw_dump.fadump_supported)
		return;

	pr_debug("Fadump enabled    : %s\n",
		 (fw_dump.fadump_enabled ? "yes" : "no"));
	pr_debug("Dump Active       : %s\n",
		 (fw_dump.dump_active ? "yes" : "no"));
	pr_debug("Dump section sizes:\n");
	pr_debug("    CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
	pr_debug("    HPTE region size   : %lx\n", fw_dump.hpte_region_size);
	pr_debug("    Boot memory size   : %lx\n", fw_dump.boot_memory_size);
	pr_debug("    Boot memory top    : %llx\n", fw_dump.boot_mem_top);
	pr_debug("Boot memory regions cnt: %llx\n", fw_dump.boot_mem_regs_cnt);
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		pr_debug("[%03d] base = %llx, size = %llx\n", i,
			 fw_dump.boot_mem_addr[i], fw_dump.boot_mem_sz[i]);
	}
}

/*
 * fadump_calculate_reserve_size(): reserve variable boot area 5% of System RAM
 *
 * Function to find the largest memory size we need to reserve during early
 * boot process. This will be the size of the memory that is required for a
 * kernel to boot successfully.
 *
 * This function has been taken from phyp-assisted dump feature implementation.
 *
 * Returns the larger of 256MB or 5% of system RAM, rounded down to multiples
 * of 256MB.
 *
 * TODO: Come up with a better approach to find out a more accurate memory
 * size that is required for a kernel to boot successfully.
 */
static __init u64 fadump_calculate_reserve_size(void)
{
	u64 base, size, bootmem_min;
	int ret;

	if (fw_dump.reserve_bootvar)
		pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n");

	/*
	 * Check if the size is specified through crashkernel= cmdline
	 * option. If yes, then use that but ignore base as fadump reserves
	 * memory at a predefined offset.
	 */
	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&size, &base);
	if (ret == 0 && size > 0) {
		unsigned long max_size;

		if (fw_dump.reserve_bootvar)
			pr_info("Using 'crashkernel=' parameter for memory reservation.\n");

		fw_dump.reserve_bootvar = (unsigned long)size;

		/*
		 * Adjust if the boot memory size specified is above
		 * the upper limit.
		 */
		max_size = memblock_phys_mem_size() / MAX_BOOT_MEM_RATIO;
		if (fw_dump.reserve_bootvar > max_size) {
			fw_dump.reserve_bootvar = max_size;
			pr_info("Adjusted boot memory size to %luMB\n",
				(fw_dump.reserve_bootvar >> 20));
		}

		return fw_dump.reserve_bootvar;
	} else if (fw_dump.reserve_bootvar) {
		/*
		 * 'fadump_reserve_mem=' is being used to reserve memory
		 * for firmware-assisted dump.
		 */
		return fw_dump.reserve_bootvar;
	}

	/* divide by 20 to get 5% of value */
	size = memblock_phys_mem_size() / 20;

	/* round it down to multiples of 256MB */
	size = size & ~0x0FFFFFFFUL;

	/* Truncate to memory_limit. We don't want to over reserve the memory. */
	if (memory_limit && size > memory_limit)
		size = memory_limit;

	bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
	return (size > bootmem_min ? size : bootmem_min);
}

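/*
 * Worked example of the default sizing above (illustrative numbers): with
 * 128GB of system RAM and no 'crashkernel=' / 'fadump_reserve_mem=' given,
 *
 *	size = 128GB / 20	 = 6553.6MB	(5% of RAM)
 *	size &= ~0x0FFFFFFFUL	-> 6400MB	(256MB multiple)
 *
 * and the result is then raised to the platform's fadump_get_bootmem_min()
 * value if that minimum is larger.
 */
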
/*
 * Calculate the total memory size required to be reserved for
 * firmware-assisted dump registration.
 */
static unsigned long __init get_fadump_area_size(void)
{
	unsigned long size = 0;

	size += fw_dump.cpu_state_data_size;
	size += fw_dump.hpte_region_size;
	size += fw_dump.boot_memory_size;
	size += sizeof(struct fadump_crash_info_header);
	size += sizeof(struct elfhdr); /* ELF core header. */
	size += sizeof(struct elf_phdr); /* place holder for cpu notes */
	/* Program headers for crash memory regions. */
	size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);

	size = PAGE_ALIGN(size);

	/* This is to hold kernel metadata on platforms that support it */
	size += (fw_dump.ops->fadump_get_metadata_size ?
		 fw_dump.ops->fadump_get_metadata_size() : 0);

	return size;
}

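/*
 * Sketch of the reservation layout implied by the sums above (ordering is
 * illustrative; exact placement is decided by the platform ops):
 *
 *	[ boot memory copy | CPU state data | HPTE region |
 *	  crash info header + ELF core headers | platform metadata ]
 *
 * with the total rounded up to PAGE_SIZE before the optional metadata.
 */
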
static int __init add_boot_mem_region(unsigned long rstart,
				      unsigned long rsize)
{
	int i = fw_dump.boot_mem_regs_cnt++;

	if (fw_dump.boot_mem_regs_cnt > FADUMP_MAX_MEM_REGS) {
		fw_dump.boot_mem_regs_cnt = FADUMP_MAX_MEM_REGS;
		return 0;
	}

	pr_debug("Added boot memory range[%d] [%#016lx-%#016lx)\n",
		 i, rstart, (rstart + rsize));
	fw_dump.boot_mem_addr[i] = rstart;
	fw_dump.boot_mem_sz[i] = rsize;
	return 1;
}

/*
 * Firmware usually has a hard limit on the data it can copy per region.
 * Honour that by splitting a memory range into multiple regions.
 */
static int __init add_boot_mem_regions(unsigned long mstart,
				       unsigned long msize)
{
	unsigned long rstart, rsize, max_size;
	int ret = 1;

	rstart = mstart;
	max_size = fw_dump.max_copy_size ? fw_dump.max_copy_size : msize;
	while (msize) {
		if (msize > max_size)
			rsize = max_size;
		else
			rsize = msize;

		ret = add_boot_mem_region(rstart, rsize);
		if (!ret)
			break;

		msize -= rsize;
		rstart += rsize;
	}

	return ret;
}

static int __init fadump_get_boot_mem_regions(void)
{
	unsigned long size, cur_size, hole_size, last_end;
	unsigned long mem_size = fw_dump.boot_memory_size;
	phys_addr_t reg_start, reg_end;
	int ret = 1;
	u64 i;

	fw_dump.boot_mem_regs_cnt = 0;

	last_end = 0;
	hole_size = 0;
	cur_size = 0;
	for_each_mem_range(i, &reg_start, &reg_end) {
		size = reg_end - reg_start;
		hole_size += (reg_start - last_end);

		if ((cur_size + size) >= mem_size) {
			size = (mem_size - cur_size);
			ret = add_boot_mem_regions(reg_start, size);
			break;
		}

		mem_size -= size;
		cur_size += size;
		ret = add_boot_mem_regions(reg_start, size);
		if (!ret)
			break;

		last_end = reg_end;
	}
	fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size);

	return ret;
}

/*
 * Returns true, if the given range overlaps with reserved memory ranges
 * starting at idx. Also, updates idx to index of overlapping memory range
 * with the given memory range.
 */
static bool __init overlaps_reserved_ranges(u64 base, u64 end, int *idx)
{
	bool ret = false;
	int i;

	for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
		u64 rbase = reserved_mrange_info.mem_ranges[i].base;
		u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;

		if (end <= rbase)
			break;

		if ((end > rbase) && (base < rend)) {
			*idx = i;
			ret = true;
			break;
		}
	}

	return ret;
}

/*
 * Locate a suitable memory area to reserve memory for FADump. While at it,
 * lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
 */
static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
{
	struct fadump_memory_range *mrngs;
	phys_addr_t mstart, mend;
	int idx = 0;
	u64 i, ret = 0;

	mrngs = reserved_mrange_info.mem_ranges;
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&mstart, &mend, NULL) {
		pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
			 i, mstart, mend, base);

		if (mstart > base)
			base = PAGE_ALIGN(mstart);

		while ((mend > base) && ((mend - base) >= size)) {
			if (!overlaps_reserved_ranges(base, base + size, &idx)) {
				ret = base;
				goto out;
			}

			base = mrngs[idx].base + mrngs[idx].size;
			base = PAGE_ALIGN(base);
		}
	}

out:
	return ret;
}

int __init fadump_reserve_mem(void)
{
	u64 base, size, mem_boundary, bootmem_min;
	int ret = 1;

	if (!fw_dump.fadump_enabled)
		return 0;

	if (!fw_dump.fadump_supported) {
		pr_info("Firmware-Assisted Dump is not supported on this hardware\n");
		goto error_out;
	}

	/*
	 * Initialize boot memory size
	 * If dump is active then we have already calculated the size during
	 * first kernel.
	 */
	if (!fw_dump.dump_active) {
		fw_dump.boot_memory_size =
			PAGE_ALIGN(fadump_calculate_reserve_size());
#ifdef CONFIG_CMA
		if (!fw_dump.nocma) {
			fw_dump.boot_memory_size =
				ALIGN(fw_dump.boot_memory_size,
				      CMA_MIN_ALIGNMENT_BYTES);
		}
#endif

		bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
		if (fw_dump.boot_memory_size < bootmem_min) {
			pr_err("Can't enable fadump with boot memory size (0x%lx) less than 0x%llx\n",
			       fw_dump.boot_memory_size, bootmem_min);
			goto error_out;
		}

		if (!fadump_get_boot_mem_regions()) {
			pr_err("Too many holes in boot memory area to enable fadump\n");
			goto error_out;
		}
	}

	/*
	 * Calculate the memory boundary.
	 * If memory_limit is less than actual memory boundary then reserve
	 * the memory for fadump beyond the memory_limit and adjust the
	 * memory_limit accordingly, so that the running kernel can run with
	 * specified memory_limit.
	 */
	if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
		size = get_fadump_area_size();
		if ((memory_limit + size) < memblock_end_of_DRAM())
			memory_limit += size;
		else
			memory_limit = memblock_end_of_DRAM();
		printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
				" dump, now %#016llx\n", memory_limit);
	}
	if (memory_limit)
		mem_boundary = memory_limit;
	else
		mem_boundary = memblock_end_of_DRAM();

	base = fw_dump.boot_mem_top;
	size = get_fadump_area_size();
	fw_dump.reserve_dump_area_size = size;
	if (fw_dump.dump_active) {
		pr_info("Firmware-assisted dump is active.\n");

#ifdef CONFIG_HUGETLB_PAGE
		/*
		 * FADump capture kernel doesn't care much about hugepages.
		 * In fact, handling hugepages in capture kernel is asking for
		 * trouble. So, disable HugeTLB support when fadump is active.
		 */
		hugetlb_disabled = true;
#endif

		/*
		 * If last boot has crashed then reserve all the memory
		 * above boot memory size so that we don't touch it until
		 * dump is written to disk by userspace tool. This memory
		 * can be released for general use by invalidating fadump.
		 */
		fadump_reserve_crash_area(base);

		pr_debug("fadumphdr_addr = %#016lx\n", fw_dump.fadumphdr_addr);
		pr_debug("Reserve dump area start address: 0x%lx\n",
			 fw_dump.reserve_dump_area_start);
	} else {
		/*
		 * Reserve memory at an offset closer to bottom of the RAM to
		 * minimize the impact of memory hot-remove operation.
		 */
		base = fadump_locate_reserve_mem(base, size);

		if (!base || (base + size > mem_boundary)) {
			pr_err("Failed to find memory chunk for reservation!\n");
			goto error_out;
		}
		fw_dump.reserve_dump_area_start = base;

		/*
		 * Calculate the kernel metadata address and register it with
		 * f/w if the platform supports.
		 */
		if (fw_dump.ops->fadump_setup_metadata &&
		    (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
			goto error_out;

		if (memblock_reserve(base, size)) {
			pr_err("Failed to reserve memory!\n");
			goto error_out;
		}

		pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
			(size >> 20), base, (memblock_phys_mem_size() >> 20));

		ret = fadump_cma_init();
	}

	return ret;
error_out:
	fw_dump.fadump_enabled = 0;
	return 0;
}

/* Look for fadump= cmdline option. */
static int __init early_fadump_param(char *p)
{
	if (!p)
		return 1;

	if (strncmp(p, "on", 2) == 0)
		fw_dump.fadump_enabled = 1;
	else if (strncmp(p, "off", 3) == 0)
		fw_dump.fadump_enabled = 0;
	else if (strncmp(p, "nocma", 5) == 0) {
		fw_dump.fadump_enabled = 1;
		fw_dump.nocma = 1;
	}

	return 0;
}
early_param("fadump", early_fadump_param);

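/*
 * Example kernel command lines (illustrative):
 *
 *	fadump=on crashkernel=4G  - enable fadump; use 4GB as the boot
 *				    memory size (subject to the
 *				    MAX_BOOT_MEM_RATIO cap above)
 *	fadump=nocma		  - enable fadump without CMA backing
 *	fadump=off		  - disable fadump explicitly
 *
 * The 'crashkernel=' base value is ignored, as fadump reserves memory at
 * its own predefined offset.
 */
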
/*
 * Look for fadump_reserve_mem= cmdline option
 * TODO: Remove references to 'fadump_reserve_mem=' parameter,
 * once the 'crashkernel=' parameter is fully adopted.
 */
static int __init early_fadump_reserve_mem(char *p)
{
	if (p)
		fw_dump.reserve_bootvar = memparse(p, &p);
	return 0;
}
early_param("fadump_reserve_mem", early_fadump_reserve_mem);

void crash_fadump(struct pt_regs *regs, const char *str)
{
	unsigned int msecs;
	struct fadump_crash_info_header *fdh = NULL;
	int old_cpu, this_cpu;
	/* Do not include first CPU */
	unsigned int ncpus = num_online_cpus() - 1;

	if (!should_fadump_crash())
		return;

	/*
	 * old_cpu == -1 means this is the first CPU which has come here,
	 * go ahead and trigger fadump.
	 *
	 * old_cpu != -1 means some other CPU is already on its way
	 * to trigger fadump, just keep looping here.
	 */
	this_cpu = smp_processor_id();
	old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu);

	if (old_cpu != -1) {
		atomic_inc(&cpus_in_fadump);

		/*
		 * We can't loop here indefinitely. Wait as long as fadump
		 * is in force. If we race with fadump un-registration this
		 * loop will break and then we go down to normal panic path
		 * and reboot. If fadump is in force the first crashing
		 * cpu will definitely trigger fadump.
		 */
		while (fw_dump.dump_registered)
			cpu_relax();
		return;
	}

	fdh = __va(fw_dump.fadumphdr_addr);
	fdh->crashing_cpu = crashing_cpu;
	crash_save_vmcoreinfo();

	if (regs)
		fdh->regs = *regs;
	else
		ppc_save_regs(&fdh->regs);

	fdh->online_mask = *cpu_online_mask;

	/*
	 * If we came in via system reset, wait a while for the secondary
	 * CPUs to enter.
	 */
	if (TRAP(&(fdh->regs)) == INTERRUPT_SYSTEM_RESET) {
		msecs = CRASH_TIMEOUT;
		while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0))
			mdelay(1);
	}

	fw_dump.ops->fadump_trigger(fdh, str);
}

u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
{
	struct elf_prstatus prstatus;

	memset(&prstatus, 0, sizeof(prstatus));
	/*
	 * FIXME: How do I get PID? Do I really need it?
	 * prstatus.pr_pid = ????
	 */
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	return buf;
}

void __init fadump_update_elfcore_header(char *bufp)
{
	struct elf_phdr *phdr;

	bufp += sizeof(struct elfhdr);

	/* First note is a place holder for cpu notes info. */
	phdr = (struct elf_phdr *)bufp;

	if (phdr->p_type == PT_NOTE) {
		phdr->p_paddr	= __pa(fw_dump.cpu_notes_buf_vaddr);
		phdr->p_offset	= phdr->p_paddr;
		phdr->p_filesz	= fw_dump.cpu_notes_buf_size;
		phdr->p_memsz	= fw_dump.cpu_notes_buf_size;
	}
}

static void *__init fadump_alloc_buffer(unsigned long size)
{
	unsigned long count, i;
	struct page *page;
	void *vaddr;

	vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!vaddr)
		return NULL;

	count = PAGE_ALIGN(size) / PAGE_SIZE;
	page = virt_to_page(vaddr);
	for (i = 0; i < count; i++)
		mark_page_reserved(page + i);
	return vaddr;
}

static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
{
	free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
}

s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus)
{
	/* Allocate buffer to hold cpu crash notes. */
	fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
	fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size);
	fw_dump.cpu_notes_buf_vaddr =
		(unsigned long)fadump_alloc_buffer(fw_dump.cpu_notes_buf_size);
	if (!fw_dump.cpu_notes_buf_vaddr) {
		pr_err("Failed to allocate %ld bytes for CPU notes buffer\n",
		       fw_dump.cpu_notes_buf_size);
		return -ENOMEM;
	}

	pr_debug("Allocated buffer for cpu notes of size %ld at 0x%lx\n",
		 fw_dump.cpu_notes_buf_size,
		 fw_dump.cpu_notes_buf_vaddr);
	return 0;
}

void fadump_free_cpu_notes_buf(void)
{
	if (!fw_dump.cpu_notes_buf_vaddr)
		return;

	fadump_free_buffer(fw_dump.cpu_notes_buf_vaddr,
			   fw_dump.cpu_notes_buf_size);
	fw_dump.cpu_notes_buf_vaddr = 0;
	fw_dump.cpu_notes_buf_size = 0;
}

static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
{
	if (mrange_info->is_static) {
		mrange_info->mem_range_cnt = 0;
		return;
	}

	kfree(mrange_info->mem_ranges);
	memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
	       (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
}

/*
 * Allocate or reallocate mem_ranges array in incremental units
 * of PAGE_SIZE.
 */
static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
{
	struct fadump_memory_range *new_array;
	u64 new_size;

	new_size = mrange_info->mem_ranges_sz + PAGE_SIZE;
	pr_debug("Allocating %llu bytes of memory for %s memory ranges\n",
		 new_size, mrange_info->name);

	new_array = krealloc(mrange_info->mem_ranges, new_size, GFP_KERNEL);
	if (new_array == NULL) {
		pr_err("Insufficient memory for setting up %s memory ranges\n",
		       mrange_info->name);
		fadump_free_mem_ranges(mrange_info);
		return -ENOMEM;
	}

	mrange_info->mem_ranges = new_array;
	mrange_info->mem_ranges_sz = new_size;
	mrange_info->max_mem_ranges = (new_size /
				       sizeof(struct fadump_memory_range));
	return 0;
}

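/*
 * Growth arithmetic for the krealloc() above (assuming a 4K PAGE_SIZE and
 * a 16-byte struct fadump_memory_range holding just base and size): each
 * resize adds one page, i.e. room for 4096 / 16 = 256 more ranges.
 */
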
static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
				       u64 base, u64 end)
{
	struct fadump_memory_range *mem_ranges = mrange_info->mem_ranges;
	bool is_adjacent = false;
	u64 start, size;

	if (base == end)
		return 0;

	/*
	 * Fold adjacent memory ranges to bring down the memory ranges/
	 * PT_LOAD segments count.
	 */
	if (mrange_info->mem_range_cnt) {
		start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
		size  = mem_ranges[mrange_info->mem_range_cnt - 1].size;

		if ((start + size) == base)
			is_adjacent = true;
	}
	if (!is_adjacent) {
		/* resize the array on reaching the limit */
		if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
			int ret;

			if (mrange_info->is_static) {
				pr_err("Reached array size limit for %s memory ranges\n",
				       mrange_info->name);
				return -ENOSPC;
			}

			ret = fadump_alloc_mem_ranges(mrange_info);
			if (ret)
				return ret;

			/* Update to the new resized array */
			mem_ranges = mrange_info->mem_ranges;
		}

		start = base;
		mem_ranges[mrange_info->mem_range_cnt].base = start;
		mrange_info->mem_range_cnt++;
	}

	mem_ranges[mrange_info->mem_range_cnt - 1].size = (end - start);
	pr_debug("%s_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
		 mrange_info->name, (mrange_info->mem_range_cnt - 1),
		 start, end - 1, (end - start));
	return 0;
}

static int fadump_exclude_reserved_area(u64 start, u64 end)
{
	u64 ra_start, ra_end;
	int ret = 0;

	ra_start = fw_dump.reserve_dump_area_start;
	ra_end = ra_start + fw_dump.reserve_dump_area_size;

	if ((ra_start < end) && (ra_end > start)) {
		if ((start < ra_start) && (end > ra_end)) {
			ret = fadump_add_mem_range(&crash_mrange_info,
						   start, ra_start);
			if (ret)
				return ret;

			ret = fadump_add_mem_range(&crash_mrange_info,
						   ra_end, end);
		} else if (start < ra_start) {
			ret = fadump_add_mem_range(&crash_mrange_info,
						   start, ra_start);
		} else if (ra_end < end) {
			ret = fadump_add_mem_range(&crash_mrange_info,
						   ra_end, end);
		}
	} else
		ret = fadump_add_mem_range(&crash_mrange_info, start, end);

	return ret;
}

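/*
 * The overlap cases handled above, with [ra_start, ra_end) being the
 * reserved dump area and [start, end) the range being added:
 *
 *	1) range spans the area     -> add [start, ra_start) + [ra_end, end)
 *	2) overlaps only its start  -> add [start, ra_start)
 *	3) overlaps only its end    -> add [ra_end, end)
 *	4) no overlap at all        -> add [start, end) unchanged
 */
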
static int fadump_init_elfcore_header(char *bufp)
{
	struct elfhdr *elf;

	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
#if defined(_CALL_ELF)
	elf->e_flags = _CALL_ELF;
#else
	elf->e_flags = 0;
#endif
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = 0;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;

	return 0;
}

/*
 * Traverse through memblock structure and setup crash memory ranges. These
 * ranges will be used to create PT_LOAD program headers in elfcore header.
 */
static int fadump_setup_crash_memory_ranges(void)
{
	u64 i, start, end;
	int ret;

	pr_debug("Setup crash memory ranges.\n");
	crash_mrange_info.mem_range_cnt = 0;

	/*
	 * Boot memory region(s) registered with firmware are moved to
	 * different location at the time of crash. Create separate program
	 * header(s) for this memory chunk(s) with the correct offset.
	 */
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		start = fw_dump.boot_mem_addr[i];
		end = start + fw_dump.boot_mem_sz[i];
		ret = fadump_add_mem_range(&crash_mrange_info, start, end);
		if (ret)
			return ret;
	}

	for_each_mem_range(i, &start, &end) {
		/*
		 * skip the memory chunk that is already added
		 * (0 through boot_memory_top).
		 */
		if (start < fw_dump.boot_mem_top) {
			if (end > fw_dump.boot_mem_top)
				start = fw_dump.boot_mem_top;
			else
				continue;
		}

		/* add this range excluding the reserved dump area. */
		ret = fadump_exclude_reserved_area(start, end);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * If the given physical address falls within the boot memory region then
 * return the relocated address that points to the dump region reserved
 * for saving initial boot memory contents.
 */
static inline unsigned long fadump_relocate(unsigned long paddr)
{
	unsigned long raddr, rstart, rend, rlast, hole_size;
	int i;

	hole_size = 0;
	rlast = 0;
	raddr = paddr;
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		rstart = fw_dump.boot_mem_addr[i];
		rend = rstart + fw_dump.boot_mem_sz[i];
		hole_size += (rstart - rlast);

		if (paddr >= rstart && paddr < rend) {
			raddr += fw_dump.boot_mem_dest_addr - hole_size;
			break;
		}

		rlast = rend;
	}

	pr_debug("vmcoreinfo: paddr = 0x%lx, raddr = 0x%lx\n", paddr, raddr);
	return raddr;
}

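/*
 * Relocation arithmetic, by example (illustrative addresses): if the first
 * boot memory region starts at 0 and boot_mem_dest_addr is 0x40000000, a
 * vmcoreinfo note at paddr 0x1000 relocates to raddr 0x40001000, since that
 * page's original contents were moved into the reserved dump region by
 * firmware at crash time.
 */
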
static int fadump_create_elfcore_headers(char *bufp)
{
	unsigned long long raddr, offset;
	struct elf_phdr *phdr;
	struct elfhdr *elf;
	int i, j;

	fadump_init_elfcore_header(bufp);
	elf = (struct elfhdr *)bufp;
	bufp += sizeof(struct elfhdr);

	/*
	 * setup ELF PT_NOTE, place holder for cpu notes info. The notes info
	 * will be populated during second kernel boot after crash. Hence
	 * this PT_NOTE will always be the first elf note.
	 *
	 * NOTE: Any new ELF note addition should be placed after this note.
	 */
	phdr = (struct elf_phdr *)bufp;
	bufp += sizeof(struct elf_phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_flags = 0;
	phdr->p_vaddr = 0;
	phdr->p_align = 0;
	phdr->p_offset = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = 0;
	phdr->p_memsz = 0;

	(elf->e_phnum)++;

	/* setup ELF PT_NOTE for vmcoreinfo */
	phdr = (struct elf_phdr *)bufp;
	bufp += sizeof(struct elf_phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_flags = 0;
	phdr->p_vaddr = 0;
	phdr->p_align = 0;
	phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
	phdr->p_offset = phdr->p_paddr;
	phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;

	/* Increment number of program headers. */
	(elf->e_phnum)++;

	/* setup PT_LOAD sections. */
	j = 0;
	offset = 0;
	raddr = fw_dump.boot_mem_addr[0];
	for (i = 0; i < crash_mrange_info.mem_range_cnt; i++) {
		u64 mbase, msize;

		mbase = crash_mrange_info.mem_ranges[i].base;
		msize = crash_mrange_info.mem_ranges[i].size;
		if (!msize)
			continue;

		phdr = (struct elf_phdr *)bufp;
		bufp += sizeof(struct elf_phdr);
		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= mbase;

		if (mbase == raddr) {
			/*
			 * The entire real memory region will be moved by
			 * firmware to the specified destination_address.
			 * Hence set the correct offset.
			 */
			phdr->p_offset = fw_dump.boot_mem_dest_addr + offset;
			if (j < (fw_dump.boot_mem_regs_cnt - 1)) {
				offset += fw_dump.boot_mem_sz[j];
				raddr = fw_dump.boot_mem_addr[++j];
			}
		}

		phdr->p_paddr = mbase;
		phdr->p_vaddr = (unsigned long)__va(mbase);
		phdr->p_filesz = msize;
		phdr->p_memsz = msize;

		/* Increment number of program headers. */
		(elf->e_phnum)++;
	}
	return 0;
}

static unsigned long init_fadump_header(unsigned long addr)
{
	struct fadump_crash_info_header *fdh;

	if (!addr)
		return 0;

	fdh = __va(addr);
	addr += sizeof(struct fadump_crash_info_header);

	memset(fdh, 0, sizeof(struct fadump_crash_info_header));
	fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
	fdh->elfcorehdr_addr = addr;
	/* We will set the crashing cpu id in crash_fadump() during crash. */
	fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;

	return addr;
}

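/*
 * Resulting layout at fw_dump.fadumphdr_addr (derived from the code above):
 *
 *	+--------------------------------------+ <- fadumphdr_addr
 *	| struct fadump_crash_info_header      |
 *	+--------------------------------------+ <- fdh->elfcorehdr_addr
 *	| ELF core headers (ehdr, phdrs, ...)  |
 *	+--------------------------------------+
 *
 * register_fadump() below builds the ELF headers at the returned address.
 */
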
static int register_fadump(void)
{
	unsigned long addr;
	void *vaddr;
	int ret;

	/*
	 * If no memory is reserved then we can not register for firmware-
	 * assisted dump.
	 */
	if (!fw_dump.reserve_dump_area_size)
		return -ENODEV;

	ret = fadump_setup_crash_memory_ranges();
	if (ret)
		return ret;

	addr = fw_dump.fadumphdr_addr;

	/* Initialize fadump crash info header. */
	addr = init_fadump_header(addr);
	vaddr = __va(addr);

	pr_debug("Creating ELF core headers at %#016lx\n", addr);
	fadump_create_elfcore_headers(vaddr);

	/* register the future kernel dump with firmware. */
	pr_debug("Registering for firmware-assisted kernel dump...\n");
	return fw_dump.ops->fadump_register(&fw_dump);
}

void fadump_cleanup(void)
{
	if (!fw_dump.fadump_supported)
		return;

	/* Invalidate the registration only if dump is active. */
	if (fw_dump.dump_active) {
		pr_debug("Invalidating firmware-assisted dump registration\n");
		fw_dump.ops->fadump_invalidate(&fw_dump);
	} else if (fw_dump.dump_registered) {
		/* Un-register Firmware-assisted dump if it was registered. */
		fw_dump.ops->fadump_unregister(&fw_dump);
		fadump_free_mem_ranges(&crash_mrange_info);
	}

	if (fw_dump.ops->fadump_cleanup)
		fw_dump.ops->fadump_cleanup(&fw_dump);
}

static void fadump_free_reserved_memory(unsigned long start_pfn,
					unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long time_limit = jiffies + HZ;

	pr_info("freeing reserved memory (0x%llx - 0x%llx)\n",
		PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		free_reserved_page(pfn_to_page(pfn));

		if (time_after(jiffies, time_limit)) {
			cond_resched();
			time_limit = jiffies + HZ;
		}
	}
}

/*
 * Skip memory holes and free memory that was actually reserved.
 */
static void fadump_release_reserved_area(u64 start, u64 end)
{
	unsigned long reg_spfn, reg_epfn;
	u64 tstart, tend, spfn, epfn;
	int i;

	spfn = PHYS_PFN(start);
	epfn = PHYS_PFN(end);
	for_each_mem_pfn_range(i, MAX_NUMNODES, &reg_spfn, &reg_epfn, NULL) {
		tstart = max_t(u64, spfn, reg_spfn);
		tend   = min_t(u64, epfn, reg_epfn);

		if (tstart < tend) {
			fadump_free_reserved_memory(tstart, tend);

			if (tend == epfn)
				break;

			spfn = tend;
		}
	}
}

/*
 * Sort the mem ranges in-place and merge adjacent ranges
 * to minimize the memory ranges count.
 */
static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
{
	struct fadump_memory_range *mem_ranges;
	struct fadump_memory_range tmp_range;
	u64 base, size;
	int i, j, idx;

	if (!reserved_mrange_info.mem_range_cnt)
		return;

	/* Sort the memory ranges */
	mem_ranges = mrange_info->mem_ranges;
	for (i = 0; i < mrange_info->mem_range_cnt; i++) {
		idx = i;
		for (j = (i + 1); j < mrange_info->mem_range_cnt; j++) {
			if (mem_ranges[idx].base > mem_ranges[j].base)
				idx = j;
		}
		if (idx != i) {
			tmp_range = mem_ranges[idx];
			mem_ranges[idx] = mem_ranges[i];
			mem_ranges[i] = tmp_range;
		}
	}

	/* Merge adjacent reserved ranges */
	idx = 0;
	for (i = 1; i < mrange_info->mem_range_cnt; i++) {
		base = mem_ranges[i-1].base;
		size = mem_ranges[i-1].size;
		if (mem_ranges[i].base == (base + size))
			mem_ranges[idx].size += mem_ranges[i].size;
		else {
			idx++;
			if (i == idx)
				continue;

			mem_ranges[idx] = mem_ranges[i];
		}
	}
	mrange_info->mem_range_cnt = idx + 1;
}

/*
 * Scan reserved-ranges to consider them while reserving/releasing
 * memory for FADump.
 */
static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
{
	const __be32 *prop;
	int len, ret = -1;
	unsigned long i;

	/* reserved-ranges already scanned */
	if (reserved_mrange_info.mem_range_cnt != 0)
		return;

	prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
	if (!prop)
		return;

	/*
	 * Each reserved range is an (address, size) pair, 2 cells each,
	 * totalling 4 cells per range.
	 */
	for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
		u64 base, size;

		base = of_read_number(prop + (i * 4) + 0, 2);
		size = of_read_number(prop + (i * 4) + 2, 2);

		if (base && size) {
			ret = fadump_add_mem_range(&reserved_mrange_info,
						   base, base + size);
			if (ret < 0) {
				pr_warn("some reserved ranges are ignored!\n");
				break;
			}
		}
	}

	/* Compact reserved ranges */
	sort_and_merge_mem_ranges(&reserved_mrange_info);
}

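/*
 * Example device tree property parsed above (hypothetical values; each
 * range is two 2-cell numbers, i.e. 4 cells total):
 *
 *	reserved-ranges = <0x0 0x20000000 0x0 0x01000000>;
 *
 * describes one range with base 0x20000000 and size 0x01000000 (16MB),
 * recorded as [0x20000000, 0x21000000) in reserved_mrange_info.
 */
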
/*
 * Release the memory that was reserved during early boot to preserve the
 * crashed kernel's memory contents except reserved dump area (permanent
 * reservation) and reserved ranges used by F/W. The released memory will
 * be available for general use.
 */
static void fadump_release_memory(u64 begin, u64 end)
{
	u64 ra_start, ra_end, tstart;
	int i, ret;

	ra_start = fw_dump.reserve_dump_area_start;
	ra_end = ra_start + fw_dump.reserve_dump_area_size;

	/*
	 * If reserved ranges array limit is hit, overwrite the last reserved
	 * memory range with reserved dump area to ensure it is excluded from
	 * the memory being released (reused for next FADump registration).
	 */
	if (reserved_mrange_info.mem_range_cnt ==
	    reserved_mrange_info.max_mem_ranges)
		reserved_mrange_info.mem_range_cnt--;

	ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
	if (ret != 0)
		return;

	/* Get the reserved ranges list in order first. */
	sort_and_merge_mem_ranges(&reserved_mrange_info);

	/* Exclude reserved ranges and release remaining memory */
	tstart = begin;
	for (i = 0; i < reserved_mrange_info.mem_range_cnt; i++) {
		ra_start = reserved_mrange_info.mem_ranges[i].base;
		ra_end = ra_start + reserved_mrange_info.mem_ranges[i].size;

		if (tstart >= ra_end)
			continue;

		if (tstart < ra_start)
			fadump_release_reserved_area(tstart, ra_start);
		tstart = ra_end;
	}

	if (tstart < end)
		fadump_release_reserved_area(tstart, end);
}

static void fadump_invalidate_release_mem(void)
{
	mutex_lock(&fadump_mutex);
	if (!fw_dump.dump_active) {
		mutex_unlock(&fadump_mutex);
		return;
	}

	fadump_cleanup();
	mutex_unlock(&fadump_mutex);

	fadump_release_memory(fw_dump.boot_mem_top, memblock_end_of_DRAM());
	fadump_free_cpu_notes_buf();

	/*
	 * Setup kernel metadata and initialize the kernel dump
	 * memory structure for FADump re-registration.
	 */
	if (fw_dump.ops->fadump_setup_metadata &&
	    (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
		pr_warn("Failed to setup kernel metadata!\n");
	fw_dump.ops->fadump_init_mem_struct(&fw_dump);
}

static ssize_t release_mem_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count)
{
	int input = -1;

	if (!fw_dump.dump_active)
		return -EPERM;

	if (kstrtoint(buf, 0, &input))
		return -EINVAL;

	if (input == 1) {
		/*
		 * Take away the '/proc/vmcore'. We are releasing the dump
		 * memory, hence it will not be valid anymore.
		 */
#ifdef CONFIG_PROC_VMCORE
		vmcore_cleanup();
#endif
		fadump_invalidate_release_mem();

	} else
		return -EINVAL;
	return count;
}

/* Release the reserved memory and disable the FADump */
static void __init unregister_fadump(void)
{
	fadump_cleanup();
	fadump_release_memory(fw_dump.reserve_dump_area_start,
			      fw_dump.reserve_dump_area_size);
	fw_dump.fadump_enabled = 0;
	kobject_put(fadump_kobj);
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
}

static ssize_t mem_reserved_show(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%ld\n", fw_dump.reserve_dump_area_size);
}

static ssize_t registered_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%d\n", fw_dump.dump_registered);
}

static ssize_t registered_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	int ret = 0;
	int input = -1;

	if (!fw_dump.fadump_enabled || fw_dump.dump_active)
		return -EPERM;

	if (kstrtoint(buf, 0, &input))
		return -EINVAL;

	mutex_lock(&fadump_mutex);

	switch (input) {
	case 0:
		if (fw_dump.dump_registered == 0)
			goto unlock_out;

		/* Un-register Firmware-assisted dump */
		pr_debug("Un-register firmware-assisted dump\n");
		fw_dump.ops->fadump_unregister(&fw_dump);
		break;
	case 1:
		if (fw_dump.dump_registered == 1) {
			/* Un-register Firmware-assisted dump */
			fw_dump.ops->fadump_unregister(&fw_dump);
		}
		/* Register Firmware-assisted dump */
		ret = register_fadump();
		break;
	default:
		ret = -EINVAL;
		break;
	}

unlock_out:
	mutex_unlock(&fadump_mutex);
	return ret < 0 ? ret : count;
}

static int fadump_region_show(struct seq_file *m, void *private)
{
	if (!fw_dump.fadump_enabled)
		return 0;

	mutex_lock(&fadump_mutex);
	fw_dump.ops->fadump_region_show(&fw_dump, m);
	mutex_unlock(&fadump_mutex);
	return 0;
}

static struct kobj_attribute release_attr = __ATTR_WO(release_mem);
static struct kobj_attribute enable_attr = __ATTR_RO(enabled);
static struct kobj_attribute register_attr = __ATTR_RW(registered);
static struct kobj_attribute mem_reserved_attr = __ATTR_RO(mem_reserved);

static struct attribute *fadump_attrs[] = {
	&enable_attr.attr,
	&register_attr.attr,
	&mem_reserved_attr.attr,
	NULL,
};

ATTRIBUTE_GROUPS(fadump);

DEFINE_SHOW_ATTRIBUTE(fadump_region);

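/*
 * Resulting userspace interface (illustrative shell usage; release_mem is
 * only created when a dump is active):
 *
 *	cat /sys/kernel/fadump/enabled		# 1 if fadump is enabled
 *	cat /sys/kernel/fadump/mem_reserved	# bytes reserved for fadump
 *	echo 1 > /sys/kernel/fadump/registered	# (re)register for a dump
 *	echo 1 > /sys/kernel/fadump/release_mem	# release dump memory
 *	cat /sys/kernel/debug/powerpc/fadump_region
 */
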
static void __init fadump_init_files(void)
{
	int rc = 0;

	fadump_kobj = kobject_create_and_add("fadump", kernel_kobj);
	if (!fadump_kobj) {
		pr_err("failed to create fadump kobject\n");
		return;
	}

	debugfs_create_file("fadump_region", 0444, arch_debugfs_dir, NULL,
			    &fadump_region_fops);

	if (fw_dump.dump_active) {
		rc = sysfs_create_file(fadump_kobj, &release_attr.attr);
		if (rc)
			pr_err("unable to create release_mem sysfs file (%d)\n",
			       rc);
	}

	rc = sysfs_create_groups(fadump_kobj, fadump_groups);
	if (rc) {
		pr_err("sysfs group creation failed (%d), unregistering FADump",
		       rc);
		unregister_fadump();
		return;
	}

	/*
	 * The FADump sysfs entries were moved from kernel_kobj to fadump_kobj;
	 * create symlinks at the old location to maintain backward
	 * compatibility.
	 *
	 *   - fadump_enabled -> fadump/enabled
	 *   - fadump_registered -> fadump/registered
	 *   - fadump_release_mem -> fadump/release_mem
	 */
	rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
						  "enabled", "fadump_enabled");
	if (rc) {
		pr_err("unable to create fadump_enabled symlink (%d)", rc);
		return;
	}

	rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
						  "registered",
						  "fadump_registered");
	if (rc) {
		pr_err("unable to create fadump_registered symlink (%d)", rc);
		sysfs_remove_link(kernel_kobj, "fadump_enabled");
		return;
	}

	if (fw_dump.dump_active) {
		rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj,
							  fadump_kobj,
							  "release_mem",
							  "fadump_release_mem");
		if (rc)
			pr_err("unable to create fadump_release_mem symlink (%d)",
			       rc);
	}
}

/*
 * Prepare for firmware-assisted dump.
 */
int __init setup_fadump(void)
{
	if (!fw_dump.fadump_supported)
		return 0;

	fadump_init_files();
	fadump_show_config();

	if (!fw_dump.fadump_enabled)
		return 1;

	/*
	 * If dump data is available then see if it is valid and prepare for
	 * saving it to the disk.
	 */
	if (fw_dump.dump_active) {
		/*
		 * if dump process fails then invalidate the registration
		 * and release memory before proceeding for re-registration.
		 */
		if (fw_dump.ops->fadump_process(&fw_dump) < 0)
			fadump_invalidate_release_mem();
	}
	/* Initialize the kernel dump memory structure and register with f/w */
	else if (fw_dump.reserve_dump_area_size) {
		fw_dump.ops->fadump_init_mem_struct(&fw_dump);
		register_fadump();
	}

	/*
	 * In case of panic, fadump is triggered via ppc_panic_event()
	 * panic notifier. Setting crash_kexec_post_notifiers to 'true'
	 * lets panic() function take crash friendly path before panic
	 * notifiers are invoked.
	 */
	crash_kexec_post_notifiers = true;

	return 1;
}

/*
 * Use subsys_initcall_sync() here because there is a dependency on
 * crash_save_vmcoreinfo_init(), which must run first to ensure vmcoreinfo
 * initialization is done before registering with f/w.
 */
subsys_initcall_sync(setup_fadump);

#else /* !CONFIG_PRESERVE_FA_DUMP */

/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
				      int depth, void *data)
{
	if ((depth != 1) || (strcmp(uname, "ibm,opal") != 0))
		return 0;

	opal_fadump_dt_scan(&fw_dump, node);
	return 1;
}

/*
 * When dump is active but PRESERVE_FA_DUMP is enabled on the kernel,
 * preserve crash data. The subsequent memory preserving kernel boot
 * is likely to process this crash data.
 */
int __init fadump_reserve_mem(void)
{
	if (fw_dump.dump_active) {
		/*
		 * If last boot has crashed then reserve all the memory
		 * above boot memory to preserve crash data.
		 */
		pr_info("Preserving crash data for processing in next boot.\n");
		fadump_reserve_crash_area(fw_dump.boot_mem_top);
	} else
		pr_debug("FADump-aware kernel.\n");

	return 1;
}

#endif /* CONFIG_PRESERVE_FA_DUMP */

/* Preserve everything above the base address */
static void __init fadump_reserve_crash_area(u64 base)
{
	u64 i, mstart, mend, msize;

	for_each_mem_range(i, &mstart, &mend) {
		msize = mend - mstart;

		if ((mstart + msize) < base)
			continue;

		if (mstart < base) {
			msize -= (base - mstart);
			mstart = base;
		}

		pr_info("Reserving %lluMB of memory at %#016llx for preserving crash data",
			(msize >> 20), mstart);
		memblock_reserve(mstart, msize);
	}
}

unsigned long __init arch_reserved_kernel_pages(void)
{
	return memblock_reserved_size() / PAGE_SIZE;
}