/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif

# define KEXEC_CONTROL_CODE_MAX_SIZE	2048
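/*
 * The PA_* / VA_* constants above index the page_list array that
 * machine_kexec() hands to relocate_kernel(); PAGES_NR is the number of
 * slots.  Roughly, on 64-bit (simplified sketch of the assignments in
 * arch/x86/kernel/machine_kexec_64.c):
 *
 *	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
 *	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
 *	page_list[PA_TABLE_PAGE]   = (unsigned long)__pa(...);
 */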
#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/bootparam.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can
 * return, i.e. the highest page that is mapped directly into kernel
 * memory so that kmap() is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE	4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from a 64-bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and a second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif
/* Memory to backup during crash kdump */
#define KEXEC_BACKUP_SRC_START	(0UL)
#define KEXEC_BACKUP_SRC_END	(640 * 1024UL - 1)	/* 640K */
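/*
 * The crash (kdump) path copies this range aside before the dump-capture
 * kernel can clobber low memory: purgatory copies the first 640K into a
 * backup segment, and the ELF core headers point at the copy so the
 * vmcore still shows the original contents.
 */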
/*
 * The CPU does not save ss and sp on the stack if execution was already
 * in kernel mode when the NMI occurred. This code fixes that up.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
				      struct pt_regs *oldregs)
{
#ifdef CONFIG_X86_32
	newregs->sp = (unsigned long)&(oldregs->sp);
	asm volatile("xorl %%eax, %%eax\n\t"
		     "movw %%ss, %%ax\n\t"
		     :"=a"(newregs->ss));
#endif
}
/*
 * This function captures the register state if we are coming in via
 * panic; otherwise it just fixes up ss and sp, since we came in via a
 * kernel-mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
		crash_fixup_ss_esp(newregs, oldregs);
	} else {
#ifdef CONFIG_X86_32
102 asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
103 asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
104 asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
105 asm volatile("movl %%esi,%0" : "=m"(newregs->si));
106 asm volatile("movl %%edi,%0" : "=m"(newregs->di));
107 asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
108 asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
109 asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
110 asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
111 asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
112 asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
113 asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
114 asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
116 asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
117 asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
118 asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
119 asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
120 asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
121 asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
122 asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
123 asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
124 asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
125 asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
126 asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
127 asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
128 asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
129 asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
130 asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
131 asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
132 asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
133 asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
134 asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
136 newregs->ip = _THIS_IP_;
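/*
 * Rough sketch of the consumer (see __crash_kexec() in
 * kernel/kexec_core.c): a panic with no exception frame passes
 * regs == NULL and the live registers are snapshotted instead.
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);
 *	crash_save_vmcoreinfo();
 *	machine_crash_shutdown(&fixed_regs);
 *
 * Simplified; the names other than crash_setup_regs() come from the
 * generic kexec core, not this header.
 */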
#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int sme_active);
#endif
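/*
 * Rough sketch of the 64-bit call site (machine_kexec() in
 * arch/x86/kernel/machine_kexec_64.c): preserve_context is non-zero for
 * kexec jump (CONFIG_KEXEC_JUMP), and sme_active tells the relocation
 * code the pages must be accessed unencrypted while SME is enabled:
 *
 *	image->start = relocate_kernel((unsigned long)image->head,
 *				       (unsigned long)page_list,
 *				       image->start,
 *				       image->preserve_context,
 *				       sme_active());
 */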
#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Details of backup region */
	unsigned long backup_src_start;
	unsigned long backup_src_sz;

	/* Physical address of backup segment */
	unsigned long backup_load_addr;

	/* Core ELF header buffer */
	void *elf_headers;
	unsigned long elf_headers_sz;
	unsigned long elf_load_addr;
};
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_X86_64
/*
 * The number and order of elements in this structure must match those
 * in arch/x86/purgatory/entry64.S. If you make a change here, make the
 * corresponding change in purgatory too.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};
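/*
 * The producer side lives in arch/x86/kernel/kexec-bzimage64.c: rsi is
 * pointed at the boot_params copy and rip at the bzImage 64-bit entry
 * point before the block is patched into purgatory.  A simplified
 * sketch (field values illustrative):
 *
 *	struct kexec_entry64_regs regs64;
 *
 *	regs64.rsi = bootparam_load_addr;
 *	regs64.rip = kernel_load_addr + 0x200;
 *	kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64,
 *				       sizeof(regs64), 0);
 */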
extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
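/*
 * These hooks exist for AMD SME: the new kernel initially accesses the
 * kexec control pages unencrypted, so they must not carry the
 * encryption mask.  The x86-64 implementation wraps
 * set_memory_decrypted() after allocation and set_memory_encrypted()
 * again before the pages are freed.
 */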
#endif

typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
extern void kdump_nmi_shootdown_cpus(void);
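/*
 * crash_vmclear_loaded_vmcss is registered by KVM (VMX) so the crash
 * path can VMCLEAR any loaded VMCSs first; otherwise the CPU could
 * later flush cached VMCS state into memory the kdump kernel now owns.
 * kdump_nmi_shootdown_cpus() uses NMIs to stop the other CPUs and save
 * their register state before the capture kernel boots.
 */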
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */