/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask, then tests the pending
 * event status with a single and operation.  If there are pending
 * events, enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preemption here doesn't matter because it will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)

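/*
 * Roughly the C equivalent of the function above (a sketch; the field
 * names are those of struct vcpu_info in xen/interface/xen.h):
 *
 *	vcpu_info->evtchn_upcall_mask = 0;
 *	if (vcpu_info->evtchn_upcall_pending)
 *		check_events();
 */
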
/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)

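/*
 * Note that, unlike enabling, disabling needs no pending check and no
 * hypercall: raising the mask never obliges us to deliver anything, and
 * events arriving while masked are picked up when the mask is cleared
 * again (see xen_irq_enable_direct above).
 */
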
/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
	ret
SYM_FUNC_END(xen_save_fl_direct)

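/*
 * Worked example of the %ah trick above: if the event mask is 0
 * (events enabled), testb sets ZF, setz makes %ah = 1, and addb doubles
 * it to 2.  %ah is bits 8-15 of %rax, so the return value has bit 9
 * set, i.e. X86_EFLAGS_IF (0x200).  If the mask is non-zero, %ah ends
 * up 0 and X86_EFLAGS_IF is clear, with the rest of that byte 0 either
 * way.
 */
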
/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, we check for unmasked pending events and
 * enter the hypervisor to get them delivered if so.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
	testw $X86_EFLAGS_IF, %di
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preemption here doesn't matter because it will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_restore_fl_direct)

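/*
 * The cmpw above does two checks at once: the 16-bit load covers both
 * evtchn_upcall_pending (the low byte) and evtchn_upcall_mask (the
 * byte immediately after it in struct vcpu_info), so the word equals
 * 0x0001 exactly when an event is pending and events are unmasked.
 */
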
/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	/*
	 * Save the C-ABI caller-clobbered registers: this is reached
	 * from contexts that expect all registers to be preserved.
	 */
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	ret
SYM_FUNC_END(check_events)

SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct);

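/*
 * Background: a Xen PV guest kernel doesn't run in ring 0 and cannot
 * read %cr2 itself, so on a page fault the hypervisor stashes the
 * faulting address in vcpu_info->arch.cr2, which is what both helpers
 * above read.  xen_read_cr2_direct is the fast path for the common
 * case where vcpu_info lives in percpu data.
 */
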
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	/*
	 * Xen pushes %rcx and %r11 on top of the iret-style exception
	 * frame; strip them and hand off to the native handler.
	 */
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

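/*
 * For example, "xen_pv_trap asm_exc_int3" below expands (modulo symbol
 * annotations) to:
 *
 *	SYM_CODE_START(xen_asm_exc_int3)
 *		pop %rcx
 *		pop %r11
 *		jmp asm_exc_int3
 *	SYM_CODE_END(xen_asm_exc_int3)
 *	_ASM_NOKPROBE(xen_asm_exc_int3)
 */
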
xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_hypervisor_callback

	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	/* Pad each stub to XEN_EARLY_IDT_HANDLER_SIZE bytes with int3 */
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

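/*
 * As with xen_pv_trap above, each early stub strips the %rcx/%r11 pair
 * that Xen pushes and falls through to the corresponding native early
 * IDT handler.  The stubs are fixed-size so the array can be indexed
 * by vector number, mirroring early_idt_handler_array.
 */
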
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rcx
 */
SYM_CODE_START(xen_iret)
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_iret)

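/*
 * The pushq $0 supplies the "flags" word in the frame above.
 * xen_sysret64 below pushes VGCF_in_syscall there instead, in effect
 * telling Xen it may return to the guest with syscall/sysret semantics
 * (%rcx/%r11 holding rip/rflags) rather than restoring them from the
 * frame.
 */
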
SYM_CODE_START(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 *
	 * tss.sp2 is scratch space.
	 */
	movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Build the iret frame the hypercall expects */
	pushq $__USER_DS
	pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
	jmp hypercall_iret
SYM_CODE_END(xen_sysret64)

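/*
 * Reading the pushes above top down: %rcx (rip), __USER_CS (cs), %r11
 * (rflags), the saved user rsp, __USER_DS (ss) -- the standard iret
 * frame, reconstructed from the SYSCALL convention where user rip and
 * rflags arrive in %rcx and %r11 -- plus the VGCF flags word on top,
 * as in the frame diagram above.
 */
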
/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_FUNC_START(xen_syscall_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_FUNC_END(xen_syscall_target)

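/*
 * Offset check for the two movq fixups above: after popping %rcx/%r11,
 * the frame is rip at 0*8(%rsp), cs at 1*8, rflags at 2*8, rsp at 3*8
 * and ss at 4*8 -- hence CS is patched at 1*8 and SS at 4*8.  The same
 * layout applies to the compat variants below.
 */
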
#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_FUNC_START(xen_syscall32_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_FUNC_END(xen_syscall32_target)

/* 32-bit compat sysenter target */
SYM_FUNC_START(xen_sysenter_target)
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This
	 * means that we don't need to guard against single step
	 * exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_FUNC_END(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

/*
 * Without IA32 emulation the 32-bit entry points just fail the call:
 * discard Xen's %rcx/%r11, return -ENOSYS and go straight back to the
 * guest via the iret hypercall.
 */
SYM_FUNC_START_ALIAS(xen_syscall32_target)
SYM_FUNC_START(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_FUNC_END(xen_sysenter_target)
SYM_FUNC_END_ALIAS(xen_syscall32_target)

#endif	/* CONFIG_IA32_EMULATION */