/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H
/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all regs the platform has.  For i386, that's just these. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else	/* CONFIG_X86_64 */
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#endif	/* CONFIG_X86_64 */
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
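/*
 * Worked example from the definitions above: on x86_64 this evaluates to
 * (bits 1-6 | bits 7-8) & ~bit 0 == 0x1fe, i.e. every clobberable register
 * except %rax, which carries the return value.
 */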
#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>
struct thread_struct;
struct desc_ptr;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
	void *func;
};
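/*
 * An op is installed in one of these slots by wrapping a function
 * pointer; a sketch of the helper that asm/paravirt.h provides
 * (illustrative, shown here for context):
 *
 *	#define PV_CALLEE_SAVE(func)				\
 *		((struct paravirt_callee_save) { func })
 *
 * where func has been given a PV_CALLEE_SAVE_REGS_THUNK so that it
 * preserves the extra registers this convention requires.
 */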
/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;

#ifdef CONFIG_X86_64
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	const char *name;
} __no_randomize_layout;
struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);
} __no_randomize_layout;
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long long (*steal_clock)(int cpu);
} __no_randomize_layout;
struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif
	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*load_sp0)(unsigned long sp0);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);
	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);
	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);
	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
} __no_randomize_layout;
struct pv_irq_ops {
	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: These functions' callers expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);
} __no_randomize_layout;
struct pv_mmu_ops {
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 const struct flush_tlb_info *info);

	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);
	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);
	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;
#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);

#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if CONFIG_PGTABLE_LEVELS >= 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
	struct pv_lazy_ops lazy_mode;

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
} __no_randomize_layout;
struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
} __no_randomize_layout;
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;
#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)
/* Generate patchable code, with the default asm parameters. */
#define paravirt_call							\
	"PARAVIRT_CALL type=\"%c[paravirt_typenum]\""			\
	" clobber=\"%c[paravirt_clobber]\""				\
	" pv_opptr=\"%c[paravirt_opptr]\";"
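/*
 * Illustrative expansion (assumed shape, not verbatim compiler output):
 * inside an asm() statement, paravirt_call becomes a single line of the
 * form
 *
 *	PARAVIRT_CALL type="<typenum>" clobber="<mask>" pv_opptr="<&op>";
 *
 * which the assembler expands via the PARAVIRT_CALL .macro at the bottom
 * of this file into the indirect call plus its .parainstructions record.
 */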
/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

#define DEF_NATIVE(ops, name, code)					\
	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
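/*
 * Example usage (cf. arch/x86/kernel/paravirt_patch_64.c):
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *
 * This emits the native instruction bytes between the labels
 * start_pv_irq_ops_irq_disable and end_pv_irq_ops_irq_disable, which a
 * patch function can later copy over a call site via
 * paravirt_patch_insns().
 */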
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);
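/*
 * A minimal sketch of how a patch function ties the pieces together,
 * assuming sequences defined with DEF_NATIVE as above (the real
 * implementations live in arch/x86/kernel/paravirt_patch_*.c):
 *
 *	unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 *			      unsigned long addr, unsigned len)
 *	{
 *		switch (type) {
 *		case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
 *			return paravirt_patch_insns(ibuf, len,
 *					start_pv_irq_ops_irq_disable,
 *					end_pv_irq_ops_irq_disable);
 *		default:
 *			return paravirt_patch_default(type, clobbers,
 *						      ibuf, addr, len);
 *		}
 *	}
 */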
int paravirt_disable_iospace(void);
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
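/*
 * For example, asm/paravirt.h wraps the cr2 accessors roughly as follows
 * (illustrative sketch):
 *
 *	static inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *	}
 *
 *	static inline void write_cr2(unsigned long x)
 *	{
 *		PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
 *	}
 */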
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else	/* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */
#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif
#define PVOP_RETMASK(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask;							\
	})
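/*
 * E.g. PVOP_RETMASK(u16) evaluates to 0xffffUL, so only the low 16 bits
 * of the return register are kept.  For 8-byte (or any unlisted) sizes
 * the mask stays ~0UL and the value is passed through unchanged.
 */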
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,	\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_call post			\
				     : call_clbr, ASM_CALL_CONSTRAINT	\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_call post			\
				     : call_clbr, ASM_CALL_CONSTRAINT	\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
		}							\
		__ret;							\
	})
#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)
#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_call post				\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})
#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)
#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")
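/*
 * Illustrative use of the callee-save variants (cf. asm/irqflags.h):
 *
 *	static inline notrace unsigned long arch_local_save_flags(void)
 *	{
 *		return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 *	}
 *
 * save_fl is a struct paravirt_callee_save, so only CLBR_RET_REG is
 * clobbered and the compiler can keep more state live across the call.
 */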
#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
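/*
 * A 4-argument example (sketch; cf. set_pte_at() in asm/paravirt.h):
 *
 *	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *				      pte_t *ptep, pte_t pte)
 *	{
 *		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
 *	}
 *
 * On i386 the fourth argument is pushed on the stack around the call;
 * on x86_64 it simply goes in %rcx.
 */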
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);
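/*
 * These back the lazy_mode hooks above; a typical (illustrative)
 * batching sequence, as driven by the generic mm code:
 *
 *	arch_enter_lazy_mmu_mode();		// pv_mmu_ops.lazy_mode.enter
 *	set_pte_at(mm, addr, ptep, pte);	// updates may be queued
 *	...
 *	arch_leave_lazy_mmu_mode();		// pv_mmu_ops.lazy_mode.leave
 */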
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
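/*
 * A minimal sketch of how these records are consumed, simplified from
 * the apply_paravirt() loop in arch/x86/kernel/alternative.c:
 *
 *	struct paravirt_patch_site *p;
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		unsigned used;
 *
 *		used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *					 p->instr, (unsigned long)p->instr,
 *					 p->len);
 *		BUG_ON(used > p->len);	// remaining bytes are nop padded
 *	}
 */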
#else	/* __ASSEMBLY__ */

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req
771:	ANNOTATE_RETPOLINE_SAFE
	call *\pv_opptr
772:	.pushsection .parainstructions,"a"
	_ASM_ALIGN
	_ASM_PTR 771b
	.byte \type
	.byte 772b-771b
	.short \clobber
	.popsection
.endm
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */