/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, time.sched_clock);
}
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);
static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, time.steal_clock, cpu);
}
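
/*
 * Illustrative sketch only (not part of this header): steal-time users
 * such as the scheduler typically sample this hook behind the static
 * key declared above, along the lines of:
 *
 *	if (static_key_false(&paravirt_steal_enabled)) {
 *		u64 steal = paravirt_steal_clock(smp_processor_id());
 *		...account 'steal' (cumulative ns of involuntary wait)...
 *	}
 */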

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
#endif
}
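
/*
 * Illustrative sketch only: slow_down_io() supplies the delay behind
 * the _p port accessors (outb_p() and friends), so a legacy driver
 * sequence is effectively:
 *
 *	outb(val, port);
 *	slow_down_io();		(settle time for slow ISA-era devices)
 */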

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_others(const struct cpumask *cpumask,
				      const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}
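
/*
 * Illustrative sketch only: callers preload the registers with the
 * requested leaf/subleaf and the hook overwrites them, e.g. to read
 * the maximum supported basic leaf:
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *	(eax now holds the highest standard CPUID leaf)
 */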

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
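
/*
 * Illustrative sketch only: the wrappers read into or write from plain
 * variables, e.g. sampling and then clearing DR6 in a debug-trap path:
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);
 *	set_debugreg(0, 6);
 */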

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(cpu.wbinvd);
}

#define get_kernel_rpl()	(pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
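
/*
 * Illustrative sketch only: the _safe variants report the #GP that a
 * non-existent MSR would otherwise raise, so feature probing can do:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(msr, &val))
 *		return;		(MSR absent; a nonzero error came back)
 *	(val is valid here)
 */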

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
	else
		PVOP_VCALL4(mmu.ptep_modify_prot_commit,
			    vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(mmu.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(mmu.set_pud, pudp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, __pgd(0));				\
} while (0)

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(mmu.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}
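
/*
 * Illustrative sketch only: a page-table walker can batch updates under
 * lazy MMU mode so a hypervisor may coalesce them into a single flush:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte_mkold(*ptep));
 *	arch_leave_lazy_mmu_mode();
 */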

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/*
 * We save only some registers; saving all of them would be too much.
 * We clobber all caller-saved registers except the argument register.
 */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
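
/*
 * Illustrative sketch only: a pv backend wraps a C implementation once
 * with the thunk generator and then installs the callee-save-aware
 * reference, roughly (my_spin_unlock is a hypothetical function):
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(my_spin_unlock);
 *
 *	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(my_spin_unlock);
 */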

#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
#endif

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .popsection

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)		((off) / 4)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(PV_CPU_iret),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special. Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(PV_CPU_swapgs),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);		\
		 )

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),			\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)						\
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),				\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif	/* CONFIG_X86_64 */

#ifdef CONFIG_PARAVIRT_XXL

#define GET_CR2_INTO_AX							\
	PARA_SITE(PARA_PATCH(PV_MMU_read_cr2),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);		\
		 )

#endif /* CONFIG_PARAVIRT_XXL */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */