/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS                               \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
         | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
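/*
 * KVM_POSSIBLE_CR0/CR4_GUEST_BITS above bound which CR0/CR4 bits may ever be
 * "guest-owned", i.e. modifiable by the guest without a VM-Exit.  The bits
 * actually owned at any given time live in vcpu->arch.cr{0,4}_guest_owned_bits;
 * reads of guest-owned bits may find KVM's cached value stale and have to
 * refresh it, see kvm_read_cr0_bits() and kvm_read_cr4_bits() below.
 */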

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)                                 \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{                                                                             \
        return vcpu->arch.regs[VCPU_REGS_##uname];                            \
}                                                                             \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,        \
                                                unsigned long val)            \
{                                                                             \
        vcpu->arch.regs[VCPU_REGS_##uname] = val;                             \
}
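/*
 * For example, BUILD_KVM_GPR_ACCESSORS(rax, RAX) below expands to:
 *
 *      static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
 *      {
 *              return vcpu->arch.regs[VCPU_REGS_RAX];
 *      }
 *      static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
 *                                                unsigned long val)
 *      {
 *              vcpu->arch.regs[VCPU_REGS_RAX] = val;
 *      }
 */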
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
                                             enum kvm_reg reg)
{
        return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
                                         enum kvm_reg reg)
{
        return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
                                               enum kvm_reg reg)
{
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

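/*
 * Marking a register dirty also marks it available: a value that KVM itself
 * has just written is by definition the current value.
 */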
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
                                           enum kvm_reg reg)
{
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other words,
 * odds are good you shouldn't be using the raw variants.
 */
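/*
 * Reading a register that isn't marked available goes through the
 * kvm_x86_cache_reg static call, which dispatches to the vendor (VMX or SVM)
 * callback to pull the current value out of hardware state.
 */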
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
        if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
                return 0;

        if (!kvm_register_is_available(vcpu, reg))
                static_call(kvm_x86_cache_reg)(vcpu, reg);

        return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
                                          unsigned long val)
{
        if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
                return;

        vcpu->arch.regs[reg] = val;
        kvm_register_mark_dirty(vcpu, reg);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

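/*
 * The PDPTRs are the four page-directory-pointer-table entries used by PAE
 * paging; like the GPRs above, they are cached and only (re)loaded from
 * hardware state on demand.
 */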
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
        might_sleep();  /* on svm */

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

        return vcpu->arch.walk_mmu->pdptrs[index];
}

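/*
 * The CRn "bits" readers only refresh KVM's cached value when a requested
 * bit is currently guest-owned and the cache is stale.  For example,
 * kvm_read_cr4_bits(vcpu, X86_CR4_TSD) goes to hardware state only if
 * CR4.TSD is guest-owned and CR4 isn't marked available.
 */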
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
        if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
            !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
        return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
        if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
            !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
        return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
        if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
        return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, ~0UL);
}

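/*
 * Assemble the 64-bit value that the guest passes in the EDX:EAX pair, as
 * used by instructions such as WRMSR and XSETBV; the "& -1u" truncates each
 * GPR to its low 32 bits before the halves are combined.
 */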
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
        return (kvm_rax_read(vcpu) & -1u)
                | ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags |= HF_GUEST_MASK;
}

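/*
 * Loading the EOI exitmap is deferred while the vCPU is in guest mode; an
 * update that became pending in the interim is requested on the way back
 * to L1.
 */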
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags &= ~HF_GUEST_MASK;

        if (vcpu->arch.load_eoi_exitmap_pending) {
                vcpu->arch.load_eoi_exitmap_pending = false;
                kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
        }
}

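/*
 * "Guest mode" refers to nested virtualization: the vCPU is currently
 * running an L2 guest on behalf of an L1 hypervisor.
 */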
static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif /* ASM_KVM_CACHE_REGS_H */