KVM: vmx, svm: clean up mass updates to regs_avail/regs_dirty bits
[linux-2.6-microblaze.git] arch/x86/kvm/kvm_cache_regs.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

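/*
 * CR0/CR4 bits that vendor code (VMX/SVM) may let the guest own, i.e.
 * that the guest can modify without a VM-Exit.  Reads of these bits
 * must go through kvm_read_cr0_bits()/kvm_read_cr4_bits() below so
 * that a stale cached value is refreshed from hardware first.
 */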
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

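/*
 * Masks of CR0/CR4 bits that affect the paging structures: changing a
 * *_PDPTR_BITS bit requires the PAE PDPTEs to be reloaded, and changing
 * a *_TLBFLUSH_BITS bit requires a TLB flush.  These masks are consumed
 * by the CR0/CR4 write paths, e.g. kvm_set_cr0()/kvm_set_cr4().
 */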
#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

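/*
 * A guest-owned CR0 bit can be toggled without a VM-Exit, so KVM would
 * never see a change to a PDPTR-relevant bit; assert that the two sets
 * never intersect.
 */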
static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
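/*
 * For example, BUILD_KVM_GPR_ACCESSORS(rax, RAX) expands to
 * kvm_rax_read()/kvm_rax_write(), which access
 * vcpu->arch.regs[VCPU_REGS_RAX] directly.  These accessors do not
 * touch regs_avail/regs_dirty; callers that need the cache semantics
 * must use the kvm_register_*() helpers below.
 */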
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

/*
 * avail  dirty
 * 0      0       register in VMCS/VMCB
 * 0      1       *INVALID*
 * 1      0       register in vcpu->arch
 * 1      1       register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_clear_available(struct kvm_vcpu *vcpu,
						enum kvm_reg reg)
{
	__clear_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__clear_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
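/*
 * Typical flow, taking VMX's RSP handling as the example: after VM-Exit
 * the register lives only in the VMCS (avail=0); the first read pulls
 * it into vcpu->arch.regs and marks it available (avail=1, dirty=0);
 * a write additionally marks it dirty (avail=1, dirty=1) so that
 * vendor code knows to copy it back before the next VM-Enter, e.g.:
 *
 *	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
 *		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
 */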

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other words,
 * odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		static_call(kvm_x86_cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}
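/*
 * The mode-aware counterparts, kvm_register_read() and
 * kvm_register_write() in x86.h, wrap the raw helpers and truncate the
 * value to 32 bits when the vCPU is not in 64-bit mode.
 */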

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

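/*
 * The PDPTRs are only relevant under PAE paging; on SVM, decaching them
 * can require reading the guest's page-directory-pointer table from
 * memory, hence the might_sleep() below.
 */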
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* decaching the PDPTRs on SVM may read guest memory */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}

static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}
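/*
 * Example: testing CR0.PG without unconditionally decaching CR0:
 *
 *	if (kvm_read_cr0_bits(vcpu, X86_CR0_PG))
 *		...
 *
 * Since X86_CR0_PG is not in KVM_POSSIBLE_CR0_GUEST_BITS, this never
 * triggers a decache and simply reads the cached value.
 */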

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

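/*
 * Read the EDX:EAX pair as a single u64, e.g. when emulating
 * instructions such as WRMSR that pass a 64-bit value in EDX:EAX.
 * The "& -1u" truncates each register to its low 32 bits.
 */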
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

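/*
 * "Guest mode" refers to nested virtualization: the vCPU is in guest
 * mode while it is running a nested (L2) guest on behalf of the L1
 * hypervisor.
 */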
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif