KVM: X86: Remove kvm_register_clear_available()
[linux-2.6-microblaze.git] arch/x86/kvm/kvm_cache_regs.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS                               \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
         | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

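/*
 * X86_CR0_PDPTR_BITS and X86_CR4_PDPTR_BITS are the control bits whose
 * modification forces the PAE PDPTRs to be reloaded; X86_CR4_TLBFLUSH_BITS
 * are the CR4 bits whose modification requires a TLB flush.  The assertion
 * below guarantees that no PDPTR-affecting CR0 bit can ever be guest-owned,
 * i.e. KVM always intercepts CR0 writes that may require a PDPTR reload.
 */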
static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)                                 \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{                                                                             \
        return vcpu->arch.regs[VCPU_REGS_##uname];                            \
}                                                                             \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,        \
                                                unsigned long val)            \
{                                                                             \
        vcpu->arch.regs[VCPU_REGS_##uname] = val;                             \
}
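/*
 * Each invocation below expands to a trivial read/write pair, e.g.
 * BUILD_KVM_GPR_ACCESSORS(rax, RAX) yields kvm_rax_read() and
 * kvm_rax_write().  These accessors deliberately skip the avail/dirty
 * tracking used for other registers: the VM-entry/exit paths keep
 * vcpu->arch.regs up to date for the general purpose registers, so the
 * array is always authoritative for them.
 */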
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

/*
 * avail  dirty
 * 0      0       register in VMCS/VMCB
 * 0      1       *INVALID*
 * 1      0       register in vcpu->arch
 * 1      1       register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
                                             enum kvm_reg reg)
{
        return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
                                         enum kvm_reg reg)
{
        return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
                                               enum kvm_reg reg)
{
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

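/*
 * Marking a register dirty also marks it available, so the avail=0/dirty=1
 * combination flagged as *INVALID* in the table above is unreachable
 * through these helpers.
 */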
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
                                           enum kvm_reg reg)
{
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other words,
 * odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
        if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
                return 0;

        if (!kvm_register_is_available(vcpu, reg))
                static_call(kvm_x86_cache_reg)(vcpu, reg);

        return vcpu->arch.regs[reg];
}

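/*
 * The raw write unconditionally updates the software cache and marks the
 * register dirty so that, per the table above, the register is known to
 * need storing back to the VMCS/VMCB before the next VM-entry.
 */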
static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
                                          unsigned long val)
{
        if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
                return;

        vcpu->arch.regs[reg] = val;
        kvm_register_mark_dirty(vcpu, reg);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

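/*
 * The PDPTRs are cached lazily: on VMX with EPT they can be pulled out of
 * the VMCS, while on SVM refilling the cache rereads the PDPTEs from guest
 * memory, which can sleep; hence the might_sleep() below.
 */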
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
        might_sleep();  /* on svm */

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

        return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
        vcpu->arch.walk_mmu->pdptrs[index] = value;
}

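/*
 * Guest-owned CR0/CR4 bits are bits the guest may modify without triggering
 * a VM-exit, so the cached value in vcpu->arch can be stale for exactly
 * those bits.  The helpers below only drop into vendor code to refresh the
 * cache when a requested bit is actually guest-owned and the register has
 * not already been marked available.
 */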
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
        if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
            !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
        return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
        if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
            !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
        return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
        if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
        return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, ~0UL);
}

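/*
 * Read the guest's EDX:EAX pair as a single 64-bit value, with each GPR
 * truncated to its low 32 bits (the "& -1u"), e.g. for emulating WRMSR,
 * where the guest supplies a 64-bit value split across EDX:EAX.
 */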
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
        return (kvm_rax_read(vcpu) & -1u)
                | ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

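/*
 * "Guest mode" here refers to nested virtualization: HF_GUEST_MASK is set
 * while the vCPU is running an L2 guest on behalf of a nested hypervisor.
 */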
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags |= HF_GUEST_MASK;
        vcpu->stat.guest_mode = 1;
}

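/*
 * EOI exit bitmap updates that arrive while the vCPU is in guest mode are
 * recorded as pending rather than applied immediately, since the bitmap
 * belongs to L1; replay the deferred request once L2 has been left.
 */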
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags &= ~HF_GUEST_MASK;

        if (vcpu->arch.load_eoi_exitmap_pending) {
                vcpu->arch.load_eoi_exitmap_pending = false;
                kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
        }

        vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_GUEST_MASK;
}

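/* HF_SMM_MASK is set while the vCPU is executing in System Management Mode. */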
static inline bool is_smm(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif