#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/hyperv.h>
/*
 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
 * is set by CPUID(HVCPUID_VERSION_FEATURES).
 */
enum hv_cpuid_function {
	HVCPUID_VERSION_FEATURES	= 0x00000001,
	HVCPUID_VENDOR_MAXFUNCTION	= 0x40000000,
	HVCPUID_INTERFACE		= 0x40000001,

	/*
	 * The remaining functions depend on the value of
	 * HVCPUID_INTERFACE.
	 */
	HVCPUID_VERSION			= 0x40000002,
	HVCPUID_FEATURES		= 0x40000003,
	HVCPUID_ENLIGHTENMENT_INFO	= 0x40000004,
	HVCPUID_IMPLEMENTATION_LIMITS	= 0x40000005,
};
struct ms_hyperv_info {
	u32 features;
	u32 misc_features;
	u32 hints;
	u32 max_vp_index;
	u32 max_lp_index;
};

extern struct ms_hyperv_info ms_hyperv;
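
/*
 * Illustrative sketch (not the actual implementation, which lives in
 * arch/x86/kernel/cpu/mshyperv.c): the fields above are filled in at
 * boot from the CPUID leaves declared earlier, roughly:
 *
 *	ms_hyperv.features = cpuid_eax(HVCPUID_FEATURES);
 *	ms_hyperv.hints    = cpuid_eax(HVCPUID_ENLIGHTENMENT_INFO);
 */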
/*
 * Declare the MSR used to set up pages used to communicate with the hypervisor.
 */
union hv_x64_msr_hypercall_contents {
	u64 as_uint64;
	struct {
		u64 enable:1;
		u64 reserved:11;
		u64 guest_physical_address:52;
	};
};
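
/*
 * A minimal sketch of how this union is used when enabling the
 * hypercall page (modeled on hyperv_init(); not a drop-in sequence):
 *
 *	union hv_x64_msr_hypercall_contents hypercall_msr;
 *
 *	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 *	hypercall_msr.enable = 1;
 *	hypercall_msr.guest_physical_address =
 *		vmalloc_to_pfn(hv_hypercall_pg);
 *	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 */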
struct ms_hyperv_tsc_page {
	volatile u32 tsc_sequence;
	u32 reserved1;
	volatile u64 tsc_scale;
	volatile s64 tsc_offset;
	u64 reserved2[509];
};
/*
 * The guest OS needs to register the guest ID with the hypervisor.
 * The guest ID is a 64-bit entity and the structure of this ID is
 * specified in the Hyper-V specification:
 *
 * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
 *
 * While the current guideline does not specify how Linux guest ID(s)
 * need to be generated, our plan is to publish the guidelines for
 * Linux and other guest operating systems that currently are hosted
 * on Hyper-V. The implementation here conforms to these yet
 * unpublished guidelines.
 *
 * Bit(s)
 * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
 * 62:56 - Os Type; Linux is 0x1
 * 55:48 - Distro specific identification
 * 47:16 - Linux kernel version number
 * 15:0 - Distro specific identification
 */

#define HV_LINUX_VENDOR_ID	0x8100

/*
 * Generate the guest ID based on the guideline described above.
 */
static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
				      __u64 d_info2)
{
	__u64 guest_id = 0;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (d_info1 << 48);
	guest_id |= (kernel_version << 16);
	guest_id |= d_info2;

	return guest_id;
}
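
/*
 * Worked example (values assumed for illustration): the vmbus driver
 * registers generate_guest_id(0, LINUX_VERSION_CODE, 0). For a 4.14.0
 * kernel, LINUX_VERSION_CODE = (4 << 16) | (14 << 8) = 0x040e00, so:
 *
 *	guest_id = 0x8100000000000000	   HV_LINUX_VENDOR_ID << 48
 *		 | 0x040e00ULL << 16	   kernel version in bits 47:16
 *		 = 0x810000040e000000
 */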
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may already have cleared the header
	 * and the host may already have delivered some other message there.
	 * In case we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * Make sure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor.
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}
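
/*
 * Usage sketch (simplified from the vmbus message DPC; VMBUS_MESSAGE_SINT
 * and the per-CPU message page are owned by drivers/hv): consume the slot
 * for our SINT, then free it so the host can post the next message:
 *
 *	struct hv_message *msg =
 *		(struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 *	u32 message_type = READ_ONCE(msg->header.message_type);
 *
 *	if (message_type != HVMSG_NONE) {
 *		... handle the message ...
 *		vmbus_signal_eom(msg, message_type);
 *	}
 */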
#define hv_init_timer(timer, tick) wrmsrl(timer, tick)
#define hv_init_timer_config(config, val) wrmsrl(config, val)

#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)

#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)

#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)

#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)

#define hv_get_synint_state(int_num, val) rdmsrl(int_num, val)
#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val)
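
/*
 * Usage sketch, assuming the hv_synic_simp union from the vmbus code
 * (drivers/hv/hyperv_vmbus.h): SynIC setup reads, modifies and writes
 * these MSRs through the accessors above, e.g. for the message page:
 *
 *	union hv_synic_simp simp;
 *
 *	hv_get_simp(simp.as_uint64);
 *	simp.simp_enabled = 1;
 *	simp.base_simp_gpa = virt_to_phys(msg_page) >> PAGE_SHIFT;
 *	hv_set_simp(simp.as_uint64);
 */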
void hyperv_callback_vector(void);
#ifdef CONFIG_TRACING
#define trace_hyperv_callback_vector hyperv_callback_vector
#endif
void hyperv_vector_handler(struct pt_regs *regs);
void hv_setup_vmbus_irq(void (*handler)(void));
void hv_remove_vmbus_irq(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
#if IS_ENABLED(CONFIG_HYPERV)
extern struct clocksource *hyperv_cs;
extern void *hv_hypercall_pg;

static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (!hv_hypercall_pg)
		return U64_MAX;

	/*
	 * Per the hypercall calling convention: RCX carries the control
	 * word, RDX the input GPA and R8 the output GPA; the status is
	 * returned in RAX.
	 */
	__asm__ __volatile__("mov %4, %%r8\n"
			     "call *%5"
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     : "r" (output_address), "m" (hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	/* On 32-bit the 64-bit parameters are passed as register pairs. */
	__asm__ __volatile__("call *%7"
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       "m" (hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}
#define HV_HYPERCALL_RESULT_MASK	GENMASK_ULL(15, 0)
#define HV_HYPERCALL_FAST_BIT		BIT(16)
#define HV_HYPERCALL_VARHEAD_OFFSET	17
#define HV_HYPERCALL_REP_COMP_OFFSET	32
#define HV_HYPERCALL_REP_COMP_MASK	GENMASK_ULL(43, 32)
#define HV_HYPERCALL_REP_START_OFFSET	48
#define HV_HYPERCALL_REP_START_MASK	GENMASK_ULL(59, 48)
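
/*
 * Worked example of the control word layout (illustrative): a rep
 * hypercall with code HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST (0x0003) and
 * rep_count = 2 starts out encoded as
 *
 *	control = 0x0003 | (2ULL << HV_HYPERCALL_REP_COMP_OFFSET)
 *		= 0x0000000200000003
 *
 * with the rep start index (bits 59:48) advanced between retries by
 * hv_do_rep_hypercall() below.
 */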
/* Fast hypercall with 8 bytes of input and no output */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__("call *%4"
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "m" (hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);

		__asm__ __volatile__ ("call *%5"
				      : "=A"(hv_status),
					"+c"(input1_lo),
					ASM_CALL_CONSTRAINT
				      : "A" (control),
					"b" (input1_hi),
					"m" (hv_hypercall_pg)
				      : "cc", "edi", "esi");
	}
#endif
	return hv_status;
}
/*
 * Rep hypercalls. Callers of this function are supposed to ensure that
 * rep_count and varhead_size comply with Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
			return status;

		/* Bits 32-43 of status have 'Reps completed' data. */
		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
			HV_HYPERCALL_REP_COMP_OFFSET;

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}
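
/*
 * Usage sketch (simplified from the Hyper-V TLB flush code): a
 * variable-sized input holding gva_n address entries is processed as
 * gva_n reps, resuming where the hypervisor left off on each retry:
 *
 *	status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
 *				     gva_n, 0, flush, NULL);
 */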
/*
 * Hypervisor's notion of virtual processor ID is different from
 * Linux' notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}
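
/*
 * Usage sketch: hypercalls identify processors by VP number, not Linux
 * CPU number, e.g. when building a target mask (assuming all VP
 * numbers fit below 64):
 *
 *	for_each_cpu(cpu, cpus)
 *		vcpu_mask |= 1ULL << hv_cpu_number_to_vp_number(cpu);
 */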
void hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyper_alloc_mmu(void);
void hyperv_report_panic(struct pt_regs *regs);
bool hv_is_hypercall_page_setup(void);
void hyperv_cleanup(void);
#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline bool hv_is_hypercall_page_setup(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
#endif /* CONFIG_HYPERV */
#ifdef CONFIG_HYPERV_TSCPAGE
struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
{
	u64 scale, offset, cur_tsc;
	u32 sequence;

	/*
	 * The protocol for reading Hyper-V TSC page is specified in Hypervisor
	 * Top-Level Functional Specification ver. 3.0 and above. To get the
	 * reference time we must do the following:
	 * - READ ReferenceTscSequence
	 *   A special '0' value indicates the time source is unreliable and we
	 *   need to use something else. The currently published specification
	 *   versions (up to 4.0b) contain a mistake and wrongly claim '-1'
	 *   instead of '0' as the special value, see commit c35b82ef0294.
	 * - ReferenceTime =
	 *	((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset
	 * - READ ReferenceTscSequence again. In case its value has changed
	 *   since our first reading we need to discard ReferenceTime and repeat
	 *   the whole sequence as the hypervisor was updating the page in
	 *   between.
	 */
	do {
		sequence = READ_ONCE(tsc_pg->tsc_sequence);
		if (!sequence)
			return U64_MAX;
		/*
		 * Make sure we read sequence before we read other values from
		 * TSC page.
		 */
		smp_rmb();

		scale = READ_ONCE(tsc_pg->tsc_scale);
		offset = READ_ONCE(tsc_pg->tsc_offset);
		cur_tsc = rdtsc_ordered();

		/*
		 * Make sure we read sequence after we read all other values
		 * from TSC page.
		 */
		smp_rmb();

	} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);

	return mul_u64_u64_shr(cur_tsc, scale, 64) + offset;
}
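
/*
 * Note on the math above: per the TLFS, tsc_scale is a 64.64
 * fixed-point multiplier converting TSC ticks to 100ns reference time
 * units, i.e. roughly tsc_scale = 2^64 * 10000000 / tsc_frequency_hz,
 * and mul_u64_u64_shr() takes the high 64 bits of the full 128-bit
 * product:
 *
 *	ref_time = ((u128)cur_tsc * scale >> 64) + offset
 */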
#else
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
	return NULL;
}
#endif

#endif /* _ASM_X86_MSHYPER_H */