Handle the TDX PV HLT hypercall and the interrupt status due to it.
TDX guest state is protected, so KVM can't get the interrupt status
of a TDX guest and assumes interrupts are always allowed, unless the TDX
guest calls TDVMCALL with HLT, which passes the interrupt blocked flag.

If the guest halted with interrupts enabled, also query pending RVI by
checking bit 0 of the TD_VCPU_STATE_DETAILS_NON_ARCH field via a SEAMCALL.
Update vt_interrupt_allowed() for TDX based on the interrupt blocked flag
passed by the HLT TDVMCALL. For VT-d PI, do not wake up the TD vCPU if
interrupts are blocked.
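
For reference only (not part of this patch): on the guest side, the interrupt
blocked flag travels in R12 of the HLT TDVMCALL, which is what KVM reads back
as vp_enter_args.r12. A minimal sketch, loosely modeled on the halt path in
arch/x86/coco/tdx/tdx.c and assuming the __tdx_hypercall() helper of recent
kernels; the tdx_pv_halt() name itself is made up for illustration:

  /* Guest-side sketch; only __tdx_hypercall(), struct tdx_module_args and
   * TDX_HYPERCALL_STANDARD are existing kernel symbols here. */
  static u64 tdx_pv_halt(bool irq_disabled)
  {
          struct tdx_module_args args = {
                  .r10 = TDX_HYPERCALL_STANDARD,
                  .r11 = EXIT_REASON_HLT,  /* TDVMCALL leaf: Instruction.HLT */
                  .r12 = irq_disabled,     /* interrupt blocked flag read by KVM */
          };

          /* TDG.VP.VMCALL<Instruction.HLT> per the GHCI spec */
          return __tdx_hypercall(&args);
  }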
For NMIs, KVM cannot determine the NMI blocking status for TDX guests,
so KVM always assumes NMIs are not blocked. In the unlikely scenario
where a guest invokes the PV HLT hypercall within an NMI handler, this
could result in a spurious wakeup. The guest should implement the PV
HLT hypercall within a loop if it truly requires no interruptions, since
NMIs could be unblocked by an IRET due to an exception occurring before
the PV HLT is executed in the NMI handler.
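
To make the "HLT in a loop" guidance concrete, a hypothetical guest-side
pattern (the wakeup flag and loop helper are illustrative; tdx_pv_halt() is
the sketch above):

  /* Illustrative only: re-issue the PV HLT after a spurious wakeup (e.g. an
   * NMI unblocked by an IRET before the HLT) until the awaited event arrives. */
  static bool wakeup_event_pending;  /* set by the event source (illustrative) */

  static void tdx_pv_halt_loop(bool irq_disabled)
  {
          while (!READ_ONCE(wakeup_event_pending))
                  tdx_pv_halt(irq_disabled);
  }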
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Co-developed-by: Binbin Wu <binbin.wu@linux.intel.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Message-ID: <20250227012021.1778144-7-binbin.wu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static int vt_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
if (is_td_vcpu(vcpu))
- return true;
+ return tdx_interrupt_allowed(vcpu);
return vmx_interrupt_allowed(vcpu, for_injection);
}
return;
if (kvm_vcpu_is_blocking(vcpu) &&
- (is_td_vcpu(vcpu) || !vmx_interrupt_blocked(vcpu)))
+ ((is_td_vcpu(vcpu) && tdx_interrupt_allowed(vcpu)) ||
+ (!is_td_vcpu(vcpu) && !vmx_interrupt_blocked(vcpu))))
pi_enable_wakeup_handler(vcpu);
/*
local_irq_enable();
}
+bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ /*
+ * KVM can't get the interrupt status of TDX guest and it assumes
+ * interrupt is always allowed unless TDX guest calls TDVMCALL with HLT,
+ * which passes the interrupt blocked flag.
+ */
+ return vmx_get_exit_reason(vcpu).basic != EXIT_REASON_HLT ||
+ !to_tdx(vcpu)->vp_enter_args.r12;
+}
+
bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
- return pi_has_pending_interrupt(vcpu);
+ u64 vcpu_state_details;
+
+ if (pi_has_pending_interrupt(vcpu))
+ return true;
+
+ /*
+ * Only check RVI pending for HALTED case with IRQ enabled.
+ * For non-HLT cases, KVM doesn't care about STI/SS shadows. And if the
+ * interrupt was pending before TD exit, then it _must_ be blocked,
+ * otherwise the interrupt would have been serviced at the instruction
+ * boundary.
+ */
+ if (vmx_get_exit_reason(vcpu).basic != EXIT_REASON_HLT ||
+ to_tdx(vcpu)->vp_enter_args.r12)
+ return false;
+
+ vcpu_state_details =
+ td_state_non_arch_read64(to_tdx(vcpu), TD_VCPU_STATE_DETAILS_NON_ARCH);
+
+ return tdx_vcpu_state_details_intr_pending(vcpu_state_details);
}
/*
{
switch (tdvmcall_leaf(vcpu)) {
case EXIT_REASON_CPUID:
+ case EXIT_REASON_HLT:
case EXIT_REASON_IO_INSTRUCTION:
return tdvmcall_leaf(vcpu);
case EXIT_REASON_EPT_VIOLATION:
/*
* Stop processing the remaining part if there is a pending interrupt,
* which could be qualified to deliver. Skip checking pending RVI for
- * TDVMCALL_MAP_GPA.
- * TODO: Add a comment to link the reason when the target function is
- * implemented.
+ * TDVMCALL_MAP_GPA, see comments in tdx_protected_apic_has_interrupt().
*/
if (kvm_vcpu_has_events(vcpu)) {
tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_RETRY);
return 1;
case EXIT_REASON_CPUID:
return tdx_emulate_cpuid(vcpu);
+ case EXIT_REASON_HLT:
+ return kvm_emulate_halt_noskip(vcpu);
case EXIT_REASON_TDCALL:
return handle_tdvmcall(vcpu);
case EXIT_REASON_VMCALL:
}
static __always_inline void tdvps_management_check(u64 field, u8 bits) {}
+static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}
#define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass) \
static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx, \
tdh_vp_wr_failed(tdx, #uclass, " &= ~", field, bit, err);\
}
+
+bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu);
+
TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
+TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
#else
static inline int tdx_bringup(void) { return 0; }
struct kvm_vcpu vcpu;
};
+static inline bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu) { return false; }
+
#endif
#endif
TD_TDCS_EXEC_TSC_MULTIPLIER = 11,
};
+enum tdx_vcpu_guest_other_state {
+ TD_VCPU_STATE_DETAILS_NON_ARCH = 0x100,
+};
+
+#define TDX_VCPU_STATE_DETAILS_INTR_PENDING BIT_ULL(0)
+
+static inline bool tdx_vcpu_state_details_intr_pending(u64 vcpu_state_details)
+{
+ return !!(vcpu_state_details & TDX_VCPU_STATE_DETAILS_INTR_PENDING);
+}
+
/* @field is any of enum tdx_tdcs_execution_control */
#define TDCS_EXEC(field) BUILD_TDX_FIELD(TD_CLASS_EXECUTION_CONTROLS, (field))