KVM: TDX: Handle SMI request as !CONFIG_KVM_SMM
author Isaku Yamahata <isaku.yamahata@intel.com>
Sat, 22 Feb 2025 01:47:49 +0000 (09:47 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 14 Mar 2025 18:20:56 +0000 (14:20 -0400)
Handle SMI requests the same way KVM does when CONFIG_KVM_SMM=n, i.e.
return -ENOTTY, and add KVM_BUG_ON() to the SMI-related ops for TDs.

TDX doesn't support system-management mode (SMM) or system-management
interrupts (SMIs) in guest TDs.  Because guest state (vCPU state, memory
state) is protected, the VMM must go through the TDX module APIs to
change guest state.  However, the TDX module provides no way for the VMM
to inject an SMI into a guest TD, nor to switch the guest vCPU mode into
SMM.

MSR_IA32_SMBASE will not be emulated for TDX guests, so -ENOTTY will be
returned when an SMI is requested.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Co-developed-by: Binbin Wu <binbin.wu@linux.intel.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Message-ID: <20250222014757.897978-9-binbin.wu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/smm.h
arch/x86/kvm/vmx/main.c

index a1cf2ac..551703f 100644
@@ -142,6 +142,9 @@ union kvm_smram {
 
 static inline int kvm_inject_smi(struct kvm_vcpu *vcpu)
 {
+       if (!kvm_x86_call(has_emulated_msr)(vcpu->kvm, MSR_IA32_SMBASE))
+               return -ENOTTY;
+
        kvm_make_request(KVM_REQ_SMI, vcpu);
        return 0;
 }
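
With this change, KVM_SMI (which reaches kvm_inject_smi() via the vCPU
ioctl path) fails with -ENOTTY whenever the vendor has_emulated_msr()
callback reports MSR_IA32_SMBASE as unsupported.  For illustration only,
a minimal sketch of how the VMX/TDX dispatch for that callback could
look -- vt_has_emulated_msr() and tdx_has_emulated_msr() are assumed
helper names here, not part of this hunk:

	/*
	 * Sketch, not part of this patch: report MSR_IA32_SMBASE (among
	 * other unemulated MSRs) as unsupported for TDs so that
	 * kvm_inject_smi() bails out with -ENOTTY.
	 */
	static bool vt_has_emulated_msr(struct kvm *kvm, u32 index)
	{
		if (kvm && is_td(kvm))
			return tdx_has_emulated_msr(index);

		return vmx_has_emulated_msr(kvm, index);
	}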
index 4021c00..e19e357 100644
@@ -191,6 +191,41 @@ static int vt_handle_exit(struct kvm_vcpu *vcpu,
        return vmx_handle_exit(vcpu, fastpath);
 }
 
+#ifdef CONFIG_KVM_SMM
+static int vt_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+{
+       if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+               return 0;
+
+       return vmx_smi_allowed(vcpu, for_injection);
+}
+
+static int vt_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
+{
+       if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+               return 0;
+
+       return vmx_enter_smm(vcpu, smram);
+}
+
+static int vt_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
+{
+       if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+               return 0;
+
+       return vmx_leave_smm(vcpu, smram);
+}
+
+static void vt_enable_smi_window(struct kvm_vcpu *vcpu)
+{
+       if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+               return;
+
+       /* RSM will cause a vmexit anyway.  */
+       vmx_enable_smi_window(vcpu);
+}
+#endif
+
 static void vt_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
 {
        struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
@@ -549,10 +584,10 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
        .setup_mce = vmx_setup_mce,
 
 #ifdef CONFIG_KVM_SMM
-       .smi_allowed = vmx_smi_allowed,
-       .enter_smm = vmx_enter_smm,
-       .leave_smm = vmx_leave_smm,
-       .enable_smi_window = vmx_enable_smi_window,
+       .smi_allowed = vt_smi_allowed,
+       .enter_smm = vt_enter_smm,
+       .leave_smm = vt_leave_smm,
+       .enable_smi_window = vt_enable_smi_window,
 #endif
 
        .check_emulate_instruction = vmx_check_emulate_instruction,
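
For reference, KVM_BUG_ON() (from include/linux/kvm_host.h, roughly as
of this era of the tree) warns once and marks the VM as bugged, so the
guarded SMI ops above simply bail out instead of touching VMX state for
a TD:

	#define KVM_BUG_ON(cond, kvm)					\
	({								\
		bool __ret = !!(cond);					\
									\
		if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))		\
			kvm_vm_bugged(kvm);				\
		unlikely(__ret);					\
	})

Once a VM is bugged, further ioctls on it fail, which is the desired
fail-safe if one of these paths is ever reached for a TD vCPU.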