Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 1 May 2021 17:14:08 +0000 (10:14 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 1 May 2021 17:14:08 +0000 (10:14 -0700)
Pull kvm updates from Paolo Bonzini:
 "This is a large update by KVM standards, including AMD PSP (Platform
  Security Processor, aka "AMD Secure Technology") and ARM CoreSight
  (debug and trace) changes.

  ARM:

   - CoreSight: Add support for ETE and TRBE

   - Stage-2 isolation for the host kernel when running in protected
     mode

   - Guest SVE support when running in nVHE mode

   - Force W^X hypervisor mappings in nVHE mode

   - ITS save/restore for guests using direct injection with GICv4.1

   - nVHE panics now produce readable backtraces

   - Guest support for PTP using the ptp_kvm driver

   - Performance improvements in the S2 fault handler

  x86:

   - AMD PSP driver changes

   - Optimizations and cleanup of nested SVM code

   - AMD: Support for virtual SPEC_CTRL

   - Optimizations of the new MMU code: fast invalidation, zap under
     read lock, enable/disable dirty page logging under read lock

   - /dev/kvm API for AMD SEV live migration (guest API coming soon)

   - support SEV virtual machines sharing the same encryption context

   - support SGX in virtual machines

   - add a few more statistics

   - improved directed yield heuristics

   - Lots and lots of cleanups

  Generic:

   - Rework of MMU notifier interface, simplifying and optimizing the
     architecture-specific code

   - a handful of "Get rid of oprofile leftovers" patches

   - Some selftests improvements"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (379 commits)
  KVM: selftests: Speed up set_memory_region_test
  selftests: kvm: Fix the check of return value
  KVM: x86: Take advantage of kvm_arch_dy_has_pending_interrupt()
  KVM: SVM: Skip SEV cache flush if no ASIDs have been used
  KVM: SVM: Remove an unnecessary prototype declaration of sev_flush_asids()
  KVM: SVM: Drop redundant svm_sev_enabled() helper
  KVM: SVM: Move SEV VMCB tracking allocation to sev.c
  KVM: SVM: Explicitly check max SEV ASID during sev_hardware_setup()
  KVM: SVM: Unconditionally invoke sev_hardware_teardown()
  KVM: SVM: Enable SEV/SEV-ES functionality by default (when supported)
  KVM: SVM: Condition sev_enabled and sev_es_enabled on CONFIG_KVM_AMD_SEV=y
  KVM: SVM: Append "_enabled" to module-scoped SEV/SEV-ES control variables
  KVM: SEV: Mask CPUID[0x8000001F].eax according to supported features
  KVM: SVM: Move SEV module params/variables to sev.c
  KVM: SVM: Disable SEV/SEV-ES if NPT is disabled
  KVM: SVM: Free sev_asid_bitmap during init if SEV setup fails
  KVM: SVM: Zero out the VMCB array used to track SEV ASID association
  x86/sev: Drop redundant and potentially misleading 'sev_enabled'
  KVM: x86: Move reverse CPUID helpers to separate header file
  KVM: x86: Rename GPR accessors to make mode-aware variants the defaults
  ...

50 files changed:
1  2 
Documentation/virt/kvm/api.rst
MAINTAINERS
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kvm/hyp/nvhe/Makefile
arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/kvm/vgic/vgic-mmio-v3.c
arch/arm64/mm/init.c
arch/mips/include/asm/kvm_host.h
arch/mips/kvm/mips.c
arch/mips/kvm/mmu.c
arch/mips/kvm/vz.c
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kvm/book3s_hv.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/kvm.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/mem_encrypt.c
arch/x86/mm/mem_encrypt_identity.c
drivers/clocksource/arm_arch_timer.c
drivers/crypto/ccp/sev-dev.c
drivers/firmware/psci/psci.c
drivers/hwtracing/coresight/coresight-core.c
drivers/hwtracing/coresight/coresight-etm-perf.c
drivers/hwtracing/coresight/coresight-etm4x-core.c
drivers/irqchip/irq-gic-v3-its.c
include/linux/clocksource.h
include/linux/perf_event.h
include/uapi/linux/perf_event.h
kernel/events/core.c
kernel/time/clocksource.c
kernel/time/timekeeping.c
tools/testing/selftests/kvm/Makefile

Simple merge
diff --cc MAINTAINERS
Simple merge
Simple merge
  #define dsb(opt)      asm volatile("dsb " #opt : : : "memory")
  
  #define psb_csync()   asm volatile("hint #17" : : : "memory")
+ #define tsb_csync()   asm volatile("hint #18" : : : "memory")
  #define csdb()                asm volatile("hint #20" : : : "memory")
  
 -#define spec_bar()    asm volatile(ALTERNATIVE("dsb nsh\nisb\n",              \
 -                                               SB_BARRIER_INSN"nop\n",        \
 -                                               ARM64_HAS_SB))
 -
  #ifdef CONFIG_ARM64_PSEUDO_NMI
  #define pmr_sync()                                            \
        do {                                                    \
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -822,15 -942,27 +815,10 @@@ bool kvm_mips_flush_gpa_pt(struct kvm *
  int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
  pgd_t *kvm_pgd_alloc(void);
  void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 -void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
 -                                bool user);
 -void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
 -void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);
 -
 -enum kvm_mips_fault_result {
 -      KVM_MIPS_MAPPED = 0,
 -      KVM_MIPS_GVA,
 -      KVM_MIPS_GPA,
 -      KVM_MIPS_TLB,
 -      KVM_MIPS_TLBINV,
 -      KVM_MIPS_TLBMOD,
 -};
 -enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 -                                                 unsigned long gva,
 -                                                 bool write);
  
  #define KVM_ARCH_WANT_MMU_NOTIFIER
- int kvm_unmap_hva_range(struct kvm *kvm,
-                       unsigned long start, unsigned long end, unsigned flags);
- int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
- int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
- int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
  
  /* Emulation */
 -int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
  enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
  int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
  int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -574,6 -574,49 +574,54 @@@ static void kvm_smp_send_call_func_ipi(
        }
  }
  
 -static void kvm_flush_tlb_others(const struct cpumask *cpumask,
++static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
+                       const struct flush_tlb_info *info)
+ {
+       u8 state;
+       int cpu;
+       struct kvm_steal_time *src;
+       struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
+       cpumask_copy(flushmask, cpumask);
+       /*
+        * We have to call flush only on online vCPUs. And
+        * queue flush_on_enter for pre-empted vCPUs
+        */
+       for_each_cpu(cpu, flushmask) {
++              /*
++               * The local vCPU is never preempted, so we do not explicitly
++               * skip check for local vCPU - it will never be cleared from
++               * flushmask.
++               */
+               src = &per_cpu(steal_time, cpu);
+               state = READ_ONCE(src->preempted);
+               if ((state & KVM_VCPU_PREEMPTED)) {
+                       if (try_cmpxchg(&src->preempted, &state,
+                                       state | KVM_VCPU_FLUSH_TLB))
+                               __cpumask_clear_cpu(cpu, flushmask);
+               }
+       }
 -      native_flush_tlb_others(flushmask, info);
++      native_flush_tlb_multi(flushmask, info);
+ }
+ static __init int kvm_alloc_cpumask(void)
+ {
+       int cpu;
+       if (!kvm_para_available() || nopv)
+               return 0;
+       if (pv_tlb_flush_supported() || pv_ipi_supported())
+               for_each_possible_cpu(cpu) {
+                       zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
+                               GFP_KERNEL, cpu_to_node(cpu));
+               }
+       return 0;
+ }
+ arch_initcall(kvm_alloc_cpumask);
  static void __init kvm_smp_prepare_boot_cpu(void)
  {
        /*
@@@ -655,15 -668,9 +673,9 @@@ static void __init kvm_guest_init(void
  
        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
 -              pv_ops.time.steal_clock = kvm_steal_clock;
 +              static_call_update(pv_steal_clock, kvm_steal_clock);
        }
  
-       if (pv_tlb_flush_supported()) {
-               pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
-               pv_ops.mmu.tlb_remove_table = tlb_remove_table;
-               pr_info("KVM setup pv remote TLB flush\n");
-       }
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);
  
        }
  
  #ifdef CONFIG_SMP
 -              pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
+       if (pv_tlb_flush_supported()) {
++              pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
+               pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+               pr_info("KVM setup pv remote TLB flush\n");
+       }
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (pv_sched_yield_supported()) {
                smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -546,9 -535,16 +546,8 @@@ void __init sme_enable(struct boot_para
                if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                        return;
        } else {
 -              /* For SEV, check the SEV MSR */
 -              msr = __rdmsr(MSR_AMD64_SEV);
 -              if (!(msr & MSR_AMD64_SEV_ENABLED))
 -                      return;
 -
 -              /* Save SEV_STATUS to avoid reading MSR again */
 -              sev_status = msr;
 -
                /* SEV state cannot be controlled by a command line option */
                sme_me_mask = me_mask;
-               sev_enabled = true;
                physical_mask &= ~sme_me_mask;
                return;
        }
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge