Merge branch 'kvm-hv-xmm-hypercall-fixes' into HEAD
author    Paolo Bonzini <pbonzini@redhat.com>
          Fri, 25 Feb 2022 11:28:10 +0000 (06:28 -0500)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 25 Feb 2022 13:20:08 +0000 (08:20 -0500)
The fixes for 5.17 conflict with cleanups made in the same area
earlier in the 5.18 development cycle.

arch/x86/kvm/hyperv.c

@@@ -1769,41 -1749,23 +1769,60 @@@ struct kvm_hv_hcall 
        sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
  };
  
 -static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 +static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
++                               int consumed_xmm_halves,
 +                               u64 *sparse_banks, gpa_t offset)
  {
 -      gpa_t gpa;
 +      u16 var_cnt;
+       int i;
- static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
 +
 +      if (hc->var_cnt > 64)
 +              return -EINVAL;
 +
 +      /* Ignore banks that cannot possibly contain a legal VP index. */
 +      var_cnt = min_t(u16, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS);
 +
++      if (hc->fast) {
++              /*
++               * Each XMM holds two sparse banks, but do not count halves that
++               * have already been consumed for hypercall parameters.
++               */
++              if (hc->var_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - consumed_xmm_halves)
++                      return HV_STATUS_INVALID_HYPERCALL_INPUT;
++              for (i = 0; i < var_cnt; i++) {
++                      int j = i + consumed_xmm_halves;
++                      if (j % 2)
++                              sparse_banks[i] = sse128_hi(hc->xmm[j / 2]);
++                      else
++                              sparse_banks[i] = sse128_lo(hc->xmm[j / 2]);
++              }
++              return 0;
++      }
++
 +      return kvm_read_guest(kvm, hc->ingpa + offset, sparse_banks,
 +                            var_cnt * sizeof(*sparse_banks));
 +}
 +
-       int i;
++static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 +{
        struct kvm *kvm = vcpu->kvm;
        struct hv_tlb_flush_ex flush_ex;
        struct hv_tlb_flush flush;
 -      u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
 -      DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
 -      unsigned long *vcpu_mask;
 +      DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
        u64 valid_bank_mask;
 -      u64 sparse_banks[64];
 -      int sparse_banks_len;
 +      u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
        bool all_cpus;
  
-       if (!ex) {
 +      /*
 +       * The Hyper-V TLFS doesn't allow more than 64 sparse banks, e.g. the
 +       * valid mask is a u64.  Fail the build if KVM's max allowed number of
 +       * vCPUs (>4096) would exceed this limit, KVM will need additional
 +       * changes for Hyper-V support to avoid setting the guest up to fail.
 +       */
 +      BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > 64);
 +
+       if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
+           hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) {
                if (hc->fast) {
                        flush.address_space = hc->ingpa;
                        flush.flags = hc->outgpa;
                all_cpus = flush_ex.hv_vp_set.format !=
                        HV_GENERIC_SET_SPARSE_4K;
  
 -              sparse_banks_len = bitmap_weight((unsigned long *)&valid_bank_mask, 64);
 +              if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64))
 +                      return HV_STATUS_INVALID_HYPERCALL_INPUT;
  
 -              if (!sparse_banks_len && !all_cpus)
 +              if (all_cpus)
 +                      goto do_flush;
 +
 +              if (!hc->var_cnt)
                        goto ret_success;
  
-               if (hc->fast) {
-                       if (hc->var_cnt > HV_HYPERCALL_MAX_XMM_REGISTERS - 1)
-                               return HV_STATUS_INVALID_HYPERCALL_INPUT;
-                       for (i = 0; i < hc->var_cnt; i += 2) {
-                               sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
-                               sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]);
 -              if (!all_cpus) {
 -                      if (hc->fast) {
 -                              /* XMM0 is already consumed, each XMM holds two sparse banks. */
 -                              if (sparse_banks_len > 2 * (HV_HYPERCALL_MAX_XMM_REGISTERS - 1))
 -                                      return HV_STATUS_INVALID_HYPERCALL_INPUT;
 -                              for (i = 0; i < sparse_banks_len; i += 2) {
 -                                      sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
 -                                      sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]);
 -                              }
 -                      } else {
 -                              gpa = hc->ingpa + offsetof(struct hv_tlb_flush_ex,
 -                                                         hv_vp_set.bank_contents);
 -                              if (unlikely(kvm_read_guest(kvm, gpa, sparse_banks,
 -                                                          sparse_banks_len *
 -                                                          sizeof(sparse_banks[0]))))
 -                                      return HV_STATUS_INVALID_HYPERCALL_INPUT;
--                      }
-                       goto do_flush;
--              }
-               if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks,
++              if (kvm_get_sparse_vp_set(kvm, hc, 2, sparse_banks,
 +                                        offsetof(struct hv_tlb_flush_ex,
 +                                                 hv_vp_set.bank_contents)))
 +                      return HV_STATUS_INVALID_HYPERCALL_INPUT;
        }
  
 +do_flush:
        /*
         * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
         * analyze it here, flush TLB regardless of the specified address space.
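
For reference, the merged kvm_get_sparse_vp_set() indexing can be modeled
outside the kernel. The sketch below is a minimal userspace approximation,
assuming a two-u64 stand-in for the kernel's sse128_t and taking
HV_HYPERCALL_MAX_XMM_REGISTERS as 6; it is illustrative, not the kernel
implementation.

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins for the kernel's sse128_t and accessors (assumption:
	 * modeled here as two u64 halves of a 128-bit SSE register). */
	typedef struct { uint64_t lo, hi; } sse128_t;
	static uint64_t sse128_lo(sse128_t r) { return r.lo; }
	static uint64_t sse128_hi(sse128_t r) { return r.hi; }

	#define MAX_XMM 6 /* HV_HYPERCALL_MAX_XMM_REGISTERS */

	/* Mirrors the merged loop: sparse bank i lives in XMM half
	 * (i + consumed_xmm_halves); odd halves are the high 64 bits. */
	static int get_sparse_banks(const sse128_t *xmm, int consumed_xmm_halves,
				    uint64_t *banks, int var_cnt)
	{
		if (var_cnt > 2 * MAX_XMM - consumed_xmm_halves)
			return -1; /* would index past the register file */
		for (int i = 0; i < var_cnt; i++) {
			int j = i + consumed_xmm_halves;

			banks[i] = (j % 2) ? sse128_hi(xmm[j / 2])
					   : sse128_lo(xmm[j / 2]);
		}
		return 0;
	}

	int main(void)
	{
		sse128_t xmm[MAX_XMM] = { { 0xA0, 0xA1 }, { 0xB0, 0xB1 } };
		uint64_t banks[2];

		/* Flush-ex consumes both halves of XMM0, so bank 0 is XMM1.lo. */
		if (!get_sparse_banks(xmm, 2, banks, 2))
			printf("bank0=%#llx bank1=%#llx\n",
			       (unsigned long long)banks[0],
			       (unsigned long long)banks[1]);
		return 0;
	}
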
@@@ -1918,13 -1881,17 +1927,13 @@@ static u64 kvm_hv_send_ipi(struct kvm_v
        struct kvm *kvm = vcpu->kvm;
        struct hv_send_ipi_ex send_ipi_ex;
        struct hv_send_ipi send_ipi;
 -      u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
 -      DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
 -      unsigned long *vcpu_mask;
 +      DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
        unsigned long valid_bank_mask;
 -      u64 sparse_banks[64];
 -      int sparse_banks_len;
 +      u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
        u32 vector;
        bool all_cpus;
 -      int i;
  
-       if (!ex) {
+       if (hc->code == HVCALL_SEND_IPI) {
                if (!hc->fast) {
                        if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
                                                    sizeof(send_ipi))))
                if (all_cpus)
                        goto check_and_send_ipi;
  
 -              if (!sparse_banks_len)
 +              if (!hc->var_cnt)
                        goto ret_success;
  
-               if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks,
 -              if (!hc->fast) {
 -                      if (kvm_read_guest(kvm,
 -                                         hc->ingpa + offsetof(struct hv_send_ipi_ex,
 -                                                              vp_set.bank_contents),
 -                                         sparse_banks,
 -                                         sparse_banks_len * sizeof(sparse_banks[0])))
 -                              return HV_STATUS_INVALID_HYPERCALL_INPUT;
 -              } else {
 -                      /*
 -                       * The lower half of XMM0 is already consumed, each XMM holds
 -                       * two sparse banks.
 -                       */
 -                      if (sparse_banks_len > (2 * HV_HYPERCALL_MAX_XMM_REGISTERS - 1))
 -                              return HV_STATUS_INVALID_HYPERCALL_INPUT;
 -                      for (i = 0; i < sparse_banks_len; i++) {
 -                              if (i % 2)
 -                                      sparse_banks[i] = sse128_lo(hc->xmm[(i + 1) / 2]);
 -                              else
 -                                      sparse_banks[i] = sse128_hi(hc->xmm[i / 2]);
 -                      }
 -              }
++              if (kvm_get_sparse_vp_set(kvm, hc, 1, sparse_banks,
 +                                        offsetof(struct hv_send_ipi_ex,
 +                                                 vp_set.bank_contents)))
 +                      return HV_STATUS_INVALID_HYPERCALL_INPUT;
        }
  
  check_and_send_ipi:
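
The two call sites above differ only in how many XMM halves the fixed
hypercall header consumes: the flush-ex input fills both halves of XMM0
(consumed_xmm_halves == 2), while send-ipi-ex uses only the low half
(consumed_xmm_halves == 1), matching the open-coded loops being removed.
A tiny standalone sketch of the resulting index mapping, illustrative only:

	#include <stdio.h>

	int main(void)
	{
		/* Print which XMM half backs each sparse bank for the two
		 * consumed_xmm_halves values used in the merge (1 and 2). */
		for (int consumed = 1; consumed <= 2; consumed++) {
			printf("consumed_xmm_halves=%d:", consumed);
			for (int i = 0; i < 4; i++) {
				int j = i + consumed;

				printf(" bank%d=XMM%d.%s", i, j / 2,
				       (j % 2) ? "hi" : "lo");
			}
			printf("\n");
		}
		return 0;
	}
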
@@@ -2283,42 -2270,24 +2299,39 @@@ int kvm_hv_hypercall(struct kvm_vcpu *v
                                kvm_hv_hypercall_complete_userspace;
                return 0;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
-               if (unlikely(!hc.rep_cnt || hc.rep_idx || hc.var_cnt)) {
++              if (unlikely(hc.var_cnt)) {
 +                      ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
 +                      break;
 +              }
-               ret = kvm_hv_flush_tlb(vcpu, &hc, false);
-               break;
-       case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
-               if (unlikely(hc.rep || hc.var_cnt)) {
++              fallthrough;
+       case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+               if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
-               ret = kvm_hv_flush_tlb(vcpu, &hc, false);
+               ret = kvm_hv_flush_tlb(vcpu, &hc);
                break;
-       case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
-               if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
+       case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
++              if (unlikely(hc.var_cnt)) {
 +                      ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
 +                      break;
 +              }
-               ret = kvm_hv_flush_tlb(vcpu, &hc, true);
-               break;
++              fallthrough;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
                if (unlikely(hc.rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
-               ret = kvm_hv_flush_tlb(vcpu, &hc, true);
+               ret = kvm_hv_flush_tlb(vcpu, &hc);
                break;
        case HVCALL_SEND_IPI:
-               if (unlikely(hc.rep || hc.var_cnt)) {
++              if (unlikely(hc.var_cnt)) {
 +                      ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
 +                      break;
 +              }
-               ret = kvm_hv_send_ipi(vcpu, &hc, false);
-               break;
++              fallthrough;
        case HVCALL_SEND_IPI_EX:
-               if (unlikely(hc.fast || hc.rep)) {
+               if (unlikely(hc.rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
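
The dispatch change above folds each non-EX hypercall into its EX sibling:
the non-EX case adds only the check that no variable-size header was
supplied, then falls through to the shared rep validation and a single
handler call, dropping the old bool parameter. A minimal sketch of the
pattern, with stand-in code/status constants (treat the values as
assumptions here):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in constants for illustration only. */
	#define HVCALL_SEND_IPI                   0x000b
	#define HVCALL_SEND_IPI_EX                0x0015
	#define HV_STATUS_INVALID_HYPERCALL_INPUT 3

	struct hcall { uint16_t code; int rep; uint16_t var_cnt; };

	/* Mirrors the merged switch: the non-EX case rejects a variable
	 * header, then falls through to the shared EX validation and one
	 * handler call instead of duplicating the call with a bool flag. */
	static uint64_t dispatch(const struct hcall *hc)
	{
		switch (hc->code) {
		case HVCALL_SEND_IPI:
			if (hc->var_cnt)
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			/* fallthrough */
		case HVCALL_SEND_IPI_EX:
			if (hc->rep)
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			return 0; /* would call kvm_hv_send_ipi(vcpu, hc) */
		}
		return 0;
	}

	int main(void)
	{
		struct hcall bad = { .code = HVCALL_SEND_IPI, .var_cnt = 1 };

		printf("status=%llu\n", (unsigned long long)dispatch(&bad));
		return 0;
	}
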