struct kvm_memory_slot *slot,
                                   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 
        return gen;
 }
 
-static unsigned int kvm_current_mmio_generation(struct kvm *kvm)
+static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
 {
-       return kvm_memslots(kvm)->generation & MMIO_GEN_MASK;
+       return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK;
 }
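
The hunk above is the core of the conversion: the MMIO spte generation is now
read from the memslots as seen by the faulting vCPU instead of from the bare
struct kvm. A minimal sketch of the kvm_vcpu_memslots() accessor this assumes
(the real helper lives in the generic KVM code):

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	/*
	 * For now every vCPU can simply see the VM-wide memslots; the
	 * indirection leaves room for per-vCPU address spaces later.
	 */
	return kvm_memslots(vcpu->kvm);
}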
 
-static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn,
+static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
                           unsigned access)
 {
-       unsigned int gen = kvm_current_mmio_generation(kvm);
+       unsigned int gen = kvm_current_mmio_generation(vcpu);
        u64 mask = generation_mmio_spte_mask(gen);
 
        access &= ACC_WRITE_MASK | ACC_USER_MASK;
        return (spte & ~mask) & ~PAGE_MASK;
 }
 
-static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
+static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
                          pfn_t pfn, unsigned access)
 {
        if (unlikely(is_noslot_pfn(pfn))) {
-               mark_mmio_spte(kvm, sptep, gfn, access);
+               mark_mmio_spte(vcpu, sptep, gfn, access);
                return true;
        }
 
        return false;
 }
 
-static bool check_mmio_spte(struct kvm *kvm, u64 spte)
+static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 {
        unsigned int kvm_gen, spte_gen;
 
-       kvm_gen = kvm_current_mmio_generation(kvm);
+       kvm_gen = kvm_current_mmio_generation(vcpu);
        spte_gen = get_mmio_spte_generation(spte);
 
        trace_check_mmio_spte(spte, kvm_gen, spte_gen);
        kvm->arch.indirect_shadow_pages--;
 }
 
-static int has_wrprotected_page(struct kvm *kvm,
+static int has_wrprotected_page(struct kvm_vcpu *vcpu,
                                gfn_t gfn,
                                int level)
 {
        struct kvm_memory_slot *slot;
        struct kvm_lpage_info *linfo;
 
-       slot = gfn_to_memslot(kvm, gfn);
+       slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        if (slot) {
                linfo = lpage_info_slot(gfn, slot, level);
                return linfo->write_count;
 {
        struct kvm_memory_slot *slot;
 
-       slot = gfn_to_memslot(vcpu->kvm, gfn);
+       slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
              (no_dirty_log && slot->dirty_bitmap))
                slot = NULL;
        max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
 
        for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
-               if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
+               if (has_wrprotected_page(vcpu, large_gfn, level))
                        break;
 
        return level - 1;
                kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
+static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
        int i;
        bool write_protected = false;
 
-       slot = gfn_to_memslot(kvm, gfn);
+       slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 
        for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
                rmapp = __gfn_to_rmap(gfn, i, slot);
-               write_protected |= __rmap_write_protect(kvm, rmapp, true);
+               write_protected |= __rmap_write_protect(vcpu->kvm, rmapp, true);
        }
 
        return write_protected;
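
Note the split inside rmap_write_protect(): the memslot lookup is per-vCPU,
while __rmap_write_protect() and the remote TLB flush keep taking vcpu->kvm,
because rmaps and TLB shootdowns remain VM-wide. The lookup helper is assumed
to forward to the existing slot search, along these lines:

/* Sketch: resolve a gfn against the memslots visible to this vCPU. */
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}
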
                bool protected = false;
 
                for_each_sp(pages, sp, parents, i)
-                       protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
+                       protected |= rmap_write_protect(vcpu, sp->gfn);
 
                if (protected)
                        kvm_flush_remote_tlbs(vcpu->kvm);
        hlist_add_head(&sp->hash_link,
                &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
        if (!direct) {
-               if (rmap_write_protect(vcpu->kvm, gfn))
+               if (rmap_write_protect(vcpu, gfn))
                        kvm_flush_remote_tlbs(vcpu->kvm);
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        kvm_sync_pages(vcpu, gfn);
        u64 spte;
        int ret = 0;
 
-       if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access))
+       if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
                return 0;
 
        spte = PT_PRESENT_MASK;
                 * be fixed if guest refault.
                 */
                if (level > PT_PAGE_TABLE_LEVEL &&
-                   has_wrprotected_page(vcpu->kvm, gfn, level))
+                   has_wrprotected_page(vcpu, gfn, level))
                        goto done;
 
                spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
        }
 
        if (pte_access & ACC_WRITE_MASK) {
-               mark_page_dirty(vcpu->kvm, gfn);
+               kvm_vcpu_mark_page_dirty(vcpu, gfn);
                spte |= shadow_dirty_mask;
        }
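
Dirty logging follows the same pattern: set_spte() now marks the gfn dirty
through the vCPU. Assuming the existing slot-level mark_page_dirty_in_slot()
helper, the wrapper presumably looks like:

/* Sketch: mark a gfn dirty in whatever memslot this vCPU resolves it to. */
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	mark_page_dirty_in_slot(memslot, gfn);
}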
 
                return 1;
 
        if (pfn == KVM_PFN_ERR_HWPOISON) {
-               kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
+               kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
                return 0;
        }
 
        if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
            level == PT_PAGE_TABLE_LEVEL &&
            PageTransCompound(pfn_to_page(pfn)) &&
-           !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
+           !has_wrprotected_page(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
                unsigned long mask;
                /*
                 * mmu_notifier_retry was successful and we hold the
         * Compare with set_spte where instead shadow_dirty_mask is set.
         */
        if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
-               mark_page_dirty(vcpu->kvm, gfn);
+               kvm_vcpu_mark_page_dirty(vcpu, gfn);
 
        return true;
 }
                gfn_t gfn = get_mmio_spte_gfn(spte);
                unsigned access = get_mmio_spte_access(spte);
 
-               if (!check_mmio_spte(vcpu->kvm, spte))
+               if (!check_mmio_spte(vcpu, spte))
                        return RET_MMIO_PF_INVALID;
 
                if (direct)
        arch.direct_map = vcpu->arch.mmu.direct_map;
        arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
-       return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch);
+       return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
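
The async page fault setup above needs the host virtual address of the gfn;
with the vCPU variant that translation also goes through the vCPU's memslot
view. A plausible shape, assuming it reuses gfn_to_hva_memslot() and ignoring
the slot-validity corner cases the real helpers handle:

/* Sketch: gfn -> host virtual address via the vCPU's memslots. */
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return slot ? gfn_to_hva_memslot(slot, gfn) : KVM_HVA_ERR_BAD;
}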
 
 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
        struct kvm_memory_slot *slot;
        bool async;
 
-       slot = gfn_to_memslot(vcpu->kvm, gfn);
+       slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        async = false;
        *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
        if (!async)
        vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
-static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
+static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
                           unsigned access, int *nr_present)
 {
        if (unlikely(is_mmio_spte(*sptep))) {
                }
 
                (*nr_present)++;
-               mark_mmio_spte(kvm, sptep, gfn, access);
+               mark_mmio_spte(vcpu, sptep, gfn, access);
                return true;
        }
 
                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
                *gpa &= ~(gpa_t)7;
                *bytes = 8;
-               r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8);
+               r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
                if (r)
                        gentry = 0;
                new = (const u8 *)&gentry;
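
The kvm_read_guest()/kvm_write_guest() conversions in this and the following
hunks all rely on gpa-based vCPU variants, presumably a page-by-page loop over
the per-page helper, mirroring kvm_read_guest() and reusing kvm_main.c's
next_segment():

/* Sketch: read len bytes of guest memory through the vCPU, page by page. */
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);
	int seg, ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		gfn++;
	}
	return 0;
}
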
        return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
 }
 
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
 {
        /*
         * The very rare case: if the generation-number is round,
         * zap all shadow pages.
         */
-       if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
+       if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
                printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
                kvm_mmu_invalidate_zap_all_pages(kvm);
        }
 
                return;
 
        gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
-       pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
+       pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);
 
        if (is_error_pfn(pfn))
                return;
 
                if (ret)
                        return ret;
 
-               mark_page_dirty(vcpu->kvm, table_gfn);
+               kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
                walker->ptes[level] = pte;
        }
        return 0;
 
                real_gfn = gpa_to_gfn(real_gfn);
 
-               host_addr = gfn_to_hva_prot(vcpu->kvm, real_gfn,
+               host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
                                            &walker->pte_writable[walker->level - 1]);
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;
                base_gpa = pte_gpa & ~mask;
                index = (pte_gpa - base_gpa) / sizeof(pt_element_t);
 
-               r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
+               r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
                                gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
                curr_pte = gw->prefetch_ptes[index];
        } else
-               r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
+               r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
                                  &curr_pte, sizeof(curr_pte));
 
        return r || curr_pte != gw->ptes[level - 1];
                        if (!rmap_can_add(vcpu))
                                break;
 
-                       if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-                                                 sizeof(pt_element_t)))
+                       if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
+                                                      sizeof(pt_element_t)))
                                break;
 
                        FNAME(update_pte)(vcpu, sp, sptep, &gpte);
 
                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
-               if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-                                         sizeof(pt_element_t)))
+               if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
+                                              sizeof(pt_element_t)))
                        return -EINVAL;
 
                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
                pte_access &= FNAME(gpte_access)(vcpu, gpte);
                FNAME(protect_clean_gpte)(&pte_access, gpte);
 
-               if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
+               if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
                      &nr_present))
                        continue;
 
 
        u64 pdpte;
        int ret;
 
-       ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
-                                 offset_in_page(cr3) + index * 8, 8);
+       ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
+                                      offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
        return pdpte;
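
The per-page read used above is expected to be a thin forwarder: resolve the
slot through the vCPU, then fall through to the existing slot-based copy
(a __kvm_read_guest_page() helper in kvm_main.c is assumed here):

/* Sketch: single-page guest read through the vCPU's memslot view. */
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
			     int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}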
 
        might_sleep();
 
-       page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
+       page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                goto error;
 
        mask = (0xf >> (4 - size)) << start_bit;
        val = 0;
 
-       if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len))
+       if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
                return NESTED_EXIT_DONE;
 
        return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
        /* Offset is in 32 bit units but need in 8 bit units */
        offset *= 4;
 
-       if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
+       if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
                return NESTED_EXIT_DONE;
 
        return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
                p      = msrpm_offsets[i];
                offset = svm->nested.vmcb_msrpm + (p * 4);
 
-               if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
+               if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
                        return false;
 
                svm->nested.msrpm[p] = svm->msrpm[p] | value;
 
 
 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
 {
-       struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
+       struct page *page = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT);
        if (is_error_page(page))
                return NULL;
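
nested_get_page() keeps its interface; only the page pinning now goes through
the vCPU. The gfn-to-page variant is presumably pfn-based underneath,
something like:

/* Sketch: pin the struct page backing a gfn, resolved via the vCPU. */
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	pfn_t pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);

	/* kvm_pfn_to_page() is assumed to turn error pfns into error pages. */
	return kvm_pfn_to_page(pfn);
}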
 
                bitmap += (port & 0x7fff) / 8;
 
                if (last_bitmap != bitmap)
-                       if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
+                       if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
                                return true;
                if (b & (1 << (port & 7)))
                        return true;
        /* Then read the msr_index'th bit from this bitmap: */
        if (msr_index < 1024*8) {
                unsigned char b;
-               if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
+               if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
                        return true;
                return 1 & (b >> (msr_index & 7));
        } else
        vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
 }
 
-static void vmx_flush_pml_buffer(struct vcpu_vmx *vmx)
+static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
 {
-       struct kvm *kvm = vmx->vcpu.kvm;
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 *pml_buf;
        u16 pml_idx;
 
 
                gpa = pml_buf[pml_idx];
                WARN_ON(gpa & (PAGE_SIZE - 1));
-               mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
+               kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        }
 
        /* reset PML index */
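
vmx_flush_pml_buffer() now takes the kvm_vcpu so that it can call
kvm_vcpu_mark_page_dirty() directly; the vcpu_vmx is recovered with the usual
container_of() accessor, roughly:

/* Sketch of the accessor the new signature leans on. */
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}
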
         * flushed already.
         */
        if (enable_pml)
-               vmx_flush_pml_buffer(vmx);
+               vmx_flush_pml_buffer(vcpu);
 
        /* If guest state is invalid, start emulating */
        if (vmx->emulation_required)
 
        msr.host_initiated = false;
        for (i = 0; i < count; i++) {
-               if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
-                                  &e, sizeof(e))) {
+               if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
+                                       &e, sizeof(e))) {
                        pr_warn_ratelimited(
                                "%s cannot read MSR entry (%u, 0x%08llx)\n",
                                __func__, i, gpa + i * sizeof(e));
 
        for (i = 0; i < count; i++) {
                struct msr_data msr_info;
-               if (kvm_read_guest(vcpu->kvm,
-                                  gpa + i * sizeof(e),
-                                  &e, 2 * sizeof(u32))) {
+               if (kvm_vcpu_read_guest(vcpu,
+                                       gpa + i * sizeof(e),
+                                       &e, 2 * sizeof(u32))) {
                        pr_warn_ratelimited(
                                "%s cannot read MSR entry (%u, 0x%08llx)\n",
                                __func__, i, gpa + i * sizeof(e));
                                __func__, i, e.index);
                        return -EINVAL;
                }
-               if (kvm_write_guest(vcpu->kvm,
-                                   gpa + i * sizeof(e) +
-                                       offsetof(struct vmx_msr_entry, value),
-                                   &msr_info.data, sizeof(msr_info.data))) {
+               if (kvm_vcpu_write_guest(vcpu,
+                                        gpa + i * sizeof(e) +
+                                            offsetof(struct vmx_msr_entry, value),
+                                        &msr_info.data, sizeof(msr_info.data))) {
                        pr_warn_ratelimited(
                                "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
                                __func__, i, e.index, msr_info.data);
 
 
 /*
  * This function will be used to read from the physical memory of the currently
- * running guest. The difference to kvm_read_guest_page is that this function
+ * running guest. The difference to kvm_vcpu_read_guest_page is that this function
  * can read from guest physical or from the guest's guest physical memory.
  */
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 
        real_gfn = gpa_to_gfn(real_gfn);
 
-       return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
+       return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
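
The write-side conversions (nested_vmx_store_msr above, the Hyper-V hypercall
page and SMM state save below) mirror the read side. The per-page write
wrapper is assumed to forward to a slot-level helper, which would also take
care of marking the page dirty in that slot:

/* Sketch: single-page guest write through the vCPU's memslot view. */
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			      const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	/* __kvm_write_guest_page() (assumed) copies and marks the gfn dirty. */
	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}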
 
                r = PTR_ERR(page);
                goto out;
        }
-       if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
+       if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
                goto out_free;
        r = 0;
 out_free:
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
-               addr = gfn_to_hva(vcpu->kvm, gfn);
+               addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                vcpu->arch.hv_vapic = data;
-               mark_page_dirty(vcpu->kvm, gfn);
+               kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
 
                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
-               ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data,
-                                         offset, toread);
+               ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
+                                              offset, toread);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
                        goto out;
        offset = addr & (PAGE_SIZE-1);
        if (WARN_ON(offset + bytes > PAGE_SIZE))
                bytes = (unsigned)PAGE_SIZE - offset;
-       ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val,
-                                 offset, bytes);
+       ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
+                                      offset, bytes);
        if (unlikely(ret < 0))
                return X86EMUL_IO_NEEDED;
 
 
                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
-               ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
+               ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
                        goto out;
 {
        int ret;
 
-       ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
+       ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
        if (ret < 0)
                return 0;
        kvm_mmu_pte_write(vcpu, gpa, val, bytes);
 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
                        void *val, int bytes)
 {
-       return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
+       return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
 }
 
 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
        if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
                goto emul_write;
 
-       page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+       page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                goto emul_write;
 
        if (!exchanged)
                return X86EMUL_CMPXCHG_FAILED;
 
-       mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
+       kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        kvm_mmu_pte_write(vcpu, gpa, new, bytes);
 
        return X86EMUL_CONTINUE;
        else
                process_smi_save_state_32(vcpu, buf);
 
-       kvm_write_guest(vcpu->kvm, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
+       kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
 
        if (kvm_x86_ops->get_nmi_mask(vcpu))
                vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
         * memslots->generation has been incremented.
         * mmio generation may have reached its maximum value.
         */
-       kvm_mmu_invalidate_mmio_sptes(kvm);
+       kvm_mmu_invalidate_mmio_sptes(kvm, slots);
 }
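
kvm_mmu_invalidate_mmio_sptes() receives the freshly installed memslots
explicitly: the commit path already has the updated slots in hand and runs
without a vCPU context, so the open-coded check in the earlier hunk is just
the slots-based equivalent of kvm_current_mmio_generation(). A hypothetical
helper spelling out that relationship:

/*
 * Illustration only: the MMIO spte generation is the low bits of the
 * memslots generation; when they wrap to zero, all shadow pages are zapped
 * so that stale MMIO sptes cannot alias a fresh generation.
 */
static unsigned int mmio_gen_of(struct kvm_memslots *slots)
{
	return slots->generation & MMIO_GEN_MASK;
}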
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,