Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 37eae55..4a4eec3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
 {
        u64 host_tsc, tsc_offset;
 
-       rdtscll(host_tsc);
+       host_tsc = rdtsc();
        tsc_offset = vmcs_read64(TSC_OFFSET);
        return host_tsc + tsc_offset;
 }
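
The rdtscll() macro is retired here in favour of the plain rdtsc() helper, which returns the 64-bit counter instead of writing it through an output argument. For reference, a minimal sketch of such a read (not the kernel's exact helper, which lives in arch/x86/include/asm/msr.h):

    #include <stdio.h>

    /* Hedged sketch: RDTSC returns the low half of the time-stamp
     * counter in EAX and the high half in EDX. Builds with GCC/Clang
     * on x86; read_tsc_sketch is an illustrative name, not a kernel API. */
    static inline unsigned long long read_tsc_sketch(void)
    {
            unsigned int lo, hi;

            asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
            return ((unsigned long long)hi << 32) | lo;
    }

    int main(void)
    {
            printf("tsc: %llu\n", read_tsc_sketch());
            return 0;
    }

As the hunk above shows, the guest-visible value is then this host reading plus the VMCS TSC_OFFSET field.
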
@@ -2317,7 +2317,7 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
 
 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
-       return target_tsc - native_read_tsc();
+       return target_tsc - rdtsc();
 }
 
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
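
In vmx_compute_tsc_offset(), native_read_tsc() gives way to the same rdtsc() helper; the arithmetic is unchanged. With TSC offsetting enabled the guest observes host_tsc + TSC_OFFSET, so programming an offset of target_tsc - rdtsc() makes the guest's counter read target_tsc at that instant. A hypothetical user-space illustration of the modular arithmetic (not kernel code; the names and sample values are made up):

    #include <assert.h>
    #include <stdint.h>

    /* guest_tsc = host_tsc + offset (mod 2^64), so the offset that makes
     * the guest observe "target" right now is target - host. */
    static uint64_t compute_offset(uint64_t target, uint64_t host)
    {
            return target - host;           /* wraparound is intended */
    }

    int main(void)
    {
            uint64_t host   = 1000000;      /* made-up host TSC sample */
            uint64_t target = 250000;       /* the guest may trail the host */
            uint64_t offset = compute_offset(target, host);

            assert((uint64_t)(host + offset) == target);
            return 0;
    }
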
@@ -3423,12 +3423,12 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
        vmx_segment_cache_clear(to_vmx(vcpu));
 
        guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
-       if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
+       if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
                pr_debug_ratelimited("%s: tss fixup for long mode. \n",
                                     __func__);
                vmcs_write32(GUEST_TR_AR_BYTES,
-                            (guest_tr_ar & ~AR_TYPE_MASK)
-                            | AR_TYPE_BUSY_64_TSS);
+                            (guest_tr_ar & ~VMX_AR_TYPE_MASK)
+                            | VMX_AR_TYPE_BUSY_64_TSS);
        }
        vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
 }
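
Only the macro names change in this hunk (AR_* becomes VMX_AR_*); the long-mode TSS fixup itself is untouched. As a hedged illustration of that fixup using architectural values (the _SKETCH macros below are stand-ins, not the kernel's VMX_AR_* definitions): in the VMX guest access-rights field the segment type occupies bits 3:0, and a busy 64-bit TSS has type 11 (0xB).

    #include <assert.h>
    #include <stdint.h>

    #define AR_TYPE_MASK_SKETCH        0x0f   /* type field, bits 3:0 */
    #define AR_TYPE_BUSY_64_TSS_SKETCH 11     /* architectural busy 64-bit TSS */

    int main(void)
    {
            /* Made-up TR access rights: present, type 3 (busy 16-bit TSS). */
            uint32_t guest_tr_ar = 0x0083;

            /* Same shape as the enter_lmode() fixup: clear the type bits,
             * then set the busy 64-bit TSS type expected in long mode. */
            guest_tr_ar = (guest_tr_ar & ~AR_TYPE_MASK_SKETCH)
                          | AR_TYPE_BUSY_64_TSS_SKETCH;

            assert((guest_tr_ar & AR_TYPE_MASK_SKETCH) == AR_TYPE_BUSY_64_TSS_SKETCH);
            return 0;
    }
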
@@ -3719,7 +3719,7 @@ static int vmx_get_cpl(struct kvm_vcpu *vcpu)
                return 0;
        else {
                int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
-               return AR_DPL(ar);
+               return VMX_AR_DPL(ar);
        }
 }
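
vmx_get_cpl() takes the current privilege level from the DPL bits of SS's access rights; again only the macro spelling changes. A hypothetical equivalent of the accessor, assuming the architectural layout (DPL in bits 6:5 of the AR field; the real macro is VMX_AR_DPL()):

    #include <assert.h>
    #include <stdint.h>

    #define AR_DPL_SKETCH(ar) (((ar) >> 5) & 3)   /* stand-in, not the kernel macro */

    int main(void)
    {
            uint32_t ss_ar = 0x0093 | (3 << 5);   /* made-up SS AR with DPL 3 */

            assert(AR_DPL_SKETCH(ss_ar) == 3);    /* CPL == SS.DPL */
            return 0;
    }
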
 
@@ -3847,11 +3847,11 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu)
 
        if (cs.unusable)
                return false;
-       if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
+       if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
                return false;
        if (!cs.s)
                return false;
-       if (cs.type & AR_TYPE_WRITEABLE_MASK) {
+       if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
                if (cs.dpl > cs_rpl)
                        return false;
        } else {
@@ -3901,7 +3901,7 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
                return false;
        if (!var.present)
                return false;
-       if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
+       if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
                if (var.dpl < rpl) /* DPL < RPL */
                        return false;
        }
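
The two segment-validity helpers likewise only pick up the VMX_ prefix; the type-bit tests are unchanged. As a hedged refresher on the descriptor type layout they rely on (the _SK names below are stand-ins, not the kernel's masks): bit 0 is the accessed bit, bit 1 is readable for code / writable for data, and bit 3 distinguishes code from data segments.

    #include <assert.h>
    #include <stdint.h>

    #define AR_TYPE_ACCESSES_MASK_SK  (1u << 0)
    #define AR_TYPE_WRITEABLE_MASK_SK (1u << 1)
    #define AR_TYPE_CODE_MASK_SK      (1u << 3)

    int main(void)
    {
            uint32_t cs_type = 0xb;   /* made-up CS type: code, readable, accessed */

            /* Same test shape as code_segment_valid(): the segment is rejected
             * if either the code bit or the accessed bit is clear. */
            assert(!(~cs_type & (AR_TYPE_CODE_MASK_SK | AR_TYPE_ACCESSES_MASK_SK)));
            return 0;
    }
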