{
 }
 
+void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
+               ulong pc = kvmppc_get_pc(vcpu);
+               if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
+                       kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
+               vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
+       }
+}
+EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
+
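
As a quick sanity check of the arithmetic, here is a minimal standalone
sketch, assuming the SPLIT_HACK_MASK/SPLIT_HACK_OFFS values this series
introduces in kvm_book3s.h (0xff000000 and 0xfb000000; that hunk is not
shown here). The entry-side fixup ORs the offset into a low real-mode PC,
and the exit side masks it back out, recovering the original address:

    #include <assert.h>

    #define SPLIT_HACK_MASK 0xff000000UL /* assumed value */
    #define SPLIT_HACK_OFFS 0xfb000000UL /* assumed value */

    int main(void)
    {
            unsigned long pc = 0x3f00; /* low real-mode guest PC */

            /* entry: kvmppc_fixup_split_real() moves the PC into the hack region */
            unsigned long fixed = pc | SPLIT_HACK_OFFS;
            assert((fixed & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS);

            /* exit: kvmppc_unfixup_split_real() recovers the original PC */
            assert((fixed & ~SPLIT_HACK_MASK) == pc);
            return 0;
    }
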
 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 {
        if (!is_kvmppc_hv_enabled(vcpu->kvm))
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
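+       /* Restore the guest's real PC before it is saved into SRR0 */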
+       kvmppc_unfixup_split_real(vcpu);
        kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
        kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
        kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
                pte->may_write = true;
                pte->may_execute = true;
                r = 0;
+
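+               /* Instruction fetches in the split-real hack region map
+                * back to the guest's original low real-mode address */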
+               if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
+                   !data) {
+                       if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
+                           ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
+                               pte->raddr &= ~SPLIT_HACK_MASK;
+               }
        }
 
        return r;
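
The SPLIT_HACK test above is the same predicate that kvmppc_handle_pagefault()
applies further down. A hedged sketch of how it could be factored out is shown
below; kvmppc_split_hack_addr() is a hypothetical helper, not part of this patch:

    /* Hypothetical helper: true if addr was relocated into the
     * split-real hack region of this vcpu. */
    static inline bool kvmppc_split_hack_addr(struct kvm_vcpu *vcpu, ulong addr)
    {
            return (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
                   (addr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS;
    }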
 
 #define HW_PAGE_SIZE PAGE_SIZE
 #endif
 
+static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
+{
+       ulong msr = kvmppc_get_msr(vcpu);
+
+       return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
+}
+
+static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
+{
+       ulong msr = kvmppc_get_msr(vcpu);
+       ulong pc = kvmppc_get_pc(vcpu);
+
+       /* Only fix up when the guest is in DR-only split real mode */
+       if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
+               return;
+
+       /* Nothing to do if the guest is already fixed up */
+       if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
+               return;
+
+       /* Only low real-mode addresses can be moved into the hack region */
+       if (pc & SPLIT_HACK_MASK)
+               return;
+
+       vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
+       kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
+}
+
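+/* Implemented in book3s.c; a prototype in a shared header would be cleaner */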
+void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);
+
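
For reference, the four MSR_IR/MSR_DR combinations, only one of which is the
split real mode this patch targets. A minimal classification sketch, assuming
the standard PowerPC MSR bit values:

    #define MSR_IR 0x20UL /* assumed standard PPC value */
    #define MSR_DR 0x10UL /* assumed standard PPC value */

    /* Illustrative only: which translation mode a given MSR selects. */
    static const char *xlate_mode(unsigned long msr)
    {
            switch (msr & (MSR_IR | MSR_DR)) {
            case 0:      return "real mode (IR=0, DR=0)";
            case MSR_DR: return "split real mode (IR=0, DR=1)";
            case MSR_IR: return "instruction-only translation (IR=1, DR=0)";
            default:     return "virtual mode (IR=1, DR=1)";
            }
    }
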
 static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 #ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
 #endif
+
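+       /* Entering the guest: apply the split real PC fixup if needed */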
+       if (kvmppc_is_split_real(vcpu))
+               kvmppc_fixup_split_real(vcpu);
 }
 
 static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
        svcpu_put(svcpu);
 #endif
 
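+       /* Scheduling out: undo the split real PC fixup */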
+       if (kvmppc_is_split_real(vcpu))
+               kvmppc_unfixup_split_real(vcpu);
+
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
 
                }
        }
 
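+       /* The translation mode may have changed; apply or undo the PC fixup */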
+       if (kvmppc_is_split_real(vcpu))
+               kvmppc_fixup_split_real(vcpu);
+       else
+               kvmppc_unfixup_split_real(vcpu);
+
        if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
+               if (!data &&
+                   (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
+                   ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
+                       pte.raddr &= ~SPLIT_HACK_MASK;
+               /* fall through */
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;
 
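+               /* Make sure the split real PC fixup is applied before
+                * we handle the instruction fault */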
+               if (kvmppc_is_split_real(vcpu))
+                       kvmppc_fixup_split_real(vcpu);
+
 #ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */