KVM: PPC: Book3S HV P9: Fixes for TM softpatch interrupt NIP
Author: Nicholas Piggin <npiggin@gmail.com>
Wed, 11 Aug 2021 16:00:37 +0000 (02:00 +1000)
Committer: Michael Ellerman <mpe@ellerman.id.au>
Wed, 25 Aug 2021 06:37:17 +0000 (16:37 +1000)
The softpatch interrupt sets HSRR0 to the faulting instruction +4, so
KVM should subtract 4 to obtain the faulting instruction address when
the interrupt is a TM softpatch interrupt (the instruction was not
executed) and the instruction was not emulated.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210811160134.904987-4-npiggin@gmail.com
arch/powerpc/kvm/book3s_hv_tm.c

index cc90b8b823291594c9d1c6fdd9a921f8b9bea0fa..e7c36f8bf2059b95d7454565aa355db2f6a664b2 100644 (file)
@@ -46,6 +46,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
        u64 newmsr, bescr;
        int ra, rs;
 
+       /*
+        * The TM softpatch interrupt sets NIP to the instruction following
+        * the faulting instruction, which is not executed. Rewind nip to the
+        * faulting instruction so it looks like a normal synchronous
+        * interrupt, then update nip in the places where the instruction is
+        * emulated.
+        */
+       vcpu->arch.regs.nip -= 4;
+
        /*
         * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
         * in these instructions, so masking bit 31 out doesn't change these
@@ -67,7 +76,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                               (newmsr & MSR_TM)));
                newmsr = sanitize_msr(newmsr);
                vcpu->arch.shregs.msr = newmsr;
-               vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+               vcpu->arch.cfar = vcpu->arch.regs.nip;
                vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
                return RESUME_GUEST;
 
@@ -100,7 +109,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                vcpu->arch.bescr = bescr;
                msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
                vcpu->arch.shregs.msr = msr;
-               vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+               vcpu->arch.cfar = vcpu->arch.regs.nip;
                vcpu->arch.regs.nip = vcpu->arch.ebbrr;
                return RESUME_GUEST;
 
@@ -116,6 +125,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
                newmsr = sanitize_msr(newmsr);
                vcpu->arch.shregs.msr = newmsr;
+               vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;
 
        /* ignore bit 31, see comment above */
@@ -152,6 +162,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                                msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
                }
                vcpu->arch.shregs.msr = msr;
+               vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;
 
        /* ignore bit 31, see comment above */
@@ -189,6 +200,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
+               vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;
 
        /* ignore bit 31, see comment above */
@@ -220,6 +232,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                vcpu->arch.shregs.msr = msr | MSR_TS_S;
+               vcpu->arch.regs.nip += 4;
                return RESUME_GUEST;
        }