KVM: PPC: Book3S HV Nested: Fix TM softpatch HFAC interrupt emulation
author Nicholas Piggin <npiggin@gmail.com>
Wed, 11 Aug 2021 16:00:38 +0000 (02:00 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Wed, 25 Aug 2021 06:37:17 +0000 (16:37 +1000)
Have the TM softpatch emulation code set up the HFAC interrupt and
return -1 when a TM instruction is executed with the relevant HFSCR
facility bit clear, and have the interrupt exit handler fall through to
the HFAC handler. When the L0 is running a nested guest, this ensures
the HFAC interrupt is correctly passed up to the L1.
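
As a rough illustration of the new control flow, here is a minimal,
self-contained sketch with hypothetical names (not the kernel code; the
real handlers are kvmppc_handle_exit_hv() and kvmppc_handle_nested_exit()
in the diff below):

    /* Sketch: the softpatch emulation returns -1 when an HFSCR facility
     * is disabled, and the exit handler falls through to the HFAC case
     * so the interrupt can be reflected up to the L1 (nested path). */
    #include <stdio.h>

    enum trap   { TRAP_HV_SOFTPATCH, TRAP_H_FAC_UNAVAIL, TRAP_OTHER };
    enum resume { RESUME_GUEST, RESUME_HOST };

    struct vcpu { enum trap trap; int hfscr_fac_available; };

    /* Stand-in for kvmhv_p9_tm_emulation(): rewrites the trap and
     * returns -1 when the facility is disabled in the HFSCR. */
    static int tm_emulation(struct vcpu *vcpu)
    {
            if (!vcpu->hfscr_fac_available) {
                    vcpu->trap = TRAP_H_FAC_UNAVAIL;
                    return -1;              /* rerun host interrupt handler */
            }
            return RESUME_GUEST;            /* instruction emulated */
    }

    static int handle_exit(struct vcpu *vcpu)
    {
            int r;

            switch (vcpu->trap) {
            case TRAP_HV_SOFTPATCH:
                    r = tm_emulation(vcpu);
                    if (r != -1)
                            break;
                    /* fall through to the facility unavailable handler */
            case TRAP_H_FAC_UNAVAIL:
                    r = RESUME_HOST;        /* pass the HFAC up to the L1 */
                    break;
            default:
                    r = RESUME_GUEST;
            }
            return r;
    }

    int main(void)
    {
            struct vcpu v = { TRAP_HV_SOFTPATCH, 0 };

            printf("resume = %d\n", handle_exit(&v));  /* prints RESUME_HOST */
            return 0;
    }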

The "direct guest" exit handler will turn these into PROGILL program
interrupts so functionality in practice will be unchanged. But it's
possible an L1 would want to handle these in a different way.

While here, also rearrange the FAC interrupt emulation code to match the
HFAC format (mainly by adding the FSCR_INTR_CAUSE mask).
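
For reference, the cause-field update that the FSCR and HFSCR paths now
share looks like the following minimal sketch (the helper name is
hypothetical; the kernel open-codes the two statements at each site,
using FSCR_TM_LG or FSCR_EBB_LG as the cause value):

    #include <stdint.h>

    /* Mirrors the new FSCR_INTR_CAUSE define: the top byte of the
     * (H)FSCR records which facility caused the interrupt. */
    #define FSCR_INTR_CAUSE ((uint64_t)0xFF << 56)

    static inline uint64_t fscr_set_cause(uint64_t fscr, unsigned int fac_lg)
    {
            fscr &= ~FSCR_INTR_CAUSE;           /* clear the previous cause */
            fscr |= (uint64_t)fac_lg << 56;     /* record the new cause */
            return fscr;
    }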

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210811160134.904987-5-npiggin@gmail.com
arch/powerpc/include/asm/reg.h
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_tm.c

index be85cf156a1f5085b41d942c82807b61ac7a668a..e9d27265253b492a5efa5878aa0ea8ddcce79a4b 100644
 #define   FSCR_TAR     __MASK(FSCR_TAR_LG)
 #define   FSCR_EBB     __MASK(FSCR_EBB_LG)
 #define   FSCR_DSCR    __MASK(FSCR_DSCR_LG)
+#define   FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56)      /* interrupt cause */
 #define SPRN_HFSCR     0xbe    /* HV=1 Facility Status & Control Register */
 #define   HFSCR_PREFIX __MASK(FSCR_PREFIX_LG)
 #define   HFSCR_MSGP   __MASK(FSCR_MSGP_LG)
 #define   HFSCR_DSCR   __MASK(FSCR_DSCR_LG)
 #define   HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
 #define   HFSCR_FP     __MASK(FSCR_FP_LG)
-#define   HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56)     /* interrupt cause */
+#define   HFSCR_INTR_CAUSE FSCR_INTR_CAUSE
 #define SPRN_TAR       0x32f   /* Target Address Register */
 #define SPRN_LPCR      0x13E   /* LPAR Control Register */
 #define   LPCR_VPM0            ASM_CONST(0x8000000000000000)
index c402eb0276b0a37d31adc2fd5253016a9d5405cf..e7df8a3ca62c241578a8e20da31cd860faaa54ef 100644
@@ -1679,6 +1679,21 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                        r = RESUME_GUEST;
                }
                break;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       case BOOK3S_INTERRUPT_HV_SOFTPATCH:
+               /*
+                * This occurs for various TM-related instructions that
+                * we need to emulate on POWER9 DD2.2.  We have already
+                * handled the cases where the guest was in real-suspend
+                * mode and was transitioning to transactional state.
+                */
+               r = kvmhv_p9_tm_emulation(vcpu);
+               if (r != -1)
+                       break;
+               fallthrough; /* go to facility unavailable handler */
+#endif
+
        /*
         * This occurs if the guest (kernel or userspace), does something that
         * is prohibited by HFSCR.
@@ -1697,18 +1712,6 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                }
                break;
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-       case BOOK3S_INTERRUPT_HV_SOFTPATCH:
-               /*
-                * This occurs for various TM-related instructions that
-                * we need to emulate on POWER9 DD2.2.  We have already
-                * handled the cases where the guest was in real-suspend
-                * mode and was transitioning to transactional state.
-                */
-               r = kvmhv_p9_tm_emulation(vcpu);
-               break;
-#endif
-
        case BOOK3S_INTERRUPT_HV_RM_HARD:
                r = RESUME_PASSTHROUGH;
                break;
@@ -1811,9 +1814,15 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
                 * mode and was transitioning to transactional state.
                 */
                r = kvmhv_p9_tm_emulation(vcpu);
-               break;
+               if (r != -1)
+                       break;
+               fallthrough; /* go to facility unavailable handler */
 #endif
 
+       case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+               r = RESUME_HOST;
+               break;
+
        case BOOK3S_INTERRUPT_HV_RM_HARD:
                vcpu->arch.trap = 0;
                r = RESUME_GUEST;
index e7c36f8bf2059b95d7454565aa355db2f6a664b2..866cadd70094d4c789792e1223af73acb9cde32f 100644
@@ -88,14 +88,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                }
                /* check EBB facility is available */
                if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
-                       /* generate an illegal instruction interrupt */
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-                       return RESUME_GUEST;
+                       vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+                       vcpu->arch.hfscr |= (u64)FSCR_EBB_LG << 56;
+                       vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+                       return -1; /* rerun host interrupt handler */
                }
                if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
                        /* generate a facility unavailable interrupt */
-                       vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-                               ((u64)FSCR_EBB_LG << 56);
+                       vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+                       vcpu->arch.fscr |= (u64)FSCR_EBB_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
@@ -138,14 +139,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                }
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
-                       /* generate an illegal instruction interrupt */
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-                       return RESUME_GUEST;
+                       vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+                       vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+                       vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+                       return -1; /* rerun host interrupt handler */
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
-                       vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-                               ((u64)FSCR_TM_LG << 56);
+                       vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+                       vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu,
                                                BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
@@ -169,14 +171,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
        case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
-                       /* generate an illegal instruction interrupt */
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-                       return RESUME_GUEST;
+                       vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+                       vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+                       vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+                       return -1; /* rerun host interrupt handler */
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
-                       vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-                               ((u64)FSCR_TM_LG << 56);
+                       vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+                       vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu,
                                                BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
@@ -208,14 +211,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                /* XXX do we need to check for PR=0 here? */
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
-                       /* generate an illegal instruction interrupt */
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-                       return RESUME_GUEST;
+                       vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+                       vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+                       vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+                       return -1; /* rerun host interrupt handler */
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
-                       vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-                               ((u64)FSCR_TM_LG << 56);
+                       vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+                       vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
                        kvmppc_book3s_queue_irqprio(vcpu,
                                                BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;