#define CPTR_EL2_TFP   (1 << CPTR_EL2_TFP_SHIFT)
 #define CPTR_EL2_TZ    (1 << 8)
 #define CPTR_NVHE_EL2_RES1     0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
-#define CPTR_EL2_DEFAULT       CPTR_NVHE_EL2_RES1
 #define CPTR_NVHE_EL2_RES0     (GENMASK(63, 32) |      \
                                 GENMASK(29, 21) |      \
                                 GENMASK(19, 14) |      \
        ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
        ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET)
 
-#define CPACR_EL1_DEFAULT      (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |\
-                                CPACR_EL1_ZEN_EL1EN)
+#define CPACR_EL1_TTA          (1 << 28)
 
 #define kvm_mode_names                         \
        { PSR_MODE_EL0t,        "EL0t" },       \
 
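For reference, the CPACR_EL1-format trap controls used throughout this patch sit at the following positions. This is a sketch mirroring the existing kvm_arm.h definitions, not new code; the authoritative values live in the kernel headers:

#define CPACR_EL1_ZEN_EL1EN    (BIT(16)) /* enable EL1 SVE access */
#define CPACR_EL1_ZEN_EL0EN    (BIT(17)) /* enable EL0 SVE access, if EL1EN set */
#define CPACR_EL1_FPEN_EL1EN   (BIT(20)) /* enable EL1 FP/SIMD access */
#define CPACR_EL1_FPEN_EL0EN   (BIT(21)) /* enable EL0 FP/SIMD access, if EL1EN set */
#define CPACR_EL1_SMEN_EL1EN   (BIT(24)) /* enable EL1 SME access */
#define CPACR_EL1_SMEN_EL0EN   (BIT(25)) /* enable EL0 SME access, if EL1EN set */
/* CPACR_EL1_TTA (bit 28), added above, traps trace register accesses. */
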
        return test_bit(feature, vcpu->arch.features);
 }
 
+static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
+{
+       u64 val;
+
+       if (has_vhe()) {
+               val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
+                      CPACR_EL1_ZEN_EL1EN);
+       } else if (has_hvhe()) {
+               val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+       } else {
+               val = CPTR_NVHE_EL2_RES1;
+
+               if (vcpu_has_sve(vcpu) &&
+                   (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
+                       val |= CPTR_EL2_TZ;
+               if (cpus_have_final_cap(ARM64_SME))
+                       val &= ~CPTR_EL2_TSM;
+       }
+
+       return val;
+}
+
+static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
+{
+       u64 val = kvm_get_reset_cptr_el2(vcpu);
+
+       if (has_vhe() || has_hvhe())
+               write_sysreg(val, cpacr_el1);
+       else
+               write_sysreg(val, cptr_el2);
+}
 #endif /* __ARM64_KVM_EMULATE_H__ */
 
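The three branches of kvm_get_reset_cptr_el2() exist because the two register formats have opposite trap polarity: in the legacy (nVHE) CPTR_EL2 layout a set bit (TFP, TZ, TSM, TTA) enables a trap, whereas in the CPACR_EL1 layout used by VHE and hVHE it is a cleared enable field (FPEN, ZEN, SMEN) that traps. A minimal sketch of that inversion, using a hypothetical helper name:

/* Hypothetical helper, for illustration only: trap guest SVE accesses
 * in whichever layout the running mode uses.
 */
static inline u64 example_trap_sve(u64 cptr, bool cpacr_layout)
{
        if (cpacr_layout)       /* VHE/hVHE: clearing the enables traps */
                return cptr & ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
        return cptr | CPTR_EL2_TZ;      /* nVHE: setting TZ traps */
}
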
        }
 
        vcpu_reset_hcr(vcpu);
-       vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
+       vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
 
        /*
         * Handle the "start in power-off" case.
 
 
        /*
         * If we have VHE then the Hyp code will reset CPACR_EL1 to
-        * CPACR_EL1_DEFAULT and we need to reenable SME.
+        * the default value and we need to reenable SME.
         */
        if (has_vhe() && system_supports_sme()) {
                /* Also restore EL0 state seen on entry */
                /*
                 * The FPSIMD/SVE state in the CPU has not been touched, and we
                 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
-                * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+                * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
                 * for EL0.  To avoid spurious traps, restore the trap state
                 * seen by kvm_arch_vcpu_load_fp():
                 */
 
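The restore this comment refers to is elided by the hunk; as a sketch (assuming the HOST_SVE_ENABLED flag still records the trap state seen at kvm_arch_vcpu_load_fp() time), it re-enables EL0 SVE access only when the host had it enabled:

        if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
                sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
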
        /* Valid trap.  Switch the context: */
 
        /* First disable enough traps to allow us to update the registers */
-       if (has_vhe()) {
+       if (has_vhe() || has_hvhe()) {
                reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
                if (sve_guest)
                        reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
 
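The value built here is then applied with layout-matching polarity; a sketch of how the rest of the branch proceeds (elided by the hunk):

                sysreg_clear_set(cpacr_el1, 0, reg);    /* set enables: untrap */
        } else {
                reg = CPTR_EL2_TFP;
                if (sve_guest)
                        reg |= CPTR_EL2_TZ;

                sysreg_clear_set(cptr_el2, reg, 0);     /* clear trap bits: untrap */
        }
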
                handle_host_smc(host_ctxt);
                break;
        case ESR_ELx_EC_SVE:
-               sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
+               if (has_hvhe())
+                       sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
+                                                       CPACR_EL1_ZEN_EL0EN));
+               else
+                       sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
                isb();
                sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
                break;
 
        u64 hcr_set = HCR_RW;
        u64 hcr_clear = 0;
        u64 cptr_set = 0;
+       u64 cptr_clear = 0;
 
        /* Protected KVM does not support AArch32 guests. */
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
        }
 
        /* Trap SVE */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
-               cptr_set |= CPTR_EL2_TZ;
+       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
+               if (has_hvhe())
+                       cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+               else
+                       cptr_set |= CPTR_EL2_TZ;
+       }
 
        vcpu->arch.hcr_el2 |= hcr_set;
        vcpu->arch.hcr_el2 &= ~hcr_clear;
        vcpu->arch.cptr_el2 |= cptr_set;
+       vcpu->arch.cptr_el2 &= ~cptr_clear;
 }
 
 /*
                mdcr_set |= MDCR_EL2_TTRF;
 
        /* Trap Trace */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
-               cptr_set |= CPTR_EL2_TTA;
+       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
+               if (has_hvhe())
+                       cptr_set |= CPACR_EL1_TTA;
+               else
+                       cptr_set |= CPTR_EL2_TTA;
+       }
 
        vcpu->arch.mdcr_el2 |= mdcr_set;
        vcpu->arch.mdcr_el2 &= ~mdcr_clear;
        /* Clear res0 and set res1 bits to trap potential new features. */
        vcpu->arch.hcr_el2 &= ~(HCR_RES0);
        vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
-       vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
-       vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+       if (!has_hvhe()) {
+               vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
+               vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+       }
 }
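
The RES0/RES1 fix-up is confined to !has_hvhe() because those masks describe the legacy CPTR_EL2 layout; a comment along these lines (not in the patch) summarises the reasoning:

        /*
         * CPTR_NVHE_EL2_RES0/RES1 are defined against the nVHE CPTR_EL2
         * layout.  With hVHE the shadow value is in CPACR_EL1 format, so
         * applying the nVHE masks would set or clear unrelated bits
         * instead of hardening the trap configuration.
         */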
 
 /*
 
        __activate_traps_common(vcpu);
 
        val = vcpu->arch.cptr_el2;
-       val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
+       val |= CPTR_EL2_TAM;    /* Same bit irrespective of E2H */
+       val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
+       if (cpus_have_final_cap(ARM64_SME)) {
+               if (has_hvhe())
+                       val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
+               else
+                       val |= CPTR_EL2_TSM;
+       }
+
        if (!guest_owns_fp_regs(vcpu)) {
-               val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
+               if (has_hvhe())
+                       val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
+                                CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+               else
+                       val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
+
                __activate_traps_fpsimd32(vcpu);
        }
-       if (cpus_have_final_cap(ARM64_SME))
-               val |= CPTR_EL2_TSM;
 
        write_sysreg(val, cptr_el2);
        write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
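
A single write to cptr_el2 covers both configurations; a comment along these lines (not in the patch) captures why:

        /*
         * With HCR_EL2.E2H == 1 (hVHE), CPTR_EL2 accesses follow the
         * CPACR_EL1 layout, so the CPACR_* bits computed above land in
         * the right fields; without E2H the legacy CPTR_EL2 trap bits
         * apply.  TAM is bit 30 in both views, hence the unconditional
         * set above.
         */
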
 static void __deactivate_traps(struct kvm_vcpu *vcpu)
 {
        extern char __kvm_hyp_host_vector[];
-       u64 cptr;
 
        ___deactivate_traps(vcpu);
 
 
        write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
 
-       cptr = CPTR_EL2_DEFAULT;
-       if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
-               cptr |= CPTR_EL2_TZ;
-       if (cpus_have_final_cap(ARM64_SME))
-               cptr &= ~CPTR_EL2_TSM;
-
-       write_sysreg(cptr, cptr_el2);
+       kvm_reset_cptr_el2(vcpu);
        write_sysreg(__kvm_hyp_host_vector, vbar_el2);
 }
 
 
         */
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 
-       write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
+       kvm_reset_cptr_el2(vcpu);
 
        if (!arm64_kernel_unmapped_at_el0())
                host_vectors = __this_cpu_read(this_cpu_vector);
 
        EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
        EL2_REG(HCR_EL2, access_rw, reset_val, 0),
        EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
-       EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_EL2_DEFAULT ),
+       EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
        EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
        EL2_REG(HACR_EL2, access_rw, reset_val, 0),