val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
 
                if (!vcpu_has_sve(vcpu) ||
-                   (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
+                   (*host_data_ptr(fp_owner) != FP_STATE_GUEST_OWNED))
                        val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
                val = CPTR_NVHE_EL2_RES1;
 
                if (vcpu_has_sve(vcpu) &&
-                   (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
+                   (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED))
                        val |= CPTR_EL2_TZ;
                if (cpus_have_final_cap(ARM64_SME))
                        val &= ~CPTR_EL2_TSM;
 
        struct kvm_cpu_context host_ctxt;
        struct user_fpsimd_state *fpsimd_state; /* hyp VA */
 
+       /* Ownership of the FP regs */
+       enum {
+               FP_STATE_FREE,
+               FP_STATE_HOST_OWNED,
+               FP_STATE_GUEST_OWNED,
+       } fp_owner;
+
        /*
         * host_debug_state contains the host registers which are
         * saved and restored during world switches.
        /* Exception Information */
        struct kvm_vcpu_fault_info fault;
 
-       /* Ownership of the FP regs */
-       enum {
-               FP_STATE_FREE,
-               FP_STATE_HOST_OWNED,
-               FP_STATE_GUEST_OWNED,
-       } fp_state;
-
        /* Configuration flags, set once and for all before the vcpu can run */
        u8 cflags;
 
 
 
        vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 
-       /*
-        * Default value for the FP state, will be overloaded at load
-        * time if we support FP (pretty likely)
-        */
-       vcpu->arch.fp_state = FP_STATE_FREE;
-
        /* Set up the timer */
        kvm_timer_vcpu_init(vcpu);
 
 
         * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
         * FP_STATE_FREE if the flag set.
         */
-       vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
+       *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
        *host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
 
        vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
                 * been saved, this is very unlikely to happen.
                 */
                if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
-                       vcpu->arch.fp_state = FP_STATE_FREE;
+                       *host_data_ptr(fp_owner) = FP_STATE_FREE;
                        fpsimd_save_and_flush_cpu_state();
                }
        }
 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
 {
        if (test_thread_flag(TIF_FOREIGN_FPSTATE))
-               vcpu->arch.fp_state = FP_STATE_FREE;
+               *host_data_ptr(fp_owner) = FP_STATE_FREE;
 }
 
 /*
 
        WARN_ON_ONCE(!irqs_disabled());
 
-       if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+       if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED) {
 
                /*
                 * Currently we do not support SME guests so SVCR is
                isb();
        }
 
-       if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+       if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED) {
                if (vcpu_has_sve(vcpu)) {
                        __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
 
 
 /* Check whether the FP regs are owned by the guest */
 static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
+       return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
 }
 
 /* Save the 32-bit only FPSIMD system register state */
        isb();
 
        /* Write out the host state if it's in the registers */
-       if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
+       if (*host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED)
                __fpsimd_save_state(*host_data_ptr(fpsimd_state));
 
        /* Restore the guest state */
        if (!(read_sysreg(hcr_el2) & HCR_RW))
                write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
 
-       vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;
+       *host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;
 
        return true;
 }
 
        hyp_vcpu->vcpu.arch.cptr_el2    = host_vcpu->arch.cptr_el2;
 
        hyp_vcpu->vcpu.arch.iflags      = host_vcpu->arch.iflags;
-       hyp_vcpu->vcpu.arch.fp_state    = host_vcpu->arch.fp_state;
 
        hyp_vcpu->vcpu.arch.debug_ptr   = kern_hyp_va(host_vcpu->arch.debug_ptr);
 
        host_vcpu->arch.fault           = hyp_vcpu->vcpu.arch.fault;
 
        host_vcpu->arch.iflags          = hyp_vcpu->vcpu.arch.iflags;
-       host_vcpu->arch.fp_state        = hyp_vcpu->vcpu.arch.fp_state;
 
        host_cpu_if->vgic_hcr           = hyp_cpu_if->vgic_hcr;
        for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
 
 
        __sysreg_restore_state_nvhe(host_ctxt);
 
-       if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
+       if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED)
                __fpsimd_save_fpexc32(vcpu);
 
        __debug_switch_to_host(vcpu);
 
 
        sysreg_restore_host_state_vhe(host_ctxt);
 
-       if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
+       if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED)
                __fpsimd_save_fpexc32(vcpu);
 
        __debug_switch_to_host(vcpu);