From: Richard Henderson
Date: Mon, 8 Apr 2024 18:44:37 +0000 (-1000)
Subject: target/i386: Convert do_fxsave, do_fxrstor to X86Access
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=6d030aab29f8713776aa2fec31bc94bb98a96e55;p=qemu.git

target/i386: Convert do_fxsave, do_fxrstor to X86Access

Move the alignment fault from do_* to helper_*, as it need not
apply to usage from within user-only signal handling.

Reviewed-by: Paolo Bonzini
Signed-off-by: Richard Henderson
---
diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c
index f21cdb45ea..4dcb0b92ff 100644
--- a/target/i386/tcg/fpu_helper.c
+++ b/target/i386/tcg/fpu_helper.c
@@ -2619,33 +2619,35 @@ static void do_xsave_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
     cpu_stq_data_ra(env, ptr, env->pkru, ra);
 }
 
-static void do_fxsave(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+static void do_fxsave(X86Access *ac, target_ulong ptr)
 {
-    X86Access ac;
-
-    /* The operand must be 16 byte aligned */
-    if (ptr & 0xf) {
-        raise_exception_ra(env, EXCP0D_GPF, ra);
-    }
-
-    access_prepare(&ac, env, ptr, sizeof(X86LegacyXSaveArea),
-                   MMU_DATA_STORE, ra);
-    do_xsave_fpu(&ac, ptr);
+    CPUX86State *env = ac->env;
 
+    do_xsave_fpu(ac, ptr);
     if (env->cr[4] & CR4_OSFXSR_MASK) {
-        do_xsave_mxcsr(&ac, ptr);
+        do_xsave_mxcsr(ac, ptr);
         /* Fast FXSAVE leaves out the XMM registers */
         if (!(env->efer & MSR_EFER_FFXSR)
             || (env->hflags & HF_CPL_MASK)
             || !(env->hflags & HF_LMA_MASK)) {
-            do_xsave_sse(&ac, ptr);
+            do_xsave_sse(ac, ptr);
         }
     }
 }
 
 void helper_fxsave(CPUX86State *env, target_ulong ptr)
 {
-    do_fxsave(env, ptr, GETPC());
+    uintptr_t ra = GETPC();
+    X86Access ac;
+
+    /* The operand must be 16 byte aligned */
+    if (ptr & 0xf) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    access_prepare(&ac, env, ptr, sizeof(X86LegacyXSaveArea),
+                   MMU_DATA_STORE, ra);
+    do_fxsave(&ac, ptr);
 }
 
 static uint64_t get_xinuse(CPUX86State *env)
@@ -2850,33 +2852,35 @@ static void do_xrstor_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
     env->pkru = cpu_ldq_data_ra(env, ptr, ra);
 }
 
-static void do_fxrstor(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+static void do_fxrstor(X86Access *ac, target_ulong ptr)
 {
-    X86Access ac;
-
-    /* The operand must be 16 byte aligned */
-    if (ptr & 0xf) {
-        raise_exception_ra(env, EXCP0D_GPF, ra);
-    }
-
-    access_prepare(&ac, env, ptr, sizeof(X86LegacyXSaveArea),
-                   MMU_DATA_LOAD, ra);
-    do_xrstor_fpu(&ac, ptr);
+    CPUX86State *env = ac->env;
 
+    do_xrstor_fpu(ac, ptr);
     if (env->cr[4] & CR4_OSFXSR_MASK) {
-        do_xrstor_mxcsr(&ac, ptr);
+        do_xrstor_mxcsr(ac, ptr);
         /* Fast FXRSTOR leaves out the XMM registers */
         if (!(env->efer & MSR_EFER_FFXSR)
             || (env->hflags & HF_CPL_MASK)
             || !(env->hflags & HF_LMA_MASK)) {
-            do_xrstor_sse(&ac, ptr);
+            do_xrstor_sse(ac, ptr);
         }
     }
 }
 
 void helper_fxrstor(CPUX86State *env, target_ulong ptr)
 {
-    do_fxrstor(env, ptr, GETPC());
+    uintptr_t ra = GETPC();
+    X86Access ac;
+
+    /* The operand must be 16 byte aligned */
+    if (ptr & 0xf) {
+        raise_exception_ra(env, EXCP0D_GPF, ra);
+    }
+
+    access_prepare(&ac, env, ptr, sizeof(X86LegacyXSaveArea),
+                   MMU_DATA_LOAD, ra);
+    do_fxrstor(&ac, ptr);
 }
 
 static void do_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm, uintptr_t ra)
@@ -3008,12 +3012,20 @@ void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
 
 void cpu_x86_fxsave(CPUX86State *env, target_ulong ptr)
 {
-    do_fxsave(env, ptr, 0);
+    X86Access ac;
+
+    access_prepare(&ac, env, ptr, sizeof(X86LegacyXSaveArea),
+                   MMU_DATA_STORE, 0);
+    do_fxsave(&ac, ptr);
 }
 
 void cpu_x86_fxrstor(CPUX86State *env, target_ulong ptr)
 {
-    do_fxrstor(env, ptr, 0);
+    X86Access ac;
+
+    access_prepare(&ac, env, ptr, sizeof(X86LegacyXSaveArea),
+                   MMU_DATA_LOAD, 0);
+    do_fxrstor(&ac, ptr);
 }
 
 void cpu_x86_xsave(CPUX86State *env, target_ulong ptr)
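
For clarity, a condensed view of the two save-side entry points as they read with this patch applied, reassembled from the hunks above (the restore side is symmetric, using MMU_DATA_LOAD): helper_fxsave(), backing the guest FXSAVE instruction, keeps the 16-byte-alignment #GP check and the GETPC() unwind cookie, while cpu_x86_fxsave(), reached from user-only signal handling, prepares the X86Access directly and never raises a guest exception.

/* Guest instruction path: alignment fault stays here. */
void helper_fxsave(CPUX86State *env, target_ulong ptr)
{
    uintptr_t ra = GETPC();
    X86Access ac;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception_ra(env, EXCP0D_GPF, ra);
    }

    access_prepare(&ac, env, ptr, sizeof(X86LegacyXSaveArea),
                   MMU_DATA_STORE, ra);
    do_fxsave(&ac, ptr);
}

/* User-only signal-frame path: no alignment check, retaddr 0. */
void cpu_x86_fxsave(CPUX86State *env, target_ulong ptr)
{
    X86Access ac;

    access_prepare(&ac, env, ptr, sizeof(X86LegacyXSaveArea),
                   MMU_DATA_STORE, 0);
    do_fxsave(&ac, ptr);
}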