}
if (sz == 1) {
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
} else {
fpst = fpstatus_ptr(FPST_A32);
}
}
if (sz == 1) {
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
} else {
fpst = fpstatus_ptr(FPST_A32);
}
/*
* Do a half-precision operation. Functionally this is
* the same as do_vfp_3op_sp(), except:
- * - it uses the FPST_FPCR_F16
+ * - it uses the FPST_A32_F16
* - it doesn't need the VFP vector handling (fp16 is a
* v8 feature, and in v8 VFP vectors don't exist)
* - it does the aa32_fp16_arith feature test
f0 = tcg_temp_new_i32();
f1 = tcg_temp_new_i32();
fd = tcg_temp_new_i32();
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
vfp_load_reg16(f0, vn);
vfp_load_reg16(f1, vm);
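/*
 * Editor's sketch, not part of the patch: an assumed reconstruction of
 * do_vfp_3op_hp() after the rename, pieced together from the comment and
 * the fragments in the hunk above. The VFPGen3OpSPFn callback type and the
 * reads_vd parameter follow the usual translate-vfp.c idioms but should be
 * treated as assumptions; UNDEF checks beyond the fp16 feature gate are
 * omitted for brevity.
 */
static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    /* fp16 arithmetic is gated on the aa32_fp16_arith feature */
    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }
    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    /* the A32 half-precision float_status selected by the renamed constant */
    fpst = fpstatus_ptr(FPST_A32_F16);

    vfp_load_reg16(f0, vn);
    vfp_load_reg16(f1, vm);
    if (reads_vd) {
        vfp_load_reg16(fd, vd);
    }
    fn(fd, f0, f1, fpst);      /* single element: no VFP short-vector loop */
    vfp_store_reg32(fd, vd);
    return true;
}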
/* VFNMA, VFNMS */
gen_vfp_negh(vd, vd);
}
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
vfp_store_reg32(vd, a->vd);
return true;
static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
{
- gen_helper_vfp_sqrth(vd, vm, fpstatus_ptr(FPST_FPCR_F16));
+ gen_helper_vfp_sqrth(vd, vm, fpstatus_ptr(FPST_A32_F16));
}
static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
tmp = tcg_temp_new_i32();
vfp_load_reg16(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
gen_helper_rinth(tmp, tmp, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
tmp = tcg_temp_new_i32();
vfp_load_reg16(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rinth(tmp, tmp, fpst);
gen_restore_rmode(tcg_rmode, fpst);
tmp = tcg_temp_new_i32();
vfp_load_reg16(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
gen_helper_rinth_exact(tmp, tmp, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
vm = tcg_temp_new_i32();
vfp_load_reg32(vm, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
if (a->s) {
/* i32 -> f16 */
gen_helper_vfp_sitoh(vm, vm, fpst);
vd = tcg_temp_new_i32();
vfp_load_reg32(vd, a->vd);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
shift = tcg_constant_i32(frac_bits);
/* Switch on op:U:sx bits */
return true;
}
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_A32_F16);
vm = tcg_temp_new_i32();
vfp_load_reg16(vm, a->vm);