return ret;
}
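+/*
+ * Negated arithmetic helpers for FNADDs/FNADDd, FNMULs/FNMULd and
+ * FNsMULd: perform the operation, then flip the sign of the result,
+ * subject to the special cases noted in each function.
+ */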
+float32 helper_fnadds(CPUSPARCState *env, float32 src1, float32 src2)
+{
+ float32 ret = float32_add(src1, src2, &env->fp_status);
+
+ /*
+ * NaN inputs or result do not get a sign change.
+ * Nor, apparently, does zero: on hardware, -(x + -x) yields +0.
+ */
+ if (!float32_is_any_nan(ret) && !float32_is_zero(ret)) {
+ ret = float32_chs(ret);
+ }
+ check_ieee_exceptions(env, GETPC());
+ return ret;
+}
+
+float32 helper_fnmuls(CPUSPARCState *env, float32 src1, float32 src2)
+{
+ float32 ret = float32_mul(src1, src2, &env->fp_status);
+
+ /* NaN inputs or result do not get a sign change. */
+ if (!float32_is_any_nan(ret)) {
+ ret = float32_chs(ret);
+ }
+ check_ieee_exceptions(env, GETPC());
+ return ret;
+}
+
+float64 helper_fnaddd(CPUSPARCState *env, float64 src1, float64 src2)
+{
+ float64 ret = float64_add(src1, src2, &env->fp_status);
+
+ /*
+ * NaN inputs or result do not get a sign change.
+ * Nor, apparently, does zero: on hardware, -(x + -x) yields +0.
+ */
+ if (!float64_is_any_nan(ret) && !float64_is_zero(ret)) {
+ ret = float64_chs(ret);
+ }
+ check_ieee_exceptions(env, GETPC());
+ return ret;
+}
+
+float64 helper_fnmuld(CPUSPARCState *env, float64 src1, float64 src2)
+{
+ float64 ret = float64_mul(src1, src2, &env->fp_status);
+
+ /* NaN inputs or result do not get a sign change. */
+ if (!float64_is_any_nan(ret)) {
+ ret = float64_chs(ret);
+ }
+ check_ieee_exceptions(env, GETPC());
+ return ret;
+}
+
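+/* FNsMULd: widen both singles, multiply, then negate the product. */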
+float64 helper_fnsmuld(CPUSPARCState *env, float32 src1, float32 src2)
+{
+ float64 ret = float64_mul(float32_to_float64(src1, &env->fp_status),
+ float32_to_float64(src2, &env->fp_status),
+ &env->fp_status);
+
+ /* NaN inputs or result do not get a sign change. */
+ if (!float64_is_any_nan(ret)) {
+ ret = float64_chs(ret);
+ }
+ check_ieee_exceptions(env, GETPC());
+ return ret;
+}
+
static uint32_t finish_fcmp(CPUSPARCState *env, FloatRelation r, uintptr_t ra)
{
check_ieee_exceptions(env, ra);
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
+DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
DEF_HELPER_FLAGS_3(faddq, TCG_CALL_NO_WG, i128, env, i128, i128)
DEF_HELPER_FLAGS_3(fsubq, TCG_CALL_NO_WG, i128, env, i128, i128)
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
+DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fsmuld, TCG_CALL_NO_WG, f64, env, f32, f32)
+DEF_HELPER_FLAGS_3(fnsmuld, TCG_CALL_NO_WG, f64, env, f32, f32)
DEF_HELPER_FLAGS_3(fdmulq, TCG_CALL_NO_WG, i128, env, f64, f64)
DEF_HELPER_FLAGS_2(fitod, TCG_CALL_NO_WG, f64, env, s32)
gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
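+/*
+ * FHADD/FHSUB/FNHADD compute (src1 +/- src2) / 2, optionally negating
+ * the result, with a single rounding.  Map these onto the fused
+ * multiply-add helpers by fixing the multiplicand at 1.0 and setting
+ * the halve/negate result flags.
+ */
+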
+/* Use muladd to compute ((1 * src1) + src2) / 2 with one rounding. */
+static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
+{
+ TCGv_i32 one = tcg_constant_i32(float32_one);
+ int op = float_muladd_halve_result;
+ gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+}
+
+static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
+{
+ TCGv_i64 one = tcg_constant_i64(float64_one);
+ int op = float_muladd_halve_result;
+ gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+}
+
+/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
+static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
+{
+ TCGv_i32 one = tcg_constant_i32(float32_one);
+ int op = float_muladd_negate_c | float_muladd_halve_result;
+ gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+}
+
+static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
+{
+ TCGv_i64 one = tcg_constant_i64(float64_one);
+ int op = float_muladd_negate_c | float_muladd_halve_result;
+ gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+}
+
+/* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
+static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
+{
+ TCGv_i32 one = tcg_constant_i32(float32_one);
+ int op = float_muladd_negate_result | float_muladd_halve_result;
+ gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+}
+
+static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
+{
+ TCGv_i64 one = tcg_constant_i64(float64_one);
+ int op = float_muladd_negate_result | float_muladd_halve_result;
+ gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
+}
+
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
/*
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
+TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
+TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
+TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
+
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
+TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
+TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
static bool do_dff(DisasContext *dc, arg_r_r_r *a,
void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
+TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
+TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
+TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
+
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
+TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
+TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
return advance_pc(dc);
}
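+/* FNsMULd: as FsMULd, but the helper negates the (non-NaN) product. */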
+static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
+{
+ TCGv_i64 dst;
+ TCGv_i32 src1, src2;
+
+ if (!avail_VIS3(dc)) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ dst = tcg_temp_new_i64();
+ src1 = gen_load_fpr_F(dc, a->rs1);
+ src2 = gen_load_fpr_F(dc, a->rs2);
+ gen_helper_fnsmuld(dst, tcg_env, src1, src2);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{