DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
FACGT_s 0111 1110 110 ..... 00101 1 ..... ..... @rrr_h
FACGT_s 0111 1110 1.1 ..... 11101 1 ..... ..... @rrr_sd
+FABD_s 0111 1110 110 ..... 00010 1 ..... ..... @rrr_h
+FABD_s 0111 1110 1.1 ..... 11010 1 ..... ..... @rrr_sd
+
### Advanced SIMD three same
FADD_v 0.00 1110 010 ..... 00010 1 ..... ..... @qrrr_h
FACGT_v 0.10 1110 110 ..... 00101 1 ..... ..... @qrrr_h
FACGT_v 0.10 1110 1.1 ..... 11101 1 ..... ..... @qrrr_sd
+FABD_v 0.10 1110 110 ..... 00010 1 ..... ..... @qrrr_h
+FABD_v 0.10 1110 1.1 ..... 11010 1 ..... ..... @qrrr_sd
+
### Advanced SIMD scalar x indexed element
FMUL_si 0101 1111 00 .. .... 1001 . 0 ..... ..... @rrx_h
};
TRANS(FACGT_s, do_fp3_scalar, a, &f_scalar_facgt)
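+/*
+ * FABD: floating-point absolute difference, |n - m|.
+ * Expanded as a subtraction followed by taking the absolute value,
+ * for each of the three element sizes.
+ */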
+static void gen_fabd_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
+{
+ gen_helper_vfp_subh(d, n, m, s);
+ gen_vfp_absh(d, d);
+}
+
+static void gen_fabd_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
+{
+ gen_helper_vfp_subs(d, n, m, s);
+ gen_vfp_abss(d, d);
+}
+
+static void gen_fabd_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s)
+{
+ gen_helper_vfp_subd(d, n, m, s);
+ gen_vfp_absd(d, d);
+}
+
+static const FPScalar f_scalar_fabd = {
+ gen_fabd_h,
+ gen_fabd_s,
+ gen_fabd_d,
+};
+TRANS(FABD_s, do_fp3_scalar, a, &f_scalar_fabd)
+
static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a,
gen_helper_gvec_3_ptr * const fns[3])
{
};
TRANS(FACGT_v, do_fp3_vector, a, f_vector_facgt)
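+/* gvec helpers for the vector form of FABD, one per element size (fp16, fp32, fp64). */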
+static gen_helper_gvec_3_ptr * const f_vector_fabd[3] = {
+ gen_helper_gvec_fabd_h,
+ gen_helper_gvec_fabd_s,
+ gen_helper_gvec_fabd_d,
+};
+TRANS(FABD_v, do_fp3_vector, a, f_vector_fabd)
+
/*
* Advanced SIMD scalar/vector x indexed element
*/
case 0x3f: /* FRSQRTS */
gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
break;
- case 0x7a: /* FABD */
- gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
- gen_vfp_absd(tcg_res, tcg_res);
- break;
default:
case 0x18: /* FMAXNM */
case 0x19: /* FMLA */
case 0x5c: /* FCMGE */
case 0x5d: /* FACGE */
case 0x5f: /* FDIV */
+ case 0x7a: /* FABD */
case 0x7c: /* FCMGT */
case 0x7d: /* FACGT */
g_assert_not_reached();
case 0x3f: /* FRSQRTS */
gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
break;
- case 0x7a: /* FABD */
- gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
- gen_vfp_abss(tcg_res, tcg_res);
- break;
default:
case 0x18: /* FMAXNM */
case 0x19: /* FMLA */
case 0x5c: /* FCMGE */
case 0x5d: /* FACGE */
case 0x5f: /* FDIV */
+ case 0x7a: /* FABD */
case 0x7c: /* FCMGT */
case 0x7d: /* FACGT */
g_assert_not_reached();
switch (fpopcode) {
case 0x1f: /* FRECPS */
case 0x3f: /* FRSQRTS */
- case 0x7a: /* FABD */
break;
default:
case 0x1b: /* FMULX */
case 0x7d: /* FACGT */
case 0x1c: /* FCMEQ */
case 0x5c: /* FCMGE */
+ case 0x7a: /* FABD */
case 0x7c: /* FCMGT */
unallocated_encoding(s);
return;
switch (fpopcode) {
case 0x07: /* FRECPS */
case 0x0f: /* FRSQRTS */
- case 0x1a: /* FABD */
break;
default:
case 0x03: /* FMULX */
case 0x04: /* FCMEQ (reg) */
case 0x14: /* FCMGE (reg) */
case 0x15: /* FACGE */
+ case 0x1a: /* FABD */
case 0x1c: /* FCMGT (reg) */
case 0x1d: /* FACGT */
unallocated_encoding(s);
case 0x0f: /* FRSQRTS */
gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
break;
- case 0x1a: /* FABD */
- gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
- tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
- break;
default:
case 0x03: /* FMULX */
case 0x04: /* FCMEQ (reg) */
case 0x14: /* FCMGE (reg) */
case 0x15: /* FACGE */
+ case 0x1a: /* FABD */
case 0x1c: /* FCMGT (reg) */
case 0x1d: /* FACGT */
g_assert_not_reached();
return;
case 0x1f: /* FRECPS */
case 0x3f: /* FRSQRTS */
- case 0x7a: /* FABD */
if (!fp_access_check(s)) {
return;
}
case 0x5c: /* FCMGE */
case 0x5d: /* FACGE */
case 0x5f: /* FDIV */
+ case 0x7a: /* FABD */
case 0x7d: /* FACGT */
case 0x7c: /* FCMGT */
unallocated_encoding(s);
switch (fpopcode) {
case 0x7: /* FRECPS */
case 0xf: /* FRSQRTS */
- case 0x1a: /* FABD */
pairwise = false;
break;
case 0x10: /* FMAXNMP */
case 0x14: /* FCMGE */
case 0x15: /* FACGE */
case 0x17: /* FDIV */
+ case 0x1a: /* FABD */
case 0x1c: /* FCMGT */
case 0x1d: /* FACGT */
unallocated_encoding(s);
case 0xf: /* FRSQRTS */
gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
break;
- case 0x1a: /* FABD */
- gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
- tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
- break;
default:
case 0x0: /* FMAXNM */
case 0x1: /* FMLA */
case 0x14: /* FCMGE */
case 0x15: /* FACGE */
case 0x17: /* FDIV */
+ case 0x1a: /* FABD */
case 0x1c: /* FCMGT */
case 0x1d: /* FACGT */
g_assert_not_reached();
return float32_abs(float32_sub(op1, op2, stat));
}
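+/* As float16_abd/float32_abd above, but for 64-bit elements. */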
+static float64 float64_abd(float64 op1, float64 op2, float_status *stat)
+{
+ return float64_abs(float64_sub(op1, op2, stat));
+}
+
/*
* Reciprocal step. These are the AArch32 version which uses a
* non-fused multiply-and-subtract.
DO_3OP(gvec_fabd_h, float16_abd, float16)
DO_3OP(gvec_fabd_s, float32_abd, float32)
+DO_3OP(gvec_fabd_d, float64_abd, float64)
DO_3OP(gvec_fceq_h, float16_ceq, float16)
DO_3OP(gvec_fceq_s, float32_ceq, float32)