};
TRANS(FRSQRTS_v, do_fp3_vector, a, f_vector_frsqrts)
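+/*
+ * FADDP (vector): floating-point pairwise add.  The per-size
+ * gvec_faddp_* helpers are generated by the DO_3OP_PAIR macro
+ * added later in this patch.
+ */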
+static gen_helper_gvec_3_ptr * const f_vector_faddp[3] = {
+    gen_helper_gvec_faddp_h,
+    gen_helper_gvec_faddp_s,
+    gen_helper_gvec_faddp_d,
+};
+TRANS(FADDP_v, do_fp3_vector, a, f_vector_faddp)
+
/*
* Advanced SIMD scalar/vector x indexed element
*/
TRANS(FMLA_vi, do_fmla_vector_idx, a, false)
TRANS(FMLS_vi, do_fmla_vector_idx, a, true)
+/*
+ * Advanced SIMD scalar pairwise
+ */
+
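+/*
+ * Common expander for the scalar pairwise ops: apply the scalar
+ * operation to elements 0 and 1 of Vn and write the scalar result
+ * to Vd.  MO_16 additionally requires FEAT_FP16.
+ */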
+static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f)
+{
+    switch (a->esz) {
+    case MO_64:
+        if (fp_access_check(s)) {
+            TCGv_i64 t0 = tcg_temp_new_i64();
+            TCGv_i64 t1 = tcg_temp_new_i64();
+
+            read_vec_element(s, t0, a->rn, 0, MO_64);
+            read_vec_element(s, t1, a->rn, 1, MO_64);
+            f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
+            write_fp_dreg(s, a->rd, t0);
+        }
+        break;
+    case MO_32:
+        if (fp_access_check(s)) {
+            TCGv_i32 t0 = tcg_temp_new_i32();
+            TCGv_i32 t1 = tcg_temp_new_i32();
+
+            read_vec_element_i32(s, t0, a->rn, 0, MO_32);
+            read_vec_element_i32(s, t1, a->rn, 1, MO_32);
+            f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
+            write_fp_sreg(s, a->rd, t0);
+        }
+        break;
+    case MO_16:
+        if (!dc_isar_feature(aa64_fp16, s)) {
+            return false;
+        }
+        if (fp_access_check(s)) {
+            TCGv_i32 t0 = tcg_temp_new_i32();
+            TCGv_i32 t1 = tcg_temp_new_i32();
+
+            read_vec_element_i32(s, t0, a->rn, 0, MO_16);
+            read_vec_element_i32(s, t1, a->rn, 1, MO_16);
+            f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
+            write_fp_sreg(s, a->rd, t0);
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    return true;
+}
+
+TRANS(FADDP_s, do_fp3_scalar_pair, a, &f_scalar_fadd)
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
* Note that it is the caller's responsibility to ensure that the
fpst = NULL;
break;
case 0xc: /* FMAXNMP */
- case 0xd: /* FADDP */
case 0xf: /* FMAXP */
case 0x2c: /* FMINNMP */
case 0x2f: /* FMINP */
fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
break;
default:
+ case 0xd: /* FADDP */
unallocated_encoding(s);
return;
}
case 0xc: /* FMAXNMP */
gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
break;
- case 0xd: /* FADDP */
- gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
- break;
case 0xf: /* FMAXP */
gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
break;
gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
break;
default:
+ case 0xd: /* FADDP */
g_assert_not_reached();
}
case 0xc: /* FMAXNMP */
gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
break;
- case 0xd: /* FADDP */
- gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
- break;
case 0xf: /* FMAXP */
gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
break;
gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
break;
default:
+ case 0xd: /* FADDP */
g_assert_not_reached();
}
} else {
case 0xc: /* FMAXNMP */
gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
break;
- case 0xd: /* FADDP */
- gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
- break;
case 0xf: /* FMAXP */
gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
break;
gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
break;
default:
+ case 0xd: /* FADDP */
g_assert_not_reached();
}
}
case 0x58: /* FMAXNMP */
gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
break;
- case 0x5a: /* FADDP */
- gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
- break;
case 0x5e: /* FMAXP */
gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
break;
gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
break;
default:
+ case 0x5a: /* FADDP */
g_assert_not_reached();
}
}
case 0x58: /* FMAXNMP */
gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
break;
- case 0x5a: /* FADDP */
- gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
- break;
case 0x5e: /* FMAXP */
gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
break;
gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
break;
default:
+ case 0x5a: /* FADDP */
g_assert_not_reached();
}
switch (fpopcode) {
case 0x58: /* FMAXNMP */
- case 0x5a: /* FADDP */
case 0x5e: /* FMAXP */
case 0x78: /* FMINNMP */
case 0x7e: /* FMINP */
case 0x3a: /* FSUB */
case 0x3e: /* FMIN */
case 0x3f: /* FRSQRTS */
+ case 0x5a: /* FADDP */
case 0x5b: /* FMUL */
case 0x5c: /* FCMGE */
case 0x5d: /* FACGE */
switch (fpopcode) {
case 0x10: /* FMAXNMP */
- case 0x12: /* FADDP */
case 0x16: /* FMAXP */
case 0x18: /* FMINNMP */
case 0x1e: /* FMINP */
case 0xa: /* FSUB */
case 0xe: /* FMIN */
case 0xf: /* FRSQRTS */
+ case 0x12: /* FADDP */
case 0x13: /* FMUL */
case 0x14: /* FCMGE */
case 0x15: /* FACGE */
gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
fpst);
break;
- case 0x12: /* FADDP */
- gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
- break;
case 0x16: /* FMAXP */
gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
break;
gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
break;
default:
+ case 0x12: /* FADDP */
g_assert_not_reached();
}
}
#undef DO_NEON_PAIRWISE
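+/*
+ * Pairwise helpers: the low half of the result is the pairwise
+ * reduction of Vn and the high half that of Vm, i.e.
+ *   d[i]        = FUNC(n[2*i], n[2*i+1])   for i < half
+ *   d[i + half] = FUNC(m[2*i], m[2*i+1])   for i < half
+ * e.g. FADDP Vd.4S, Vn.4S, Vm.4S gives
+ *   Vd = { n0+n1, n2+n3, m0+m1, m2+m3 }.
+ * m must be copied to a scratch buffer when it aliases d, because the
+ * second loop would otherwise read elements that the first loop (or an
+ * earlier iteration) has already overwritten; d == n is safe, since
+ * iteration i only writes d[i] and later iterations read n indices
+ * greater than i.
+ */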
+#define DO_3OP_PAIR(NAME, FUNC, TYPE, H)                                    \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
+{                                                                           \
+    ARMVectorReg scratch;                                                   \
+    intptr_t oprsz = simd_oprsz(desc);                                      \
+    intptr_t half = oprsz / sizeof(TYPE) / 2;                               \
+    TYPE *d = vd, *n = vn, *m = vm;                                         \
+    if (unlikely(d == m)) {                                                 \
+        m = memcpy(&scratch, m, oprsz);                                     \
+    }                                                                       \
+    for (intptr_t i = 0; i < half; ++i) {                                   \
+        d[H(i)] = FUNC(n[H(i * 2)], n[H(i * 2 + 1)], stat);                 \
+    }                                                                       \
+    for (intptr_t i = 0; i < half; ++i) {                                   \
+        d[H(i + half)] = FUNC(m[H(i * 2)], m[H(i * 2 + 1)], stat);          \
+    }                                                                       \
+    clear_tail(d, oprsz, simd_maxsz(desc));                                 \
+}
+
+DO_3OP_PAIR(gvec_faddp_h, float16_add, float16, H2)
+DO_3OP_PAIR(gvec_faddp_s, float32_add, float32, H4)
+DO_3OP_PAIR(gvec_faddp_d, float64_add, float64, )
+
#define DO_VCVT_FIXED(NAME, FUNC, TYPE) \
void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
{ \