&2scalar vm=%vm_dp vn=%vn_dp vd=%vd_dp
VMLA_2sc         1111 001 . 1 . .. .... .... 0000 . 1 . 0 .... @2scalar
+VMLA_F_2sc       1111 001 . 1 . .. .... .... 0001 . 1 . 0 .... @2scalar
VMLS_2sc         1111 001 . 1 . .. .... .... 0100 . 1 . 0 .... @2scalar
+VMLS_F_2sc       1111 001 . 1 . .. .... .... 0101 . 1 . 0 .... @2scalar
VMUL_2sc         1111 001 . 1 . .. .... .... 1000 . 1 . 0 .... @2scalar
+VMUL_F_2sc       1111 001 . 1 . .. .... .... 1001 . 1 . 0 .... @2scalar
]
}
return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
}
+
+/*
+ * Rather than have a float-specific version of do_2scalar just for
+ * three insns, we wrap a NeonGenTwoSingleOpFn to turn it into
+ * a NeonGenTwoOpFn.
+ */
+#define WRAP_FP_FN(WRAPNAME, FUNC)                               \
+    static void WRAPNAME(TCGv_i32 rd, TCGv_i32 rn, TCGv_i32 rm)  \
+    {                                                             \
+        TCGv_ptr fpstatus = get_fpstatus_ptr(1);                  \
+        FUNC(rd, rn, rm, fpstatus);                               \
+        tcg_temp_free_ptr(fpstatus);                              \
+    }
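+
+/*
+ * For example, WRAP_FP_FN(gen_VMUL_F_mul, gen_helper_vfp_muls) expands to:
+ *
+ *   static void gen_VMUL_F_mul(TCGv_i32 rd, TCGv_i32 rn, TCGv_i32 rm)
+ *   {
+ *       TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ *       gen_helper_vfp_muls(rd, rn, rm, fpstatus);
+ *       tcg_temp_free_ptr(fpstatus);
+ *   }
+ */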
+
+WRAP_FP_FN(gen_VMUL_F_mul, gen_helper_vfp_muls)
+WRAP_FP_FN(gen_VMUL_F_add, gen_helper_vfp_adds)
+WRAP_FP_FN(gen_VMUL_F_sub, gen_helper_vfp_subs)
+
+static bool trans_VMUL_F_2sc(DisasContext *s, arg_2scalar *a)
+{
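+    /*
+     * Indexed by a->size: the NULL entries (invalid sizes 0 and 3,
+     * and fp16 until it is implemented) make do_2scalar reject the
+     * encoding.
+     */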
+    static NeonGenTwoOpFn * const opfn[] = {
+        NULL,
+        NULL, /* TODO: fp16 support */
+        gen_VMUL_F_mul,
+        NULL,
+    };
+
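+    /* VMUL by scalar has no accumulate step, so no accfn is passed */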
+    return do_2scalar(s, a, opfn[a->size], NULL);
+}
+
+static bool trans_VMLA_F_2sc(DisasContext *s, arg_2scalar *a)
+{
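+    /* VMLA: Vd = Vd + (Vn * scalar); the multiply op is shared with VMUL */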
+    static NeonGenTwoOpFn * const opfn[] = {
+        NULL,
+        NULL, /* TODO: fp16 support */
+        gen_VMUL_F_mul,
+        NULL,
+    };
+    static NeonGenTwoOpFn * const accfn[] = {
+        NULL,
+        NULL, /* TODO: fp16 support */
+        gen_VMUL_F_add,
+        NULL,
+    };
+
+    return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
+}
+
+static bool trans_VMLS_F_2sc(DisasContext *s, arg_2scalar *a)
+{
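+    /* VMLS: Vd = Vd - (Vn * scalar); only the accumulate op differs from VMLA */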
+    static NeonGenTwoOpFn * const opfn[] = {
+        NULL,
+        NULL, /* TODO: fp16 support */
+        gen_VMUL_F_mul,
+        NULL,
+    };
+    static NeonGenTwoOpFn * const accfn[] = {
+        NULL,
+        NULL, /* TODO: fp16 support */
+        gen_VMUL_F_sub,
+        NULL,
+    };
+
+    return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
+}
        case 0: /* Integer VMLA scalar */
        case 4: /* Integer VMLS scalar */
        case 8: /* Integer VMUL scalar */
-            return 1; /* handled by decodetree */
-
        case 1: /* Float VMLA scalar */
        case 5: /* Floating point VMLS scalar */
        case 9: /* Floating point VMUL scalar */
-            if (size == 1) {
-                return 1;
-            }
-            /* fall through */
+            return 1; /* handled by decodetree */
+
        case 12: /* VQDMULH scalar */
        case 13: /* VQRDMULH scalar */
            if (u && ((rd | rn) & 1)) {
                    } else {
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                    }
-                } else if (op == 13) {
+                } else {
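+                    /* Only op == 13 (VQRDMULH) reaches this else now */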
                    if (size == 1) {
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                    } else {
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                    }
-                } else {
-                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
-                    gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
-                    tcg_temp_free_ptr(fpstatus);
                }
                tcg_temp_free_i32(tmp2);
-                if (op < 8) {
-                    /* Accumulate. */
-                    tmp2 = neon_load_reg(rd, pass);
-                    switch (op) {
-                    case 1:
-                    {
-                        TCGv_ptr fpstatus = get_fpstatus_ptr(1);
-                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
-                        tcg_temp_free_ptr(fpstatus);
-                        break;
-                    }
-                    case 5:
-                    {
-                        TCGv_ptr fpstatus = get_fpstatus_ptr(1);
-                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
-                        tcg_temp_free_ptr(fpstatus);
-                        break;
-                    }
-                    default:
-                        abort();
-                    }
-                    tcg_temp_free_i32(tmp2);
-                }
                neon_store_reg(rd, pass, tmp);
            }
            break;