tcg_debug_assert(vece <= MO_32);
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
}
+
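+/*
+ * Signed halving subtract: compute (a - b) >> 1 per element without
+ * widening.  (a - b) >> 1 == (a >> 1) - (b >> 1) - borrow, where the
+ * borrow out of bit 0 is set only when bit 0 of a is clear and bit 0
+ * of b is set, i.e. (~a & b) & 1.
+ */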
+static void gen_shsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andc_i64(t, b, a);
+ tcg_gen_vec_sar8i_i64(a, a, 1);
+ tcg_gen_vec_sar8i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+ tcg_gen_vec_sub8_i64(d, a, b);
+ tcg_gen_vec_sub8_i64(d, d, t);
+}
+
+static void gen_shsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andc_i64(t, b, a);
+ tcg_gen_vec_sar16i_i64(a, a, 1);
+ tcg_gen_vec_sar16i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+ tcg_gen_vec_sub16_i64(d, a, b);
+ tcg_gen_vec_sub16_i64(d, d, t);
+}
+
+static void gen_shsub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_andc_i32(t, b, a);
+ tcg_gen_sari_i32(a, a, 1);
+ tcg_gen_sari_i32(b, b, 1);
+ tcg_gen_andi_i32(t, t, 1);
+ tcg_gen_sub_i32(d, a, b);
+ tcg_gen_sub_i32(d, d, t);
+}
+
+static void gen_shsub_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_andc_vec(vece, t, b, a);
+ tcg_gen_sari_vec(vece, a, a, 1);
+ tcg_gen_sari_vec(vece, b, b, 1);
+ tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+ tcg_gen_sub_vec(vece, d, a, b);
+ tcg_gen_sub_vec(vece, d, d, t);
+}
+
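+/*
+ * tcg_gen_gvec_3 expands with .fniv when the host supports the vector
+ * ops in vecop_list, and otherwise falls back to the .fni8/.fni4
+ * integer expansions above.
+ */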
+void gen_gvec_shsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 g[4] = {
+ { .fni8 = gen_shsub8_i64,
+ .fniv = gen_shsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_shsub16_i64,
+ .fniv = gen_shsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_shsub_i32,
+ .fniv = gen_shsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ };
+ tcg_debug_assert(vece <= MO_32);
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
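+/*
+ * Unsigned halving subtract: as above, but with logical shifts.  The
+ * per-element borrow out of bit 0 is again (~a & b) & 1.
+ */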
+static void gen_uhsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andc_i64(t, b, a);
+ tcg_gen_vec_shr8i_i64(a, a, 1);
+ tcg_gen_vec_shr8i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+ tcg_gen_vec_sub8_i64(d, a, b);
+ tcg_gen_vec_sub8_i64(d, d, t);
+}
+
+static void gen_uhsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andc_i64(t, b, a);
+ tcg_gen_vec_shr16i_i64(a, a, 1);
+ tcg_gen_vec_shr16i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+ tcg_gen_vec_sub16_i64(d, a, b);
+ tcg_gen_vec_sub16_i64(d, d, t);
+}
+
+static void gen_uhsub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_andc_i32(t, b, a);
+ tcg_gen_shri_i32(a, a, 1);
+ tcg_gen_shri_i32(b, b, 1);
+ tcg_gen_andi_i32(t, t, 1);
+ tcg_gen_sub_i32(d, a, b);
+ tcg_gen_sub_i32(d, d, t);
+}
+
+static void gen_uhsub_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_andc_vec(vece, t, b, a);
+ tcg_gen_shri_vec(vece, a, a, 1);
+ tcg_gen_shri_vec(vece, b, b, 1);
+ tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+ tcg_gen_sub_vec(vece, d, a, b);
+ tcg_gen_sub_vec(vece, d, d, t);
+}
+
+void gen_gvec_uhsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 g[4] = {
+ { .fni8 = gen_uhsub8_i64,
+ .fniv = gen_uhsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_uhsub16_i64,
+ .fniv = gen_uhsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_uhsub_i32,
+ .fniv = gen_uhsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ };
+ tcg_debug_assert(vece <= MO_32);
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}