INSN_LSX(vadda_h, vvv)
INSN_LSX(vadda_w, vvv)
INSN_LSX(vadda_d, vvv)
+
+/*
+ * Disassembly entries for the LSX vector max/min family:
+ * register-register forms (vvv) and immediate forms (vv_i),
+ * signed and unsigned, for 8/16/32/64-bit elements.
+ */
+INSN_LSX(vmax_b, vvv)
+INSN_LSX(vmax_h, vvv)
+INSN_LSX(vmax_w, vvv)
+INSN_LSX(vmax_d, vvv)
+INSN_LSX(vmin_b, vvv)
+INSN_LSX(vmin_h, vvv)
+INSN_LSX(vmin_w, vvv)
+INSN_LSX(vmin_d, vvv)
+INSN_LSX(vmax_bu, vvv)
+INSN_LSX(vmax_hu, vvv)
+INSN_LSX(vmax_wu, vvv)
+INSN_LSX(vmax_du, vvv)
+INSN_LSX(vmin_bu, vvv)
+INSN_LSX(vmin_hu, vvv)
+INSN_LSX(vmin_wu, vvv)
+INSN_LSX(vmin_du, vvv)
+INSN_LSX(vmaxi_b, vv_i)
+INSN_LSX(vmaxi_h, vv_i)
+INSN_LSX(vmaxi_w, vv_i)
+INSN_LSX(vmaxi_d, vv_i)
+INSN_LSX(vmini_b, vv_i)
+INSN_LSX(vmini_h, vv_i)
+INSN_LSX(vmini_w, vv_i)
+INSN_LSX(vmini_d, vv_i)
+INSN_LSX(vmaxi_bu, vv_i)
+INSN_LSX(vmaxi_hu, vv_i)
+INSN_LSX(vmaxi_wu, vv_i)
+INSN_LSX(vmaxi_du, vv_i)
+INSN_LSX(vmini_bu, vv_i)
+INSN_LSX(vmini_hu, vv_i)
+INSN_LSX(vmini_wu, vv_i)
+INSN_LSX(vmini_du, vv_i)
DEF_HELPER_FLAGS_4(vadda_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(vadda_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(vadda_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+/*
+ * Out-of-line helpers for the immediate min/max forms:
+ * (dest vreg, src vreg, immediate, i32 descriptor).
+ * Only the immediate variants need helpers; the register-register
+ * forms are expanded inline via the generic gvec smin/smax/umin/umax.
+ */
+DEF_HELPER_FLAGS_4(vmini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vmaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
TRANS(vadda_h, gvec_vvv, MO_16, do_vadda)
TRANS(vadda_w, gvec_vvv, MO_32, do_vadda)
TRANS(vadda_d, gvec_vvv, MO_64, do_vadda)
+
+/*
+ * Register-register vmax/vmin map directly onto the generic gvec
+ * signed/unsigned max/min expanders; no custom expansion is needed.
+ */
+TRANS(vmax_b, gvec_vvv, MO_8, tcg_gen_gvec_smax)
+TRANS(vmax_h, gvec_vvv, MO_16, tcg_gen_gvec_smax)
+TRANS(vmax_w, gvec_vvv, MO_32, tcg_gen_gvec_smax)
+TRANS(vmax_d, gvec_vvv, MO_64, tcg_gen_gvec_smax)
+TRANS(vmax_bu, gvec_vvv, MO_8, tcg_gen_gvec_umax)
+TRANS(vmax_hu, gvec_vvv, MO_16, tcg_gen_gvec_umax)
+TRANS(vmax_wu, gvec_vvv, MO_32, tcg_gen_gvec_umax)
+TRANS(vmax_du, gvec_vvv, MO_64, tcg_gen_gvec_umax)
+
+TRANS(vmin_b, gvec_vvv, MO_8, tcg_gen_gvec_smin)
+TRANS(vmin_h, gvec_vvv, MO_16, tcg_gen_gvec_smin)
+TRANS(vmin_w, gvec_vvv, MO_32, tcg_gen_gvec_smin)
+TRANS(vmin_d, gvec_vvv, MO_64, tcg_gen_gvec_smin)
+TRANS(vmin_bu, gvec_vvv, MO_8, tcg_gen_gvec_umin)
+TRANS(vmin_hu, gvec_vvv, MO_16, tcg_gen_gvec_umin)
+TRANS(vmin_wu, gvec_vvv, MO_32, tcg_gen_gvec_umin)
+TRANS(vmin_du, gvec_vvv, MO_64, tcg_gen_gvec_umin)
+
+/* fniv expander: t = smin(a, imm) element-wise, imm splatted across the vector. */
+static void gen_vmini_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+    tcg_gen_smin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+/* fniv expander: t = umin(a, imm) element-wise, imm splatted across the vector. */
+static void gen_vmini_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+    tcg_gen_umin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+/* fniv expander: t = smax(a, imm) element-wise, imm splatted across the vector. */
+static void gen_vmaxi_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+    tcg_gen_smax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+/* fniv expander: t = umax(a, imm) element-wise, imm splatted across the vector. */
+static void gen_vmaxi_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+    tcg_gen_umax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+/*
+ * Expand vmini.{b,h,w,d}: element-wise signed minimum of Vj and the
+ * immediate.  Prefers the host's smin vector op (advertised via
+ * vecop_list); falls back to the out-of-line helper otherwise.
+ */
+static void do_vmini_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+                       int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_smin_vec, 0
+    };
+    /* One GVecGen2i entry per element size; indexed by vece below. */
+    static const GVecGen2i op[4] = {
+        {
+            .fniv = gen_vmini_s,
+            .fnoi = gen_helper_vmini_b,
+            .opt_opc = vecop_list,
+            .vece = MO_8
+        },
+        {
+            .fniv = gen_vmini_s,
+            .fnoi = gen_helper_vmini_h,
+            .opt_opc = vecop_list,
+            .vece = MO_16
+        },
+        {
+            .fniv = gen_vmini_s,
+            .fnoi = gen_helper_vmini_w,
+            .opt_opc = vecop_list,
+            .vece = MO_32
+        },
+        {
+            .fniv = gen_vmini_s,
+            .fnoi = gen_helper_vmini_d,
+            .opt_opc = vecop_list,
+            .vece = MO_64
+        },
+    };
+
+    tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+/*
+ * Expand vmini.{bu,hu,wu,du}: element-wise unsigned minimum of Vj and
+ * the immediate.  Prefers the host's umin vector op (advertised via
+ * vecop_list); falls back to the out-of-line helper otherwise.
+ */
+static void do_vmini_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+                       int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_umin_vec, 0
+    };
+    /* One GVecGen2i entry per element size; indexed by vece below. */
+    static const GVecGen2i op[4] = {
+        {
+            .fniv = gen_vmini_u,
+            .fnoi = gen_helper_vmini_bu,
+            .opt_opc = vecop_list,
+            .vece = MO_8
+        },
+        {
+            .fniv = gen_vmini_u,
+            .fnoi = gen_helper_vmini_hu,
+            .opt_opc = vecop_list,
+            .vece = MO_16
+        },
+        {
+            .fniv = gen_vmini_u,
+            .fnoi = gen_helper_vmini_wu,
+            .opt_opc = vecop_list,
+            .vece = MO_32
+        },
+        {
+            .fniv = gen_vmini_u,
+            .fnoi = gen_helper_vmini_du,
+            .opt_opc = vecop_list,
+            .vece = MO_64
+        },
+    };
+
+    tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+/* vmini: decode gives a signed imm (s5) for _b.._d, unsigned (5) for _bu.._du. */
+TRANS(vmini_b, gvec_vv_i, MO_8, do_vmini_s)
+TRANS(vmini_h, gvec_vv_i, MO_16, do_vmini_s)
+TRANS(vmini_w, gvec_vv_i, MO_32, do_vmini_s)
+TRANS(vmini_d, gvec_vv_i, MO_64, do_vmini_s)
+TRANS(vmini_bu, gvec_vv_i, MO_8, do_vmini_u)
+TRANS(vmini_hu, gvec_vv_i, MO_16, do_vmini_u)
+TRANS(vmini_wu, gvec_vv_i, MO_32, do_vmini_u)
+TRANS(vmini_du, gvec_vv_i, MO_64, do_vmini_u)
+
+/*
+ * Expand vmaxi.{b,h,w,d}: element-wise signed maximum of Vj and the
+ * immediate.  Prefers the host's smax vector op (advertised via
+ * vecop_list); falls back to the out-of-line helper otherwise.
+ */
+static void do_vmaxi_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+                       int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_smax_vec, 0
+    };
+    /* One GVecGen2i entry per element size; indexed by vece below. */
+    static const GVecGen2i op[4] = {
+        {
+            .fniv = gen_vmaxi_s,
+            .fnoi = gen_helper_vmaxi_b,
+            .opt_opc = vecop_list,
+            .vece = MO_8
+        },
+        {
+            .fniv = gen_vmaxi_s,
+            .fnoi = gen_helper_vmaxi_h,
+            .opt_opc = vecop_list,
+            .vece = MO_16
+        },
+        {
+            .fniv = gen_vmaxi_s,
+            .fnoi = gen_helper_vmaxi_w,
+            .opt_opc = vecop_list,
+            .vece = MO_32
+        },
+        {
+            .fniv = gen_vmaxi_s,
+            .fnoi = gen_helper_vmaxi_d,
+            .opt_opc = vecop_list,
+            .vece = MO_64
+        },
+    };
+
+    tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+/*
+ * Expand vmaxi.{bu,hu,wu,du}: element-wise unsigned maximum of Vj and
+ * the immediate.  Prefers the host's umax vector op (advertised via
+ * vecop_list); falls back to the out-of-line helper otherwise.
+ */
+static void do_vmaxi_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+                       int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_umax_vec, 0
+    };
+    /* One GVecGen2i entry per element size; indexed by vece below. */
+    static const GVecGen2i op[4] = {
+        {
+            .fniv = gen_vmaxi_u,
+            .fnoi = gen_helper_vmaxi_bu,
+            .opt_opc = vecop_list,
+            .vece = MO_8
+        },
+        {
+            .fniv = gen_vmaxi_u,
+            .fnoi = gen_helper_vmaxi_hu,
+            .opt_opc = vecop_list,
+            .vece = MO_16
+        },
+        {
+            .fniv = gen_vmaxi_u,
+            .fnoi = gen_helper_vmaxi_wu,
+            .opt_opc = vecop_list,
+            .vece = MO_32
+        },
+        {
+            .fniv = gen_vmaxi_u,
+            .fnoi = gen_helper_vmaxi_du,
+            .opt_opc = vecop_list,
+            .vece = MO_64
+        },
+    };
+
+    tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+/* vmaxi: decode gives a signed imm (s5) for _b.._d, unsigned (5) for _bu.._du. */
+TRANS(vmaxi_b, gvec_vv_i, MO_8, do_vmaxi_s)
+TRANS(vmaxi_h, gvec_vv_i, MO_16, do_vmaxi_s)
+TRANS(vmaxi_w, gvec_vv_i, MO_32, do_vmaxi_s)
+TRANS(vmaxi_d, gvec_vv_i, MO_64, do_vmaxi_s)
+TRANS(vmaxi_bu, gvec_vv_i, MO_8, do_vmaxi_u)
+TRANS(vmaxi_hu, gvec_vv_i, MO_16, do_vmaxi_u)
+TRANS(vmaxi_wu, gvec_vv_i, MO_32, do_vmaxi_u)
+TRANS(vmaxi_du, gvec_vv_i, MO_64, do_vmaxi_u)
@vv .... ........ ..... ..... vj:5 vd:5 &vv
@vvv .... ........ ..... vk:5 vj:5 vd:5 &vvv
@vv_ui5 .... ........ ..... imm:5 vj:5 vd:5 &vv_i
+# 5-bit *signed* immediate variant; the signed vmaxi/vmini patterns
+# use this, while the unsigned ones keep the zero-extended @vv_ui5.
+@vv_i5 .... ........ ..... imm:s5 vj:5 vd:5 &vv_i
vadd_b 0111 00000000 10100 ..... ..... ..... @vvv
vadd_h 0111 00000000 10101 ..... ..... ..... @vvv
vadda_h 0111 00000101 11001 ..... ..... ..... @vvv
vadda_w 0111 00000101 11010 ..... ..... ..... @vvv
vadda_d 0111 00000101 11011 ..... ..... ..... @vvv
+
+# Vector maximum: register-register (@vvv) and immediate forms.
+vmax_b 0111 00000111 00000 ..... ..... ..... @vvv
+vmax_h 0111 00000111 00001 ..... ..... ..... @vvv
+vmax_w 0111 00000111 00010 ..... ..... ..... @vvv
+vmax_d 0111 00000111 00011 ..... ..... ..... @vvv
+vmaxi_b 0111 00101001 00000 ..... ..... ..... @vv_i5
+vmaxi_h 0111 00101001 00001 ..... ..... ..... @vv_i5
+vmaxi_w 0111 00101001 00010 ..... ..... ..... @vv_i5
+vmaxi_d 0111 00101001 00011 ..... ..... ..... @vv_i5
+vmax_bu 0111 00000111 01000 ..... ..... ..... @vvv
+vmax_hu 0111 00000111 01001 ..... ..... ..... @vvv
+vmax_wu 0111 00000111 01010 ..... ..... ..... @vvv
+vmax_du 0111 00000111 01011 ..... ..... ..... @vvv
+vmaxi_bu 0111 00101001 01000 ..... ..... ..... @vv_ui5
+vmaxi_hu 0111 00101001 01001 ..... ..... ..... @vv_ui5
+vmaxi_wu 0111 00101001 01010 ..... ..... ..... @vv_ui5
+vmaxi_du 0111 00101001 01011 ..... ..... ..... @vv_ui5
+
+# Vector minimum: register-register (@vvv) and immediate forms.
+vmin_b 0111 00000111 00100 ..... ..... ..... @vvv
+vmin_h 0111 00000111 00101 ..... ..... ..... @vvv
+vmin_w 0111 00000111 00110 ..... ..... ..... @vvv
+vmin_d 0111 00000111 00111 ..... ..... ..... @vvv
+vmini_b 0111 00101001 00100 ..... ..... ..... @vv_i5
+vmini_h 0111 00101001 00101 ..... ..... ..... @vv_i5
+vmini_w 0111 00101001 00110 ..... ..... ..... @vv_i5
+vmini_d 0111 00101001 00111 ..... ..... ..... @vv_i5
+vmin_bu 0111 00000111 01100 ..... ..... ..... @vvv
+vmin_hu 0111 00000111 01101 ..... ..... ..... @vvv
+vmin_wu 0111 00000111 01110 ..... ..... ..... @vvv
+vmin_du 0111 00000111 01111 ..... ..... ..... @vvv
+vmini_bu 0111 00101001 01100 ..... ..... ..... @vv_ui5
+vmini_hu 0111 00101001 01101 ..... ..... ..... @vv_ui5
+vmini_wu 0111 00101001 01110 ..... ..... ..... @vv_ui5
+vmini_du 0111 00101001 01111 ..... ..... ..... @vv_ui5
DO_VADDA(vadda_h, 16, H, DO_VABS)
DO_VADDA(vadda_w, 32, W, DO_VABS)
DO_VADDA(vadda_d, 64, D, DO_VABS)
+
+/*
+ * Scalar binary min/max.  Arguments are fully parenthesized so the
+ * macros expand safely even when callers pass compound expressions
+ * (e.g. a cast or arithmetic), avoiding precedence surprises.
+ */
+#define DO_MIN(a, b) ((a) < (b) ? (a) : (b))
+#define DO_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/*
+ * Out-of-line helper body for vmini/vmaxi:
+ *   Vd->E(i) = DO_OP(Vj->E(i), imm) for each BIT-wide element.
+ * The immediate is first cast to the element type TD, so it takes on
+ * the signedness (and truncation) of the element being compared.
+ * The trailing i32 argument 'v' is unused here; the element count is
+ * the fixed LSX_LEN / BIT.
+ */
+#define VMINMAXI(NAME, BIT, E, DO_OP)                              \
+void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v)    \
+{                                                                  \
+    int i;                                                         \
+    VReg *Vd = (VReg *)vd;                                         \
+    VReg *Vj = (VReg *)vj;                                         \
+    typedef __typeof(Vd->E(0)) TD;                                 \
+                                                                   \
+    for (i = 0; i < LSX_LEN/BIT; i++) {                            \
+        Vd->E(i) = DO_OP(Vj->E(i), (TD)imm);                       \
+    }                                                              \
+}
+
+VMINMAXI(vmini_b, 8, B, DO_MIN)
+VMINMAXI(vmini_h, 16, H, DO_MIN)
+VMINMAXI(vmini_w, 32, W, DO_MIN)
+VMINMAXI(vmini_d, 64, D, DO_MIN)
+VMINMAXI(vmaxi_b, 8, B, DO_MAX)
+VMINMAXI(vmaxi_h, 16, H, DO_MAX)
+VMINMAXI(vmaxi_w, 32, W, DO_MAX)
+VMINMAXI(vmaxi_d, 64, D, DO_MAX)
+VMINMAXI(vmini_bu, 8, UB, DO_MIN)
+VMINMAXI(vmini_hu, 16, UH, DO_MIN)
+VMINMAXI(vmini_wu, 32, UW, DO_MIN)
+VMINMAXI(vmini_du, 64, UD, DO_MIN)
+VMINMAXI(vmaxi_bu, 8, UB, DO_MAX)
+VMINMAXI(vmaxi_hu, 16, UH, DO_MAX)
+VMINMAXI(vmaxi_wu, 32, UW, DO_MAX)
+VMINMAXI(vmaxi_du, 64, UD, DO_MAX)