DEF_HELPER_FLAGS_6(gvec_vfma64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
DEF_HELPER_FLAGS_6(gvec_vfms64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
DEF_HELPER_FLAGS_6(gvec_vfms64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfsq64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfsq64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(servc, i32, env, i64, i64)
F(0xe78e, VFMS, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC)
/* VECTOR FP PERFORM SIGN OPERATION */
F(0xe7cc, VFPSO, VRR_a, V, 0, 0, 0, 0, vfpso, 0, IF_VEC)
+/* VECTOR FP SQUARE ROOT */
+    F(0xe7ce, VFSQ, VRR_a, V, 0, 0, 0, 0, vfsq, 0, IF_VEC)
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
}
return DISAS_NEXT;
}
+
+static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o)
+{
+    const uint8_t fpf = get_field(s->fields, m3);
+    const uint8_t m4 = get_field(s->fields, m4);
+    gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfsq64;
+
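+    /*
+     * Only the long (64-bit) element format is handled; any other M3
+     * value, or nonzero reserved M4 bits, raises a specification exception.
+     */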
+    if (fpf != FPF_LONG || extract32(m4, 0, 3)) {
+        gen_program_exception(s, PGM_SPECIFICATION);
+        return DISAS_NORETURN;
+    }
+
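+    /* The single-element control (S) bit selects the single-element helper. */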
+    if (extract32(m4, 3, 1)) {
+        fn = gen_helper_gvec_vfsq64s;
+    }
+    gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), cpu_env,
+                   0, fn);
+    return DISAS_NEXT;
+}
{
vfma64(v1, v2, v3, v4, env, true, float_muladd_negate_c, GETPC());
}
+
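+/* Per-element callback for vop64_2(): square root of one double-word. */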
+static uint64_t vfsq64(uint64_t a, float_status *s)
+{
+    return float64_sqrt(a, s);
+}
+
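+/* Square root of each double-word element of v2, result in v1. */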
+void HELPER(gvec_vfsq64)(void *v1, const void *v2, CPUS390XState *env,
+                         uint32_t desc)
+{
+    vop64_2(v1, v2, env, false, false, 0, vfsq64, GETPC());
+}
+
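+/* Single-element form: the square root of element 0 of v2 only. */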
+void HELPER(gvec_vfsq64s)(void *v1, const void *v2, CPUS390XState *env,
+                          uint32_t desc)
+{
+    vop64_2(v1, v2, env, true, false, 0, vfsq64, GETPC());
+}