DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
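+/*
+ * The .vf slide helpers take the scalar as an i64 rather than tl,
+ * since FLEN may exceed XLEN (e.g. RV32 with the D extension).
+ */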
+DEF_HELPER_6(vfslide1up_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfslide1up_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfslide1up_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfslide1down_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfslide1down_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfslide1down_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+
DEF_HELPER_6(vrgather_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vrgather_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vrgather_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4)
GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8)
-#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H) \
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- uint32_t vm = vext_vm(desc); \
- uint32_t vl = env->vl; \
- uint32_t i; \
- \
- for (i = 0; i < vl; i++) { \
- if (!vm && !vext_elem_mask(v0, i)) { \
- continue; \
- } \
- if (i == 0) { \
- *((ETYPE *)vd + H(i)) = s1; \
- } else { \
- *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \
- } \
- } \
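+/*
+ * The slide1up body is generated per element size so that the integer
+ * vslide1up.vx helpers and the floating-point vfslide1up.vf helpers
+ * further down can share one implementation.
+ */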
+#define GEN_VEXT_VSLIDE1UP(ESZ, H) \
+static void vslide1up_##ESZ(void *vd, void *v0, uint64_t s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ typedef uint##ESZ##_t ETYPE; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, i)) { \
+ continue; \
+ } \
+ if (i == 0) { \
+ *((ETYPE *)vd + H(i)) = s1; \
+ } else { \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \
+ } \
+ } \
+}
+
+GEN_VEXT_VSLIDE1UP(8, H1)
+GEN_VEXT_VSLIDE1UP(16, H2)
+GEN_VEXT_VSLIDE1UP(32, H4)
+GEN_VEXT_VSLIDE1UP(64, H8)
+
+#define GEN_VEXT_VSLIDE1UP_VX(NAME, ESZ) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vslide1up_##ESZ(vd, v0, s1, vs2, env, desc); \
}
/* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1)
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2)
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4)
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8)
-
-#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H) \
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- uint32_t vm = vext_vm(desc); \
- uint32_t vl = env->vl; \
- uint32_t i; \
- \
- for (i = 0; i < vl; i++) { \
- if (!vm && !vext_elem_mask(v0, i)) { \
- continue; \
- } \
- if (i == vl - 1) { \
- *((ETYPE *)vd + H(i)) = s1; \
- } else { \
- *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \
- } \
- } \
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, 8)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, 16)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, 32)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, 64)
+
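+/*
+ * As with slide1up, the slide1down body is shared between the integer
+ * .vx and floating-point .vf helpers.
+ */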
+#define GEN_VEXT_VSLIDE1DOWN(ESZ, H) \
+static void vslide1down_##ESZ(void *vd, void *v0, uint64_t s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ typedef uint##ESZ##_t ETYPE; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, i)) { \
+ continue; \
+ } \
+ if (i == vl - 1) { \
+ *((ETYPE *)vd + H(i)) = s1; \
+ } else { \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \
+ } \
+ } \
+}
+
+GEN_VEXT_VSLIDE1DOWN(8, H1)
+GEN_VEXT_VSLIDE1DOWN(16, H2)
+GEN_VEXT_VSLIDE1DOWN(32, H4)
+GEN_VEXT_VSLIDE1DOWN(64, H8)
+
+#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ESZ) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vslide1down_##ESZ(vd, v0, s1, vs2, env, desc); \
}
/* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1)
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2)
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4)
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, 8)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, 16)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, 32)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, 64)
+
+/* Vector Floating-Point Slide Instructions */
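+/*
+ * These reuse the integer slide1up/slide1down bodies: the scalar f[rs1]
+ * is inserted as a raw 64-bit pattern (hence uint64_t s1), with no
+ * floating-point interpretation of the value.
+ */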
+#define GEN_VEXT_VFSLIDE1UP_VF(NAME, ESZ) \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vslide1up_##ESZ(vd, v0, s1, vs2, env, desc); \
+}
+
+/* vfslide1up.vf vd, vs2, rs1, vm # vd[0]=f[rs1], vd[i+1] = vs2[i] */
+GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_h, 16)
+GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_w, 32)
+GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_d, 64)
+
+#define GEN_VEXT_VFSLIDE1DOWN_VF(NAME, ESZ) \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vslide1down_##ESZ(vd, v0, s1, vs2, env, desc); \
+}
+
+/* vfslide1down.vf vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=f[rs1] */
+GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_h, 16)
+GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_w, 32)
+GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_d, 64)
/* Vector Register Gather Instruction */
#define GEN_VEXT_VRGATHER_VV(NAME, TS1, TS2, HS1, HS2) \