ISA_EXT_DATA_ENTRY(xtheadbs, true, PRIV_VERSION_1_11_0, ext_xtheadbs),
ISA_EXT_DATA_ENTRY(xtheadcmo, true, PRIV_VERSION_1_11_0, ext_xtheadcmo),
ISA_EXT_DATA_ENTRY(xtheadcondmov, true, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
+ ISA_EXT_DATA_ENTRY(xtheadfmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
ISA_EXT_DATA_ENTRY(xtheadmac, true, PRIV_VERSION_1_11_0, ext_xtheadmac),
ISA_EXT_DATA_ENTRY(xtheadmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
ISA_EXT_DATA_ENTRY(xtheadmempair, true, PRIV_VERSION_1_11_0, ext_xtheadmempair),
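+/*
+ * Each entry above also makes an enabled extension visible in the
+ * generated riscv,isa string, e.g. (illustrative):
+ *   rv64imafdc_xtheadcmo_xtheadfmemidx_xtheadmac...
+ */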
DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
+ DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
} \
} while (0)
+#define REQUIRE_XTHEADFMEMIDX(ctx) do { \
+ if (!ctx->cfg_ptr->ext_xtheadfmemidx) { \
+ return false; \
+ } \
+} while (0)
+
#define REQUIRE_XTHEADMAC(ctx) do { \
if (!ctx->cfg_ptr->ext_xtheadmac) { \
return false; \
return gen_th_condmove(ctx, a, TCG_COND_NE);
}
+/* XTheadFmemIdx */
+
+/*
+ * Load a float (32- or 64-bit, per memop) from an indexed address into
+ * a 64-bit FP register; 32-bit values are NaN-boxed on load.
+ * If !zext_offs, then address is rs1 + (rs2 << imm2).
+ * If zext_offs, then address is rs1 + (zext(rs2[31:0]) << imm2).
+ */
+static bool gen_fload_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
+ bool zext_offs)
+{
+ TCGv_i64 rd = cpu_fpr[a->rd];
+ TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);
+
+ tcg_gen_qemu_ld_i64(rd, addr, ctx->mem_idx, memop);
+ if ((memop & MO_SIZE) == MO_32) {
+ gen_nanbox_s(rd, rd);
+ }
+
+ mark_fs_dirty(ctx);
+ return true;
+}
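+
+/*
+ * For reference: a minimal sketch of how get_th_address_indexed() is
+ * assumed to compute the address above. The helper is provided by the
+ * XTheadMemIdx support and is not part of this patch; the TCG sequence
+ * below is an illustration of the intended semantics, not the
+ * authoritative implementation:
+ *
+ *   static TCGv get_th_address_indexed(DisasContext *ctx, int rs1, int rs2,
+ *                                      int imm2, bool zext_offs)
+ *   {
+ *       TCGv offs = tcg_temp_new();
+ *       TCGv rs2v = get_gpr(ctx, rs2, EXT_NONE);
+ *
+ *       if (zext_offs) {
+ *           tcg_gen_extract_tl(offs, rs2v, 0, 32);
+ *           tcg_gen_shli_tl(offs, offs, imm2);
+ *       } else {
+ *           tcg_gen_shli_tl(offs, rs2v, imm2);
+ *       }
+ *       return get_address_indexed(ctx, rs1, offs);
+ *   }
+ */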
+
+/*
+ * Store a float (32- or 64-bit, per memop) to an indexed address.
+ * If !zext_offs, then address is rs1 + (rs2 << imm2).
+ * If zext_offs, then address is rs1 + (zext(rs2[31:0]) << imm2).
+ */
+static bool gen_fstore_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
+ bool zext_offs)
+{
+ TCGv_i64 rd = cpu_fpr[a->rd];
+ TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);
+
+ tcg_gen_qemu_st_i64(rd, addr, ctx->mem_idx, memop);
+
+ return true;
+}
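+
+/*
+ * Worked example of the two offset forms (illustrative values): with
+ * rs1 = 0x80001000, rs2 = 0xffffffff00000004 and imm2 = 3:
+ *   !zext_offs: addr = 0x80001000 + (0xffffffff00000004 << 3)
+ *   zext_offs:  addr = 0x80001000 + (0x4 << 3) = 0x80001020
+ * i.e. zext_offs discards the upper 32 bits of rs2 before scaling.
+ */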
+
+static bool trans_th_flrd(DisasContext *ctx, arg_th_memidx *a)
+{
+ REQUIRE_XTHEADFMEMIDX(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+ return gen_fload_idx(ctx, a, MO_TEUQ, false);
+}
+
+static bool trans_th_flrw(DisasContext *ctx, arg_th_memidx *a)
+{
+ REQUIRE_XTHEADFMEMIDX(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+ return gen_fload_idx(ctx, a, MO_TEUL, false);
+}
+
+static bool trans_th_flurd(DisasContext *ctx, arg_th_memidx *a)
+{
+ REQUIRE_XTHEADFMEMIDX(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+ return gen_fload_idx(ctx, a, MO_TEUQ, true);
+}
+
+static bool trans_th_flurw(DisasContext *ctx, arg_th_memidx *a)
+{
+ REQUIRE_XTHEADFMEMIDX(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+ return gen_fload_idx(ctx, a, MO_TEUL, true);
+}
+
+static bool trans_th_fsrd(DisasContext *ctx, arg_th_memidx *a)
+{
+ REQUIRE_XTHEADFMEMIDX(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+ return gen_fstore_idx(ctx, a, MO_TEUQ, false);
+}
+
+static bool trans_th_fsrw(DisasContext *ctx, arg_th_memidx *a)
+{
+ REQUIRE_XTHEADFMEMIDX(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+ return gen_fstore_idx(ctx, a, MO_TEUL, false);
+}
+
+static bool trans_th_fsurd(DisasContext *ctx, arg_th_memidx *a)
+{
+ REQUIRE_XTHEADFMEMIDX(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+ return gen_fstore_idx(ctx, a, MO_TEUQ, true);
+}
+
+static bool trans_th_fsurw(DisasContext *ctx, arg_th_memidx *a)
+{
+ REQUIRE_XTHEADFMEMIDX(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+ return gen_fstore_idx(ctx, a, MO_TEUL, true);
+}
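+
+/*
+ * Guest-code usage example (illustrative; operand order follows the
+ * T-Head spec, th.<op> rd, rs1, rs2, imm2, and the mnemonics need a
+ * toolchain with T-Head vendor-extension support):
+ *
+ *   th.flrd  fa0, a0, a1, 3    # fa0 = *(double *)(a0 + (a1 << 3))
+ *   th.fsurw fa1, a0, a2, 2    # M[a0 + (zext(a2[31:0]) << 2)] = fa1[31:0]
+ */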
+
/* XTheadMac */
static bool gen_th_mac(DisasContext *ctx, arg_r *a,