ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot),
ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt),
ISA_EXT_DATA_ENTRY(xtheadcmo, true, PRIV_VERSION_1_11_0, ext_xtheadcmo),
+ ISA_EXT_DATA_ENTRY(xtheadsync, true, PRIV_VERSION_1_11_0, ext_xtheadsync),
ISA_EXT_DATA_ENTRY(xventanacondops, true, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
};
/* Vendor-specific custom extensions */
DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
+ DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
/* These are experimental so mark with 'x-' */
/* Vendor-specific custom extensions */
bool ext_xtheadcmo;
+ bool ext_xtheadsync;
bool ext_XVentanaCondOps;
uint8_t pmu_num;
DEF_HELPER_1(mret, tl, env)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_1(tlb_flush, void, env)
+DEF_HELPER_1(tlb_flush_all, void, env)
/* Native Debug */
DEF_HELPER_1(itrigger_match, void, env)
#endif
} \
} while (0)
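+/* Returns false from the trans function (illegal insn) if XTheadSync is disabled. */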
+#define REQUIRE_XTHEADSYNC(ctx) do { \
+ if (!ctx->cfg_ptr->ext_xtheadsync) { \
+ return false; \
+ } \
+} while (0)
+
/* XTheadCmo */
static inline int priv_level(DisasContext *ctx)
NOP_PRIVCHECK(th_l2cache_call, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_l2cache_ciall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_l2cache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
+
+/* XTheadSync */
+
+static bool trans_th_sfence_vmas(DisasContext *ctx, arg_th_sfence_vmas *a)
+{
+ (void) a;
+ REQUIRE_XTHEADSYNC(ctx);
+
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_PRIV_MS(ctx);
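+ /* th.sfence.vmas acts on all harts, so broadcast the TLB flush. */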
+ gen_helper_tlb_flush_all(cpu_env);
+ return true;
+#else
+ return false;
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+static void gen_th_sync_local(DisasContext *ctx)
+{
+ /*
+ * Emulate out-of-order barriers with a pipeline flush
+ * by exiting the current translation block.
+ */
+ gen_set_pc_imm(ctx, ctx->pc_succ_insn);
+ tcg_gen_exit_tb(NULL, 0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+#endif
+
+static bool trans_th_sync(DisasContext *ctx, arg_th_sync *a)
+{
+ (void) a;
+ REQUIRE_XTHEADSYNC(ctx);
+
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_PRIV_MSU(ctx);
+
+ /*
+ * th.sync is an out-of-order barrier.
+ */
+ gen_th_sync_local(ctx);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a)
+{
+ (void) a;
+ REQUIRE_XTHEADSYNC(ctx);
+
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_PRIV_MSU(ctx);
+
+ /*
+ * th.sync.i is th.sync plus a pipeline flush.
+ */
+ gen_th_sync_local(ctx);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+static bool trans_th_sync_is(DisasContext *ctx, arg_th_sync_is *a)
+{
+ /* This instruction has the same behaviour as th.sync.i. */
+ return trans_th_sync_i(ctx, a);
+}
+
+static bool trans_th_sync_s(DisasContext *ctx, arg_th_sync_s *a)
+{
+ /* This instruction has the same behaviour as th.sync. */
+ return trans_th_sync(ctx, a);
+}
}
}
+void helper_tlb_flush_all(CPURISCVState *env)
+{
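+ /* Flush the TLBs of all CPUs, not just the local hart. */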
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_all_cpus_synced(cs);
+}
+
void helper_hyp_tlb_flush(CPURISCVState *env)
{
CPUState *cs = env_cpu(env);
static bool has_xthead_p(DisasContext *ctx __attribute__((__unused__)))
{
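+ /* Use the XThead decoder if any XThead extension is enabled. */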
- return ctx->cfg_ptr->ext_xtheadcmo;
+ return ctx->cfg_ptr->ext_xtheadcmo || ctx->cfg_ptr->ext_xtheadsync;
}
#define MATERIALISE_EXT_PREDICATE(ext) \
# Fields:
%rs1 15:5
+%rs2 20:5
# Formats
@sfence_vm ....... ..... ..... ... ..... ....... %rs1
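+# Format for instructions that use only the two source registers (rs2, rs1)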
+@rs2_s ....... ..... ..... ... ..... ....... %rs2 %rs1
# XTheadCmo
th_dcache_call 0000000 00001 00000 000 00000 0001011
th_l2cache_call 0000000 10101 00000 000 00000 0001011
th_l2cache_ciall 0000000 10111 00000 000 00000 0001011
th_l2cache_iall 0000000 10110 00000 000 00000 0001011
+
+# XTheadSync
+th_sfence_vmas 0000010 ..... ..... 000 00000 0001011 @rs2_s
+th_sync 0000000 11000 00000 000 00000 0001011
+th_sync_i 0000000 11010 00000 000 00000 0001011
+th_sync_is 0000000 11011 00000 000 00000 0001011
+th_sync_s 0000000 11001 00000 000 00000 0001011
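+
+# Note: toolchains without XThead support can still exercise these encodings
+# via the .insn directive. A minimal sketch (hypothetical test snippet, not
+# part of this patch):
+#   .insn r 0x0b, 0, 0, x0, x0, x24   # th.sync          (rs2 field = 0b11000)
+#   .insn r 0x0b, 0, 0, x0, x0, x26   # th.sync.i        (rs2 field = 0b11010)
+#   .insn r 0x0b, 0, 2, x0, a0, a1    # th.sfence.vmas a0, a1 (funct7 = 0b0000010)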