We should call decode_save_opc() for all relevant instructions which
can potentially generate a virtual instruction fault or a guest page
fault, because generating the transformed instruction upon a guest page
fault requires the opcode to be available. Without this, the hypervisor
will see the transformed instruction as zero in the htinst CSR for
guest MMIO emulation, which makes MMIO emulation in the hypervisor slow
and also breaks nested virtualization.
Fixes: a9814e3e08d2 ("target/riscv: Minimize the calls to decode_save_opc")
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20230120125950.2246378-5-apatel@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
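
For readers unfamiliar with the mechanism, below is a minimal,
self-contained sketch of why the opcode must be saved at decode time.
This is not QEMU code: the global variable, the fault_transformed_insn()
helper and the example encoding are illustrative assumptions; only the
decode_save_opc() name mirrors the patch. The point is that the fault
path can report a non-zero transformed instruction in htinst only if
the opcode was recorded before the guest memory access was emitted.

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Simplified model, not QEMU's implementation: the translator records
     * the raw opcode before emitting a guest memory access; the guest page
     * fault path then uses the saved opcode to build the transformed
     * instruction reported in htinst.  If nothing was saved, only zero can
     * be reported. */

    static uint32_t saved_opc;              /* captured by decode_save_opc() */

    static void decode_save_opc(uint32_t opcode)
    {
        saved_opc = opcode;
    }

    static uint32_t fault_transformed_insn(void)
    {
        /* A real implementation rewrites fields of the faulting instruction;
         * here the point is only that a non-zero value requires a saved
         * opcode. */
        return saved_opc;
    }

    int main(void)
    {
        uint32_t lw_a0_0_a1 = 0x0005a503;   /* lw a0, 0(a1) */

        /* Without the fix: the opcode was never saved, so htinst reads 0. */
        printf("htinst without save: 0x%08" PRIx32 "\n",
               fault_transformed_insn());

        /* With the fix: decode_save_opc() runs before the guest access. */
        decode_save_opc(lw_a0_0_a1);
        printf("htinst with save:    0x%08" PRIx32 "\n",
               fault_transformed_insn());
        return 0;
    }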
static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
- TCGv src1 = get_address(ctx, a->rs1, 0);
+ TCGv src1;
+ decode_save_opc(ctx);
+ src1 = get_address(ctx, a->rs1, 0);
if (a->rl) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
}
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
+ decode_save_opc(ctx);
src1 = get_address(ctx, a->rs1, 0);
tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);
MemOp mop)
{
TCGv dest = dest_gpr(ctx, a->rd);
- TCGv src1 = get_address(ctx, a->rs1, 0);
- TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+ TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+ decode_save_opc(ctx);
+ src1 = get_address(ctx, a->rs1, 0);
func(dest, src1, src2, ctx->mem_idx, mop);
gen_set_gpr(ctx, a->rd, dest);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
+ decode_save_opc(ctx);
addr = get_address(ctx, a->rs1, a->imm);
tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
+ decode_save_opc(ctx);
addr = get_address(ctx, a->rs1, a->imm);
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ);
return true;
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
+ decode_save_opc(ctx);
addr = get_address(ctx, a->rs1, a->imm);
dest = cpu_fpr[a->rd];
tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
+ decode_save_opc(ctx);
addr = get_address(ctx, a->rs1, a->imm);
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL);
return true;
#ifdef CONFIG_USER_ONLY
return false;
#else
+ decode_save_opc(ctx);
if (check_access(ctx)) {
TCGv dest = dest_gpr(ctx, a->rd);
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
#ifdef CONFIG_USER_ONLY
return false;
#else
+ decode_save_opc(ctx);
if (check_access(ctx)) {
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
static bool do_hlvx(DisasContext *ctx, arg_r2 *a,
void (*func)(TCGv, TCGv_env, TCGv))
{
+ decode_save_opc(ctx);
if (check_access(ctx)) {
TCGv dest = dest_gpr(ctx, a->rd);
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
+ decode_save_opc(ctx);
if (get_xl(ctx) == MXL_RV128) {
return gen_load_i128(ctx, a, memop);
} else {
static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
+ decode_save_opc(ctx);
if (get_xl(ctx) == MXL_RV128) {
return gen_store_i128(ctx, a, memop);
} else {
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
+ decode_save_opc(ctx);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
if (a->imm) {
TCGv temp = temp_new(ctx);
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
+ decode_save_opc(ctx);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
if (a->imm) {
TCGv temp = tcg_temp_new();
/* Do the same as sfence.vma currently */
REQUIRE_EXT(ctx, RVS);
#ifndef CONFIG_USER_ONLY
+ decode_save_opc(ctx);
gen_helper_tlb_flush(cpu_env);
return true;
#endif
/* Do the same as hfence.vvma currently */
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
+ decode_save_opc(ctx);
gen_helper_hyp_tlb_flush(cpu_env);
return true;
#endif
/* Do the same as hfence.gvma currently */
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
+ decode_save_opc(ctx);
gen_helper_hyp_gvma_tlb_flush(cpu_env);
return true;
#endif