+/* Migration state for the OpenRISC CPU "env" section.
+ * Version bumped 1 -> 2: the GPRs and the PC-related fields are widened
+ * from fixed uint32 to target_ulong (UINTTL), the l.lwa/l.swa atomic
+ * reservation (lock_addr/lock_value) is now migrated, and the read-only
+ * configuration registers (vr, upr, cpucfgr, dmmucfgr, immucfgr) are
+ * added.  minimum_version_id is also raised to 2 so old-layout streams
+ * are rejected rather than mis-parsed. */
static const VMStateDescription vmstate_env = {
.name = "env",
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.fields = (VMStateField[]) {
- VMSTATE_UINT32_ARRAY(gpr, CPUOpenRISCState, 32),
+ VMSTATE_UINTTL_ARRAY(gpr, CPUOpenRISCState, 32),
+ VMSTATE_UINTTL(pc, CPUOpenRISCState),
+ VMSTATE_UINTTL(npc, CPUOpenRISCState),
+ VMSTATE_UINTTL(ppc, CPUOpenRISCState),
+ VMSTATE_UINTTL(jmp_pc, CPUOpenRISCState),
+ /* The l.lwa reservation must survive migration or an in-flight
+  * ll/sc sequence would spuriously succeed/fail after loadvm. */
+ VMSTATE_UINTTL(lock_addr, CPUOpenRISCState),
+ VMSTATE_UINTTL(lock_value, CPUOpenRISCState),
+ VMSTATE_UINTTL(epcr, CPUOpenRISCState),
+ VMSTATE_UINTTL(eear, CPUOpenRISCState),
VMSTATE_UINT32(sr, CPUOpenRISCState),
- VMSTATE_UINT32(epcr, CPUOpenRISCState),
- VMSTATE_UINT32(eear, CPUOpenRISCState),
+ VMSTATE_UINT32(vr, CPUOpenRISCState),
+ VMSTATE_UINT32(upr, CPUOpenRISCState),
+ VMSTATE_UINT32(cpucfgr, CPUOpenRISCState),
+ VMSTATE_UINT32(dmmucfgr, CPUOpenRISCState),
+ VMSTATE_UINT32(immucfgr, CPUOpenRISCState),
VMSTATE_UINT32(esr, CPUOpenRISCState),
VMSTATE_UINT32(fpcsr, CPUOpenRISCState),
- VMSTATE_UINT32(pc, CPUOpenRISCState),
- VMSTATE_UINT32(npc, CPUOpenRISCState),
- VMSTATE_UINT32(ppc, CPUOpenRISCState),
VMSTATE_END_OF_LIST()
}
};
static TCGv cpu_npc;
static TCGv cpu_ppc;
static TCGv_i32 env_btaken; /* bf/bnf , F flag taken */
+/* l.lwa/l.swa reservation state: the effective address of the last
+ * l.lwa and the value it loaded.  gen_swa consumes the reservation
+ * and resets cpu_lock_addr to -1 on every l.swa, pass or fail. */
+static TCGv cpu_lock_addr;
+static TCGv cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv machi, maclo;
static TCGv fpmaddhi, fpmaddlo;
env_btaken = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUOpenRISCState, btaken),
"btaken");
+ /* Expose the reservation fields of CPUOpenRISCState as TCG globals
+  * so translated code (gen_lwa/gen_swa) can access them directly. */
+ cpu_lock_addr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, lock_addr),
+ "lock_addr");
+ cpu_lock_value = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, lock_value),
+ "lock_value");
fpcsr = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUOpenRISCState, fpcsr),
"fpcsr");
}
+/* l.lwa: load word and establish the atomic reservation.
+ * Loads a 32-bit little-to-target-endian word from ra + ofs into rd,
+ * then records the effective address in cpu_lock_addr and the loaded
+ * value in cpu_lock_value so a later l.swa can detect interference
+ * via compare-and-swap (see gen_swa). */
+static void gen_lwa(DisasContext *dc, TCGv rd, TCGv ra, int32_t ofs)
+{
+ TCGv ea = tcg_temp_new();
+
+ tcg_gen_addi_tl(ea, ra, ofs);
+ tcg_gen_qemu_ld_tl(rd, ea, dc->mem_idx, MO_TEUL);
+ tcg_gen_mov_tl(cpu_lock_addr, ea);
+ tcg_gen_mov_tl(cpu_lock_value, rd);
+ tcg_temp_free(ea);
+}
+
+/* l.swa: store word conditional on the reservation set by l.lwa.
+ * Fails immediately (SR[F] = 0) if ra + ofs does not match the
+ * reservation address; otherwise an atomic cmpxchg against the value
+ * l.lwa observed performs the store, and SR[F] reports whether memory
+ * was still unchanged.  The reservation is cleared (lock_addr = -1)
+ * on every l.swa, success or failure. */
+static void gen_swa(DisasContext *dc, TCGv rb, TCGv ra, int32_t ofs)
+{
+ TCGv ea, val;
+ TCGLabel *lab_fail, *lab_done;
+
+ ea = tcg_temp_new();
+ tcg_gen_addi_tl(ea, ra, ofs);
+
+ lab_fail = gen_new_label();
+ lab_done = gen_new_label();
+ /* Address mismatch: reservation was for a different location. */
+ tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
+ tcg_temp_free(ea);
+
+ val = tcg_temp_new();
+ /* Atomically store rb iff memory still holds the l.lwa value;
+  * val receives the value actually found at the address. */
+ tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
+ rb, dc->mem_idx, MO_TEUL);
+ /* NOTE(review): env_btaken is declared TCGv_i32 but is written here
+  * with *_tl ops; that is only type-correct while TARGET_LONG_BITS
+  * == 32 -- confirm before enabling a 64-bit OpenRISC target. */
+ tcg_gen_setcond_tl(TCG_COND_EQ, env_btaken, val, cpu_lock_value);
+ tcg_temp_free(val);
+
+ tcg_gen_br(lab_done);
+
+ gen_set_label(lab_fail);
+ tcg_gen_movi_tl(env_btaken, 0);
+
+ gen_set_label(lab_done);
+ /* Consume the reservation unconditionally. */
+ tcg_gen_movi_tl(cpu_lock_addr, -1);
+ /* Commit env_btaken into the architectural SR[F] flag. */
+ wb_SR_F();
+}
+
static void dec_calc(DisasContext *dc, uint32_t insn)
{
uint32_t op0, op1, op2;
}
break;
+ case 0x1b: /* l.lwa */
+ LOG_DIS("l.lwa r%d, r%d, %d\n", rd, ra, I16);
+ /* Load word + set reservation; I16 is the instruction's
+  * sign-extended 16-bit displacement. */
+ gen_lwa(dc, cpu_R[rd], cpu_R[ra], I16);
+ break;
+
case 0x1c: /* l.cust1 */
LOG_DIS("l.cust1\n");
break;
}
break;
+ case 0x33: /* l.swa */
+ LOG_DIS("l.swa %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ /* Store-conditional; offset comes from the store-format split
+  * immediate -- presumably tmp = (I5 << 11) | I11 as for the
+  * other stores in this switch; confirm against the decoder. */
+ gen_swa(dc, cpu_R[rb], cpu_R[ra], sign_extend(tmp, 16));
+ break;
+
/* not used yet, open it when we need or64. */
/*#ifdef TARGET_OPENRISC64
case 0x34: l.sd