db->singlestep_enabled = cpu->singlestep_enabled;
/* Instruction counting */
- max_insns = db->tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(db->tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
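All of the hunks in this patch apply one mechanical change: read cflags through the tb_cflags() accessor instead of dereferencing tb->cflags directly, so a translator racing with a writer sees a consistent value. As a sketch of what the accessor amounts to (matching its introduction earlier in this series, in include/exec/exec-all.h):

    /* Wrap the atomic read so call sites stay readable. */
    static inline uint32_t tb_cflags(const TranslationBlock *tb)
    {
        return atomic_read(&tb->cflags);
    }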
update db->pc_next and db->is_jmp to indicate what should be
done next -- either exiting this loop or locating the start of
the next instruction. */
- if (db->num_insns == max_insns && (db->tb->cflags & CF_LAST_IO)) {
+ if (db->num_insns == max_insns && (tb_cflags(db->tb) & CF_LAST_IO)) {
/* Accept I/O on the last instruction. */
gen_io_start();
ops->translate_insn(db, cpu);
TCGv_i32 count, imm;
exitreq_label = gen_new_label();
- if (tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(tb) & CF_USE_ICOUNT) {
count = tcg_temp_local_new_i32();
} else {
count = tcg_temp_new_i32();
tcg_gen_ld_i32(count, tcg_ctx.tcg_env,
               -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
- if (tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(tb) & CF_USE_ICOUNT) {
imm = tcg_temp_new_i32();
/* We emit a movi with a dummy immediate argument. Keep the insn index
 * of the movi so that we later (when we know the actual insn count)
 * can update the immediate argument with the actual insn count. */
tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, exitreq_label);
- if (tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(tb) & CF_USE_ICOUNT) {
tcg_gen_st16_i32(count, tcg_ctx.tcg_env,
                 -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
}
static inline void gen_tb_end(TranslationBlock *tb, int num_insns)
{
- if (tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(tb) & CF_USE_ICOUNT) {
/* Update the num_insn immediate parameter now that we know
* the actual insn count. */
tcg_set_insn_param(icount_start_insn_idx, 1, num_insns);
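The movi emitted in gen_tb_start() and the tcg_set_insn_param() call above are the two halves of a deferred-immediate idiom: emit a placeholder constant, remember its index in the op buffer, and patch it once the insn count is known. Condensed into one place (a sketch using the names from these hunks; tcg_op_buf_count() is assumed to return the index of the next op to be emitted, as in this generation of TCG):

    /* In gen_tb_start(): placeholder immediate, index recorded. */
    icount_start_insn_idx = tcg_op_buf_count();
    tcg_gen_movi_i32(imm, 0xdeadbeef);      /* dummy value, patched at TB end */
    tcg_gen_sub_i32(count, count, imm);     /* icount budget -= insn count */

    /* In gen_tb_end(): operand 1 of that movi becomes the real count. */
    tcg_set_insn_param(icount_start_insn_idx, 1, num_insns);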
static bool use_exit_tb(DisasContext *ctx)
{
- return ((ctx->base.tb->cflags & CF_LAST_IO)
+ return ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
|| ctx->base.singlestep_enabled
|| singlestep);
}
case 0xC000:
/* RPCC */
va = dest_gpr(ctx, ra);
- if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
gen_helper_load_pcc(va, cpu_env);
gen_io_end();
/* No direct tb linking with singlestep (either QEMU's or the ARM
* debug architecture kind) or deterministic io
*/
- if (s->base.singlestep_enabled || s->ss_active || (s->base.tb->cflags & CF_LAST_IO)) {
+ if (s->base.singlestep_enabled || s->ss_active ||
+ (tb_cflags(s->base.tb) & CF_LAST_IO)) {
return false;
}
break;
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
}
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
s->base.is_jmp = DISAS_UPDATE;
break;
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
}
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
gen_lookup_tb(s);
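This read/write pair shows the bracketing discipline that recurs across every target below: when icount is in use, an I/O helper call is preceded by gen_io_start() and followed by gen_io_end(), and the TB is ended immediately afterwards so a changed icount budget or a newly pending interrupt takes effect. Schematically (gen_helper_read_timer is a hypothetical helper standing in for the target-specific ones):

    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();                  /* I/O permitted from here */
    }
    gen_helper_read_timer(val, cpu_env); /* hypothetical I/O helper */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();                    /* account for the I/O access */
        s->base.is_jmp = DISAS_UPDATE;   /* end the TB right away */
    }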
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
+ if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
/* FIXME: This can theoretically happen with self-modifying code. */
cpu_abort(cpu, "IO on conditional branch instruction");
}
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
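The per-target translation loops converted below all share the same icount skeleton: derive max_insns from the cflags count field, open an I/O window before the final instruction when CF_LAST_IO is set, and close it once the loop exits. Condensed (a sketch with the variable names used in these hunks):

    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;       /* no limit requested */
    }
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;
        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();              /* the last insn may do I/O */
        }
        /* ... translate one guest instruction ... */
    } while (/* in page && op buffer not full && */ num_insns < max_insns);
    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }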
dc->clear_x = 1;
npc = dc->pc;
- if (tb->cflags & CF_LAST_IO)
+ if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end();
/* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT
static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
/* Suppress goto_tb in the case of single-stepping and I/O. */
- if ((ctx->base.tb->cflags & CF_LAST_IO) || ctx->base.singlestep_enabled) {
+ if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
return false;
}
return true;
static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_string_movl_A0_EDI(s);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_EDI);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
}
}
static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_string_movl_A0_ESI(s);
gen_op_movl_T0_Dshift(ot);
gen_op_add_reg_T0(s->aflag, R_ESI);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
}
}
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_ins(s, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
} else {
gen_outs(s, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_jmp(s, s->pc - s->cs_base);
}
}
tcg_gen_movi_tl(cpu_T0, val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_movi_i32(cpu_tmp2_i32, val);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
svm_is_rep(prefixes));
gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
gen_bpt_io(s, cpu_tmp2_i32, ot);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
case 0x131: /* rdtsc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtsc(cpu_env);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdtscp(cpu_env);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jmp(s, s->pc - s->cs_base);
}
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_op_mov_v_reg(ot, cpu_T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
cpu_T0);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
}
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
gen_op_mov_reg_v(ot, rm, cpu_T0);
- if (s->base.tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
}
}
record/replay modes and there will always be an
additional step for ecx=0 when icount is enabled.
*/
- dc->repz_opt = !dc->jmp_opt && !(dc->base.tb->cflags & CF_USE_ICOUNT);
+ dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
#if 0
/* check addseg logic */
if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
the flag and abort the translation to give the irqs a
chance to happen */
dc->base.is_jmp = DISAS_TOO_MANY;
- } else if ((dc->base.tb->cflags & CF_USE_ICOUNT)
+ } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
&& ((dc->base.pc_next & TARGET_PAGE_MASK)
!= ((dc->base.pc_next + TARGET_MAX_INSN_SIZE - 1)
& TARGET_PAGE_MASK)
break;
case CSR_IM:
/* mark as an io operation because it could cause an interrupt */
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
dc->is_jmp = DISAS_UPDATE;
break;
case CSR_IP:
/* mark as an io operation because it could cause an interrupt */
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
dc->is_jmp = DISAS_UPDATE;
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
&& (dc->pc < next_page_start)
&& num_insns < max_insns);
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
dc->done_mac = 0;
dc->writeback_mask = 0;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
break;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
(pc_offset) < (TARGET_PAGE_SIZE - 32) &&
num_insns < max_insns);
- if (tb->cflags & CF_LAST_IO)
+ if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end();
if (unlikely(cs->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
npc = dc->jmp_pc;
}
- if (tb->cflags & CF_LAST_IO)
+ if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end();
/* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT
switch (sel) {
case 0:
/* Mark as an IO operation because we read the time. */
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_mfc0_count(arg, cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
/* Break the TB to be able to take timer interrupts immediately
if (sel != 0)
check_insn(ctx, ISA_MIPS32);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
trace_mips_translate_c0("mtc0", rn, reg, sel);
/* For simplicity assume that all writes can cause interrupts. */
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
/* BS_STOP isn't sufficient; we need to ensure we break out of
* translated code to check for pending interrupts. */
switch (sel) {
case 0:
/* Mark as an IO operation because we read the time. */
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_mfc0_count(arg, cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
/* Break the TB to be able to take timer interrupts immediately
if (sel != 0)
check_insn(ctx, ISA_MIPS64);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
trace_mips_translate_c0("dmtc0", rn, reg, sel);
/* For simplicity assume that all writes can cause interrupts. */
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
/* BS_STOP isn't sufficient; we need to ensure we break out of
* translated code to check for pending interrupts. */
gen_store_gpr(t0, rt);
break;
case 2:
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdhwr_cc(t0, cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
gen_store_gpr(t0, rt);
ctx.default_tcg_memop_mask = (ctx.insn_flags & ISA_MIPS32R6) ?
MO_UNALN : MO_ALIGN;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
goto done_generating;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
if (singlestep)
break;
}
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
if (cs->singlestep_enabled && ctx.bstate != BS_BRANCH) {
ctx.singlestep_enabled = 0;
ctx.bstate = BS_NONE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
max_insns = 1;
} else {
int page_insns = (TARGET_PAGE_SIZE - (tb->pc & ~TARGET_PAGE_MASK)) / 4;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
break;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
!tcg_op_buf_full() &&
num_insns < max_insns);
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
break;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
disas_openrisc_insn(dc, cpu);
&& (dc->pc < next_page_start)
&& num_insns < max_insns);
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
msr_se = 1;
#endif
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
LOG_DISAS("----------------\n");
LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
ctx.nip, ctx.mem_idx, (int)msr_ir);
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO))
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO))
gen_io_start();
if (unlikely(need_byteswap(&ctx))) {
ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip));
exit(1);
}
}
- if (tb->cflags & CF_LAST_IO)
+ if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end();
if (ctx.exception == POWERPC_EXCP_NONE) {
gen_goto_tb(&ctx, 0, ctx.nip);
#if !defined(CONFIG_USER_ONLY)
static void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
static void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
/* Time base */
static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
#if !defined(CONFIG_USER_ONLY)
static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
/* HDECR */
static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
static bool use_exit_tb(DisasContext *s)
{
return (s->singlestep_enabled ||
- (s->tb->cflags & CF_LAST_IO) ||
+ (tb_cflags(s->tb) & CF_LAST_IO) ||
(s->tb->flags & FLAG_MASK_PER));
}
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
break;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
}
} while (status == NO_EXIT);
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
(ctx.tbflags & (1 << SR_RB))) * 0x10;
ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
break;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
decode_opc(&ctx);
ctx.pc += 2;
}
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
#endif
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
goto exit_gen_loop;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
num_insns < max_insns);
exit_gen_loop:
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}
if (!dc->is_br) {
uint64_t pc_start = tb->pc;
uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
int num_insns = 0;
- int max_insns = tb->cflags & CF_COUNT_MASK;
+ int max_insns = tb_cflags(tb) & CF_COUNT_MASK;
dc->pc = pc_start;
dc->mmuidx = 0;
int num_insns, max_insns;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
cpu_F1d = tcg_temp_new_i64();
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
+ max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
goto done_generating;
}
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
dc->pc < next_page_start &&
num_insns < max_insns);
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
if (dc->condjmp) {
/* FIXME: This can theoretically happen with self-modifying
code. */
static bool gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_update_ccount(cpu_env);
tcg_gen_mov_i32(d, cpu_SR[sr]);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
return true;
}
static void gen_check_interrupts(DisasContext *dc)
{
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_check_interrupts(cpu_env);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
}
static bool gen_wsr_ccount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_wsr_ccount(cpu_env, v);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jumpi_check_loop_end(dc, 0);
return true;
tcg_gen_mov_i32(cpu_SR[sr], v);
tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_update_ccompare(cpu_env, tmp);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_jumpi_check_loop_end(dc, 0);
ret = true;
TCGv_i32 pc = tcg_const_i32(dc->next_pc);
TCGv_i32 intlevel = tcg_const_i32(imm4);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_waiti(cpu_env, pc, intlevel);
- if (dc->tb->cflags & CF_USE_ICOUNT) {
+ if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
gen_io_end();
}
tcg_temp_free(pc);
CPUXtensaState *env = cs->env_ptr;
DisasContext dc;
int insn_count = 0;
- int max_insns = tb->cflags & CF_COUNT_MASK;
+ int max_insns = tb_cflags(tb) & CF_COUNT_MASK;
uint32_t pc_start = tb->pc;
uint32_t next_page_start =
(pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
gen_tb_start(tb);
- if ((tb->cflags & CF_USE_ICOUNT) &&
+ if ((tb_cflags(tb) & CF_USE_ICOUNT) &&
(tb->flags & XTENSA_TBFLAG_YIELD)) {
tcg_gen_insn_start(dc.pc);
++insn_count;
break;
}
- if (insn_count == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (insn_count == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
gen_io_start();
}
tcg_temp_free(dc.next_icount);
}
- if (tb->cflags & CF_LAST_IO) {
+ if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
}