Begin setting, but not relying upon, env->hflags.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20191023150057.25731-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
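
The pattern applied at each hunk below, as a minimal standalone sketch
(hypothetical toy_* names, not the actual QEMU helpers): whenever CPU
state that feeds the cached flags changes, recompute the cache
immediately, so that later patches can read env->hflags without
re-deriving it on every translation.

  /* Standalone illustration of the caching pattern only; not QEMU code. */
  #include <stdint.h>
  #include <stdio.h>

  struct toy_env {
      int el;            /* current exception level */
      int thumb;         /* Thumb execution state   */
      uint32_t hflags;   /* cached, derived from the fields above */
  };

  static uint32_t toy_compute_hflags(const struct toy_env *env)
  {
      /* Any pure function of the mutable state will do for the sketch. */
      return ((uint32_t)env->el << 1) | (env->thumb & 1);
  }

  static void toy_rebuild_hflags(struct toy_env *env)
  {
      env->hflags = toy_compute_hflags(env);
  }

  static void toy_exception_return(struct toy_env *env, int new_el)
  {
      env->el = new_el;          /* state change ...           */
      toy_rebuild_hflags(env);   /* ... then refresh the cache */
  }

  int main(void)
  {
      struct toy_env env = { .el = 1, .thumb = 0, .hflags = 0 };
      toy_rebuild_hflags(&env);
      toy_exception_return(&env, 0);
      printf("hflags = 0x%x\n", env.hflags);
      return 0;
  }
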
aarch64_sve_narrow_vq(env, vq);
}
env->vfp.zcr_el[1] = vq - 1;
+ arm_rebuild_hflags(env);
ret = vq * 16;
}
return ret;
hw_breakpoint_update_all(cpu);
hw_watchpoint_update_all(cpu);
+ arm_rebuild_hflags(env);
}
bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
} else {
env->regs[15] = new_pc & ~0x3;
}
+ helper_rebuild_hflags_a32(env, new_el);
qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
"AArch32 EL%d PC 0x%" PRIx32 "\n",
cur_el, new_el, env->regs[15]);
}
aarch64_restore_sp(env, new_el);
env->pc = new_pc;
+ helper_rebuild_hflags_a64(env, new_el);
qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
"AArch64 EL%d PC 0x%" PRIx64 "\n",
cur_el, new_el, env->pc);
}
+
/*
* Note that cur_el can never be 0. If new_el is 0, then
* el0_a64 is return_to_aa64, else el0_a64 is ignored.
env->regs[14] = env->regs[15] + offset;
}
env->regs[15] = newpc;
+ arm_rebuild_hflags(env);
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
pstate_write(env, PSTATE_DAIF | new_mode);
env->aarch64 = 1;
aarch64_restore_sp(env, new_el);
+ helper_rebuild_hflags_a64(env, new_el);
env->pc = addr;
if (!kvm_enabled()) {
pmu_op_finish(&cpu->env);
}
+ arm_rebuild_hflags(&cpu->env);
return 0;
}
* state. Do the masking now.
*/
env->regs[15] &= (env->thumb ? ~1 : ~3);
+ arm_rebuild_hflags(env);
qemu_mutex_lock_iothread();
arm_call_el_change_hook(env_archcpu(env));