#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
#define PSTATE_PAN (1U << 22)
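+/* ARMv8.2-UAO: PSTATE.UAO (User Access Override) is bit 23, as in SPSR_ELx. */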
+#define PSTATE_UAO (1U << 23)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2;
}
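+/* ARMv8.2-UAO is present when ID_AA64MMFR2_EL1.UAO is nonzero. */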
+static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;
+}
+
static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
    .readfn = aa64_pan_read, .writefn = aa64_pan_write
};
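+/*
+ * The UAO state lives in PSTATE; these accessors mirror the PSTATE.UAO
+ * bit for MRS/MSR accesses to the UAO register.
+ */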
+static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    return env->pstate & PSTATE_UAO;
+}
+
+static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                           uint64_t value)
+{
+    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
+}
+
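+/* The only backing state is PSTATE.UAO, so there is no raw register state. */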
+static const ARMCPRegInfo uao_reginfo = {
+    .name = "UAO", .state = ARM_CP_STATE_AA64,
+    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
+    .type = ARM_CP_NO_RAW, .access = PL1_RW,
+    .readfn = aa64_uao_read, .writefn = aa64_uao_write
+};
+
static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
#endif
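+    /* Expose UAO to the guest only when ID_AA64MMFR2_EL1 reports it. */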
+    if (cpu_isar_feature(aa64_uao, cpu)) {
+        define_one_arm_cp_reg(cpu, &uao_reginfo);
+    }
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
        s->base.is_jmp = DISAS_NEXT;
        break;
+    case 0x03: /* UAO */
+        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
+            goto do_unallocated;
+        }
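+        /* MSR UAO, #imm: the value written to PSTATE.UAO is CRm<0>. */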
+        if (crm & 1) {
+            set_pstate_bits(PSTATE_UAO);
+        } else {
+            clear_pstate_bits(PSTATE_UAO);
+        }
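+        /*
+         * UAO changes how unprivileged (LDTR/STTR) accesses are handled,
+         * and that choice is cached in the hflags, so rebuild them.
+         */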
+        t1 = tcg_const_i32(s->current_el);
+        gen_helper_rebuild_hflags_a64(cpu_env, t1);
+        tcg_temp_free_i32(t1);
+        break;
+
    case 0x04: /* PAN */
        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
            goto do_unallocated;