         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
             goto bad_offset;
         }
-        return cpu->env.pmsav8.mair0;
+        return cpu->env.pmsav8.mair0[attrs.secure];
     case 0xdc4: /* MPU_MAIR1 */
         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
             goto bad_offset;
         }
-        return cpu->env.pmsav8.mair1;
+        return cpu->env.pmsav8.mair1[attrs.secure];
     default:
     bad_offset:
         qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
         }
         if (cpu->pmsav7_dregion) {
             /* Register is RES0 if no MPU regions are implemented */
-            cpu->env.pmsav8.mair0 = value;
+            cpu->env.pmsav8.mair0[attrs.secure] = value;
         }
         /* We don't need to do anything else because memory attributes
          * only affect cacheability, and we don't implement caching.
         }
         if (cpu->pmsav7_dregion) {
             /* Register is RES0 if no MPU regions are implemented */
-            cpu->env.pmsav8.mair1 = value;
+            cpu->env.pmsav8.mair1[attrs.secure] = value;
         }
         /* We don't need to do anything else because memory attributes
          * only affect cacheability, and we don't implement caching.
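The NVIC accessors above pick a bank using the security attribute of the memory transaction: attrs.secure is 0 for a Non-secure access and 1 for a Secure one, so it can index the two-entry arrays directly (QEMU names those indices M_REG_NS and M_REG_S). A minimal standalone sketch of that banking idiom, assuming only that 0/1 layout; BankedMPU, REG_NS/REG_S and write_mair0 are illustrative stand-ins, not QEMU code:

/* Illustrative only: models a register banked by security state. */
#include <stdint.h>
#include <stdio.h>

enum { REG_NS = 0, REG_S = 1 };   /* stand-ins for QEMU's M_REG_NS / M_REG_S */

typedef struct {
    uint32_t mair0[2];            /* one copy of the register per security state */
} BankedMPU;

/* 'secure' plays the role of attrs.secure in the accessors above. */
static void write_mair0(BankedMPU *m, int secure, uint32_t value)
{
    m->mair0[secure] = value;
}

int main(void)
{
    BankedMPU m = { .mair0 = { 0, 0 } };

    write_mair0(&m, REG_NS, 0x000000ff);  /* Non-secure write */
    write_mair0(&m, REG_S,  0x0000ff00);  /* Secure write lands in the other bank */
    printf("NS=0x%08x S=0x%08x\n", m.mair0[REG_NS], m.mair0[REG_S]);
    return 0;
}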
             }
         }
         env->pmsav7.rnr = 0;
-        env->pmsav8.mair0 = 0;
-        env->pmsav8.mair1 = 0;
+        env->pmsav8.mair0[M_REG_NS] = 0;
+        env->pmsav8.mair0[M_REG_S] = 0;
+        env->pmsav8.mair1[M_REG_NS] = 0;
+        env->pmsav8.mair1[M_REG_S] = 0;
     }

     set_flush_to_zero(1, &env->vfp.standard_fp_status);
          */
         uint32_t *rbar;
         uint32_t *rlar;
-        uint32_t mair0;
-        uint32_t mair1;
+        uint32_t mair0[2];
+        uint32_t mair1[2];
     } pmsav8;

     void *nvic;
                               vmstate_info_uint32, uint32_t),
         VMSTATE_VARRAY_UINT32(env.pmsav8.rlar, ARMCPU, pmsav7_dregion, 0,
                               vmstate_info_uint32, uint32_t),
-        VMSTATE_UINT32(env.pmsav8.mair0, ARMCPU),
-        VMSTATE_UINT32(env.pmsav8.mair1, ARMCPU),
+        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
+        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
         VMSTATE_END_OF_LIST()
     }
 };
         VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
         VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
         VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
+        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
         VMSTATE_END_OF_LIST()
     }
 };
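Note on the vmstate hunks: replacing env.pmsav8.mair0/mair1 with their [M_REG_NS] elements keeps the same two uint32 fields in the same positions of the existing section, so the migration stream for CPUs without the Security Extension should be unchanged; the Secure bank instead travels in the security-state subsection alongside the other [M_REG_S] registers, as the surrounding VMSTATE_UINT32 entries suggest.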