* for the accesses done as part of a stage 1 page table walk, rather than
* having to walk the stage 2 page table over and over.)
*
+ * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
+ * are not quite the same -- different CPU types (most notably M profile
+ * vs A/R profile) would like to use MMU indexes with different semantics,
+ * but since we don't ever need to use all of those in a single CPU we
+ * can avoid setting NB_MMU_MODES to more than 8. The lower bits of
+ * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
+ * the same for any particular CPU.
+ * Variables of type ARMMMUIdx are always full values, and the core
+ * index values are in variables of type 'int'.
+ *
* Our enumeration includes at the end some entries which are not "true"
* mmu_idx values in that they don't have corresponding TLBs and are only
* valid for doing slow path page table walks.
*
* The constant names here are patterned after the general style of the names
* of the AT/ATS operations.
* The values used are carefully arranged to make mmu_idx => EL lookup easy.
*/
+#define ARM_MMU_IDX_A 0x10 /* A profile (and M profile, for the moment) */
+#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
+
+#define ARM_MMU_IDX_TYPE_MASK (~0x7)
+#define ARM_MMU_IDX_COREIDX_MASK 0x7
+
typedef enum ARMMMUIdx {
- ARMMMUIdx_S12NSE0 = 0,
- ARMMMUIdx_S12NSE1 = 1,
- ARMMMUIdx_S1E2 = 2,
- ARMMMUIdx_S1E3 = 3,
- ARMMMUIdx_S1SE0 = 4,
- ARMMMUIdx_S1SE1 = 5,
- ARMMMUIdx_S2NS = 6,
+ ARMMMUIdx_S12NSE0 = 0 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S12NSE1 = 1 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1SE1 = 5 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A,
/* Indexes below here don't have TLBs and are used only for AT system
* instructions or for the first stage of an S12 page table walk.
*/
- ARMMMUIdx_S1NSE0 = 7,
- ARMMMUIdx_S1NSE1 = 8,
+ ARMMMUIdx_S1NSE0 = 0 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_S1NSE1 = 1 | ARM_MMU_IDX_NOTLB,
} ARMMMUIdx;
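
For concreteness, here is how two of the values above decompose under the
masks defined in this patch (illustrative arithmetic only, not part of the
patch):

    /* ARMMMUIdx_S1E2   = 2 | ARM_MMU_IDX_A     = 0x12
     *   type bits:  0x12 & ARM_MMU_IDX_TYPE_MASK    == 0x10 (ARM_MMU_IDX_A)
     *   core index: 0x12 & ARM_MMU_IDX_COREIDX_MASK == 2
     *
     * ARMMMUIdx_S1NSE0 = 0 | ARM_MMU_IDX_NOTLB = 0x20
     *   type bits:  0x20 == ARM_MMU_IDX_NOTLB (no TLB, so no core index)
     */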
+/* Bit values for the core mmu index of each ARMMMUIdx,
+ * for use when calling tlb_flush_by_mmuidx() and friends.
+ */
+typedef enum ARMMMUIdxBit {
+ ARMMMUIdxBit_S12NSE0 = 1 << 0,
+ ARMMMUIdxBit_S12NSE1 = 1 << 1,
+ ARMMMUIdxBit_S1E2 = 1 << 2,
+ ARMMMUIdxBit_S1E3 = 1 << 3,
+ ARMMMUIdxBit_S1SE0 = 1 << 4,
+ ARMMMUIdxBit_S1SE1 = 1 << 5,
+ ARMMMUIdxBit_S2NS = 1 << 6,
+} ARMMMUIdxBit;
+
#define MMU_USER_IDX 0
+static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
+{
+ return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
+}
+
+static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
+{
+ return mmu_idx | ARM_MMU_IDX_A;
+}
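
As a usage sketch (illustrative only; all names used here are the ones
defined above), the two helpers and the ARMMMUIdxBit values relate like
this:

    int core = arm_to_core_mmu_idx(ARMMMUIdx_S1E2);   /* 0x12 & 0x7 == 2 */
    ARMMMUIdx full = core_to_arm_mmu_idx(env, core);  /* 2 | 0x10 == ARMMMUIdx_S1E2 */
    assert(ARMMMUIdxBit_S1E2 == (1 << core));         /* 1 << 2 */

That is, the mask passed to tlb_flush_by_mmuidx() and friends is always
built from 1 << arm_to_core_mmu_idx(idx), which is exactly what the
ARMMMUIdxBit_* constants spell out.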
+
/* Return the exception level we're running at if this is our mmu_idx */
static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
- assert(mmu_idx < ARMMMUIdx_S2NS);
- return mmu_idx & 3;
+ switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
+ case ARM_MMU_IDX_A:
+ return mmu_idx & 3;
+ default:
+ g_assert_not_reached();
+ }
}
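
A quick check of the "carefully arranged" claim above (illustrative only):
for the A-profile values, the low two bits of the ARMMMUIdx are the EL:

    arm_mmu_idx_to_el(ARMMMUIdx_S12NSE0) == 0   /* 0x10 & 3 */
    arm_mmu_idx_to_el(ARMMMUIdx_S12NSE1) == 1   /* 0x11 & 3 */
    arm_mmu_idx_to_el(ARMMMUIdx_S1E2)    == 2   /* 0x12 & 3 */
    arm_mmu_idx_to_el(ARMMMUIdx_S1E3)    == 3   /* 0x13 & 3 */
    arm_mmu_idx_to_el(ARMMMUIdx_S1SE0)   == 0   /* 0x14 & 3 */
    arm_mmu_idx_to_el(ARMMMUIdx_S1SE1)   == 1   /* 0x15 & 3 */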
/* Determine the current mmu_idx to use for normal loads/stores */
static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    int el = arm_current_el(env);
if (el < 2 && arm_is_secure_below_el3(env)) {
- return ARMMMUIdx_S1SE0 + el;
+ return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0 + el);
}
return el;
}
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{
- ARMMMUIdx mmu_idx = cpu_mmu_index(env, false);
+ ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
if (is_a64(env)) {
*pc = env->pc;
*flags = ARM_TBFLAG_AARCH64_STATE_MASK;
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
- *flags |= (mmu_idx << ARM_TBFLAG_MMUIDX_SHIFT);
+ *flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
CPUState *cs = ENV_GET_CPU(env);
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
}
static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = ENV_GET_CPU(env);
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 40);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 40);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S2NS);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = ENV_GET_CPU(env);
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1E2));
+ ARMMMUIdxBit_S1E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
/* Accesses to VTTBR may change the VMID so we must flush the TLB. */
if (raw_read(env, ri) != value) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
raw_write(env, ri, value);
}
}
if (arm_is_secure_below_el3(env)) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
if (sec) {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
if (arm_is_secure_below_el3(env)) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
if (arm_feature(env, ARM_FEATURE_EL2)) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
} else {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
}
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E3));
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (sec) {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else if (has_el2) {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
} else {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E3));
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (arm_is_secure_below_el3(env)) {
tlb_flush_page_by_mmuidx(cs, pageaddr,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_page_by_mmuidx(cs, pageaddr,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E3));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (sec) {
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
uint64_t pageaddr = sextract64(value << 12, 0, 56);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1E2));
+ ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = sextract64(value << 12, 0, 56);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1E3));
+ ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 48);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 48);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S2NS);
}
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
+/* Convert a possible stage1+2 MMU index into the appropriate
+ * stage 1 MMU index
+ */
+static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+ mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
+ }
+ return mmu_idx;
+}
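
Illustrative only: the addition works because the S12 and S1 pairs are
spaced identically in the low bits, so for the values defined in this patch:

    stage_1_mmu_idx(ARMMMUIdx_S12NSE0) == ARMMMUIdx_S1NSE0   /* 0x10 -> 0x20 */
    stage_1_mmu_idx(ARMMMUIdx_S12NSE1) == ARMMMUIdx_S1NSE1   /* 0x11 -> 0x21 */
    stage_1_mmu_idx(ARMMMUIdx_S1E2)    == ARMMMUIdx_S1E2     /* unchanged */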
+
/* Returns TBI0 value for current regime el */
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
uint32_t el;
/* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
- * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
- */
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- mmu_idx += ARMMMUIdx_S1NSE0;
- }
+ * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
+ */
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
tcr = regime_tcr(env, mmu_idx);
el = regime_el(env, mmu_idx);
uint32_t el;
/* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
- * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
- */
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- mmu_idx += ARMMMUIdx_S1NSE0;
- }
+ * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
+ */
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
tcr = regime_tcr(env, mmu_idx);
el = regime_el(env, mmu_idx);
* on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- mmu_idx += ARMMMUIdx_S1NSE0;
- }
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
return regime_using_lpae_format(env, mmu_idx);
}
int ret;
ret = get_phys_addr(env, address, access_type,
- mmu_idx + ARMMMUIdx_S1NSE0, &ipa, attrs,
+ stage_1_mmu_idx(mmu_idx), &ipa, attrs,
prot, page_size, fsr, fi);
/* If S1 fails or S2 is disabled, return early. */
/*
* For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
*/
- mmu_idx += ARMMMUIdx_S1NSE0;
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
}
}
int ret;
MemTxAttrs attrs = {};
- ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
+ ret = get_phys_addr(env, address, access_type,
+ core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
&attrs, &prot, &page_size, fsr, fi);
if (!ret) {
/* Map a single [sub]page. */
bool ret;
uint32_t fsr;
ARMMMUFaultInfo fi = {};
+ ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
*attrs = (MemTxAttrs) {};
- ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
+ ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
attrs, &prot, &page_size, &fsr, &fi);
if (ret) {