From: Richard Henderson
Date: Fri, 1 Mar 2024 20:41:10 +0000 (-1000)
Subject: target/arm: Do memory type alignment check when translation enabled
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=728b923f548dba640cecb693ff1f1f134ef1097a;p=qemu.git

target/arm: Do memory type alignment check when translation enabled

If translation is enabled, and the PTE memory type is Device, enable
checking alignment via TLB_CHECK_ALIGNED. While the check is done
later than it should be per the ARM architecture, it's better than
not performing the check at all.

Signed-off-by: Richard Henderson
Message-id: 20240301204110.656742-7-richard.henderson@linaro.org
[PMM: tweaks to comment text]
Signed-off-by: Peter Maydell
Reviewed-by: Peter Maydell
---

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index ba1a27ca2b..31ae43f60e 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -471,6 +471,16 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
     return false;
 }
 
+static bool S1_attrs_are_device(uint8_t attrs)
+{
+    /*
+     * This slightly under-decodes the MAIR_ELx field:
+     * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE;
+     * 0b0000dd1x is UNPREDICTABLE.
+     */
+    return (attrs & 0xf0) == 0;
+}
+
 static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
 {
     /*
@@ -1684,6 +1694,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     bool aarch64 = arm_el_is_aa64(env, el);
     uint64_t descriptor, new_descriptor;
     ARMSecuritySpace out_space;
+    bool device;
 
     /* TODO: This code does not support shareability levels. */
     if (aarch64) {
@@ -2106,6 +2117,12 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     if (regime_is_stage2(mmu_idx)) {
         result->cacheattrs.is_s2_format = true;
         result->cacheattrs.attrs = extract32(attrs, 2, 4);
+        /*
+         * Security state does not really affect HCR_EL2.FWB;
+         * we only need to filter FWB for aa32 or other FEAT.
+         */
+        device = S2_attrs_are_device(arm_hcr_el2_eff(env),
+                                     result->cacheattrs.attrs);
     } else {
         /* Index into MAIR registers for cache attributes */
         uint8_t attrindx = extract32(attrs, 2, 3);
@@ -2118,6 +2135,28 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
             result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
         }
+        device = S1_attrs_are_device(result->cacheattrs.attrs);
+    }
+
+    /*
+     * Enable alignment checks on Device memory.
+     *
+     * Per R_XCHFJ, this check is mis-ordered. The correct ordering
+     * for alignment, permission, and stage 2 faults should be:
+     *  - Alignment fault caused by the memory type
+     *  - Permission fault
+     *  - A stage 2 fault on the memory access
+     * but due to the way the TCG softmmu TLB operates, we will have
+     * implicitly done the permission check and the stage2 lookup in
+     * finding the TLB entry, so the alignment check cannot be done sooner.
+     *
+     * In v7, for a CPU without the Virtualization Extensions this
+     * access is UNPREDICTABLE; we choose to make it take the alignment
+     * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
+     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
+     */
+    if (device) {
+        result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
     }
 
     /*
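
To illustrate the stage 1 predicate: a MAIR_ELx attribute byte with a
zero high nibble (0b0000dddd) encodes one of the Device memory types,
which is what S1_attrs_are_device() tests. The following standalone
harness is illustrative only (it is not part of the patch; the sample
encodings and their names come from the MAIR_ELx definition in the
Arm ARM):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same test as the patch's S1_attrs_are_device(). */
static int s1_attrs_are_device(uint8_t attrs)
{
    return (attrs & 0xf0) == 0;
}

int main(void)
{
    static const struct {
        uint8_t attrs;
        const char *desc;
    } samples[] = {
        { 0x00, "Device-nGnRnE" },
        { 0x04, "Device-nGnRE" },
        { 0x0c, "Device-GRE" },
        { 0x44, "Normal, Inner/Outer Non-cacheable" },
        { 0xff, "Normal, Inner/Outer Write-Back" },
    };

    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        printf("attrs=0x%02x (%s) -> %s\n",
               samples[i].attrs, samples[i].desc,
               s1_attrs_are_device(samples[i].attrs)
               ? "Device, alignment checked" : "Normal");
    }
    return 0;
}

Running it shows the three Device samples selecting the alignment
check and the two Normal samples leaving it disabled.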
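
On the enforcement side, TLB_CHECK_ALIGNED is consumed by the TCG
softmmu TLB rather than by the page table walker itself. As a rough
sketch of the mechanism (a simplified standalone model with invented
names, not QEMU's actual cputlb.c): QEMU keeps slow-path flag bits in
the low bits of a TLB entry's address comparator, so a flagged page
fails the inline fast-path compare and the slow path can then test
the alignment of accesses to pages whose fill marked them Device:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Modelled flag bit; the real TLB_CHECK_ALIGNED value differs. */
enum { MODEL_TLB_CHECK_ALIGNED = 1u << 0 };

typedef struct {
    uint64_t page_and_flags;    /* page-aligned address | flag bits */
} ModelTLBEntry;

static void model_access(const ModelTLBEntry *e, uint64_t addr,
                         unsigned size)
{
    if ((e->page_and_flags & MODEL_TLB_CHECK_ALIGNED)
        && (addr & (size - 1))) {
        /* Device page: a misaligned access takes an alignment fault. */
        printf("0x%08" PRIx64 " size %u -> alignment fault\n", addr, size);
    } else {
        printf("0x%08" PRIx64 " size %u -> ok\n", addr, size);
    }
}

int main(void)
{
    /* Pretend the TLB fill mapped a Device page at 0x40000000. */
    const ModelTLBEntry device_page = {
        .page_and_flags = 0x40000000u | MODEL_TLB_CHECK_ALIGNED,
    };

    model_access(&device_page, 0x40000000, 4);  /* aligned word: ok */
    model_access(&device_page, 0x40000002, 4);  /* misaligned: faults */
    return 0;
}

This also makes the commit message's ordering caveat concrete: by the
time the model (or the real TLB) can test alignment, the translation
and permission lookups that produced the entry have already happened.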