commit f80872e21c07edd06eb343eeeefc8af404b518a6 (mmu-hash64: Implement
Virtual Page Class Key Protection) added a new page protection
mechanism based on page keys and the AMR register to control access.
The AMR register allows or prohibits reads and/or writes on a page
depending on the control bits associated with the key. A store or a
load is only permitted if the associated bit is 0 (Power ISA), not 1
as the code currently checks. This patch modifies
ppc_hash64_amr_prot() to correct the protection check.
This issue was unveiled by commit
ccfb53ed6360cac0d5f6f7915ca9ae7eed866412
(target-ppc: fix Authority Mask Register init value), which changed
the initialisation value of the AMR register to 0.
Signed-off-by: Cédric Le Goater <clg@fr.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
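To illustrate the semantics being fixed, here is a minimal standalone
sketch. It is not QEMU code: the helper name, the PAGE_* values and
the bit-field extraction are assumptions based on the Power ISA
behaviour described above, where each key selects a 2-bit AMR field
whose first bit prohibits stores and whose second bit prohibits loads.

    /* Minimal sketch (hypothetical helper, not QEMU's implementation). */
    #include <assert.h>
    #include <stdint.h>

    #define PAGE_READ  0x1
    #define PAGE_WRITE 0x2
    #define PAGE_EXEC  0x4

    /* Map the AMR control bits of a given key to page protections,
     * assuming key N owns the Nth 2-bit field counted from the most
     * significant end of the 64-bit AMR. */
    static int amr_key_to_prot(uint64_t amr, int key)
    {
        int amrbits = (amr >> (2 * (31 - key))) & 0x3;
        int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (amrbits & 0x2) {        /* store-disable bit is set */
            prot &= ~PAGE_WRITE;
        }
        if (amrbits & 0x1) {        /* load-disable bit is set */
            prot &= ~PAGE_READ;
        }
        return prot;
    }

    int main(void)
    {
        /* AMR == 0 (the reset value after ccfb53ed6360) grants full access. */
        assert(amr_key_to_prot(0, 5) == (PAGE_READ | PAGE_WRITE | PAGE_EXEC));

        /* Setting the store-disable bit of key 5 removes only PAGE_WRITE. */
        uint64_t amr = (uint64_t)0x2 << (2 * (31 - 5));
        assert(amr_key_to_prot(amr, 5) == (PAGE_READ | PAGE_EXEC));
        return 0;
    }

The hunk below applies the same logic, clearing a permission when the
corresponding AMR bit is set, in ppc_hash64_amr_prot():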
static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
{
int key, amrbits;
- int prot = PAGE_EXEC;
+ int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
/* Only recent MMUs implement Virtual Page Class Key Protection */
if (!(env->mmu_model & POWERPC_MMU_AMR)) {
- return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return prot;
}
key = HPTE64_R_KEY(pte.pte1);
/* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
/* env->spr[SPR_AMR]); */
+ /*
+ * A store is permitted if the AMR bit is 0. Remove write
+ * protection if it is set.
+ */
if (amrbits & 0x2) {
- prot |= PAGE_WRITE;
+ prot &= ~PAGE_WRITE;
}
+ /*
+ * A load is permitted if the AMR bit is 0. Remove read
+ * protection if it is set.
+ */
if (amrbits & 0x1) {
- prot |= PAGE_READ;
+ prot &= ~PAGE_READ;
}
return prot;
}