powerpc/32s: Handle PROTFAULT in hash_page() also for CONFIG_PPC_KUAP
authorChristophe Leroy <christophe.leroy@csgroup.eu>
Mon, 16 Nov 2020 16:09:31 +0000 (16:09 +0000)
committerMichael Ellerman <mpe@ellerman.id.au>
Wed, 9 Dec 2020 05:59:46 +0000 (16:59 +1100)
On hash 32 bits, handling minor protection faults like unsetting
dirty flag is heavy if done from the normal page_fault processing,
because it implies hash table software lookup for flushing the entry
and then a DSI is taken anyway to add the entry back.

When KUAP was implemented, as explained in commit a68c31fc01ef
("powerpc/32s: Implement Kernel Userspace Access Protection"),
protection faults have been diverted from hash_page() because
hash_page() was not able to identify a KUAP fault.

Implement KUAP verification in hash_page(), by clearing write
permission when the access is a kernel access and Ks is 1.
This works regardless of the address because kernel segments always
have Ks set to 0 while user segments have Ks set to 0 only
when kernel write to userspace is granted.

Then protection faults can be handled by hash_page() even for KUAP.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/8a4ffe4798e9ea32aaaccdf85e411bb1beed3500.1605542955.git.christophe.leroy@csgroup.eu
arch/powerpc/kernel/head_book3s_32.S
arch/powerpc/mm/book3s32/hash_low.S

index b102eca44874de003867f3e014be9b6b9724e2c2..349bf3f0c3afa994c5cc0c0ccd88ad4c1df7c8ac 100644 (file)
@@ -292,11 +292,7 @@ BEGIN_MMU_FTR_SECTION
        stw     r11, THR11(r10)
        mfspr   r10, SPRN_DSISR
        mfcr    r11
-#ifdef CONFIG_PPC_KUAP
-       andis.  r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
-#else
        andis.  r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
-#endif
        mfspr   r10, SPRN_SPRG_THREAD
        beq     hash_page_dsi
 .Lhash_page_dsi_cont:
@@ -313,11 +309,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
        EXCEPTION_PROLOG handle_dar_dsisr=1
        get_and_save_dar_dsisr_on_stack r4, r5, r11
 BEGIN_MMU_FTR_SECTION
-#ifdef CONFIG_PPC_KUAP
-       andis.  r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
-#else
        andis.  r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
-#endif
        bne     handle_page_fault_tramp_2       /* if not, try to put a PTE */
        rlwinm  r3, r5, 32 - 15, 21, 21         /* DSISR_STORE -> _PAGE_RW */
        bl      hash_page
index ceb90a6e32562eb3f38c00adc1a916b18df2ee26..98b8d8a5ad6402f82bad74f6366108c8137c55ab 100644 (file)
@@ -89,8 +89,6 @@ _GLOBAL(hash_page)
 #else
        rlwimi  r8,r4,23,20,28          /* compute pte address */
 #endif
-       rlwinm  r0,r3,32-3,24,24        /* _PAGE_RW access -> _PAGE_DIRTY */
-       ori     r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
        /*
         * Update the linux PTE atomically.  We do the lwarx up-front
@@ -106,7 +104,18 @@ _GLOBAL(hash_page)
 #endif
 .Lretry:
        lwarx   r6,0,r8                 /* get linux-style pte, flag word */
+#ifdef CONFIG_PPC_KUAP
+       mfsrin  r5,r4
+       rlwinm  r0,r9,28,_PAGE_RW       /* MSR[PR] => _PAGE_RW */
+       rlwinm  r5,r5,12,_PAGE_RW       /* Ks => _PAGE_RW */
+       andc    r5,r5,r0                /* Ks & ~MSR[PR] */
+       andc    r5,r6,r5                /* Clear _PAGE_RW when Ks = 1 && MSR[PR] = 0 */
+       andc.   r5,r3,r5                /* check access & ~permission */
+#else
        andc.   r5,r3,r6                /* check access & ~permission */
+#endif
+       rlwinm  r0,r3,32-3,24,24        /* _PAGE_RW access -> _PAGE_DIRTY */
+       ori     r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 #ifdef CONFIG_SMP
        bne-    .Lhash_page_out         /* return if access not permitted */
 #else