--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
+#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
+
+#include <linux/const.h>
+
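+/*
+ * Key 0 is used for all userspace mappings, and the two most-significant
+ * bits of the AMR control it: bit 0 blocks writes, bit 1 blocks reads.
+ */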
+#define AMR_KUAP_BLOCK_READ    UL(0x4000000000000000)
+#define AMR_KUAP_BLOCK_WRITE   UL(0x8000000000000000)
+#define AMR_KUAP_BLOCKED       (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
+#define AMR_KUAP_SHIFT         62
+
+#ifdef __ASSEMBLY__
+
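+/*
+ * Restore the AMR saved in pt_regs at interrupt entry. There is no isync
+ * after the mtspr; the rfid on the return-to-kernel paths that use this
+ * macro provides the context synchronisation.
+ */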
+.macro kuap_restore_amr        gpr
+#ifdef CONFIG_PPC_KUAP
+       BEGIN_MMU_FTR_SECTION_NESTED(67)
+       ld      \gpr, STACK_REGS_KUAP(r1)
+       mtspr   SPRN_AMR, \gpr
+       END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+#endif
+.endm
+
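+/*
+ * With CONFIG_PPC_KUAP_DEBUG, trap (as a once-only warning) if the AMR is
+ * not set to the fully blocked value at a point where user access should
+ * be prevented.
+ */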
+.macro kuap_check_amr gpr1, gpr2
+#ifdef CONFIG_PPC_KUAP_DEBUG
+       BEGIN_MMU_FTR_SECTION_NESTED(67)
+       mfspr   \gpr1, SPRN_AMR
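+       /* li only takes a 16-bit immediate, hence the shift down and back up */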
+       li      \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
+       sldi    \gpr2, \gpr2, AMR_KUAP_SHIFT
+999:   tdne    \gpr1, \gpr2
+       EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
+       END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+#endif
+.endm
+
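+/*
+ * Save the AMR to pt_regs and set it to block all user access. \gpr1 and
+ * \gpr2 are scratch GPRs and \use_cr a scratch CR field. If \msr_pr_cr is
+ * given it must already hold an MSR_PR test, and the save & lock is skipped
+ * for interrupts from userspace, where the AMR should already be blocked.
+ */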
+.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
+#ifdef CONFIG_PPC_KUAP
+       BEGIN_MMU_FTR_SECTION_NESTED(67)
+       .ifnb \msr_pr_cr
+       bne     \msr_pr_cr, 99f
+       .endif
+       mfspr   \gpr1, SPRN_AMR
+       std     \gpr1, STACK_REGS_KUAP(r1)
+       li      \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
+       sldi    \gpr2, \gpr2, AMR_KUAP_SHIFT
+       cmpd    \use_cr, \gpr1, \gpr2
+       beq     \use_cr, 99f
+       // We don't isync before the mtspr because we very recently entered
+       // via rfid, which is already context synchronising
+       mtspr   SPRN_AMR, \gpr2
+       isync
+99:
+       END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_PPC_KUAP
+
+#include <asm/reg.h>
+
+/*
+ * We support individually allowing read or write, but we don't support nesting
+ * because that would require an expensive read/modify/write of the AMR.
+ */
+
+static inline void set_kuap(unsigned long value)
+{
+       if (!mmu_has_feature(MMU_FTR_RADIX_KUAP))
+               return;
+
+       /*
+        * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
+        * before and after the move to AMR. See table 6 on page 1134.
+        */
+       isync();
+       mtspr(SPRN_AMR, value);
+       isync();
+}
+
+static inline void allow_user_access(void __user *to, const void __user *from,
+                                    unsigned long size)
+{
+       // This is written so we can resolve to a single case at build time
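+       // e.g. get_user() has no user destination, so to == NULL and only
+       // reads from userspace need to be unblocked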
+       if (__builtin_constant_p(to) && to == NULL)
+               set_kuap(AMR_KUAP_BLOCK_WRITE);
+       else if (__builtin_constant_p(from) && from == NULL)
+               set_kuap(AMR_KUAP_BLOCK_READ);
+       else
+               set_kuap(0);
+}
+
+static inline void prevent_user_access(void __user *to, const void __user *from,
+                                      unsigned long size)
+{
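+       /* Block both reads and writes again, whatever was previously allowed */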
+       set_kuap(AMR_KUAP_BLOCKED);
+}
+
+#endif /* CONFIG_PPC_KUAP */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
 
        RESTORE_CTR(r1, area);                                             \
        b       bad_stack;                                                 \
 3:     EXCEPTION_PROLOG_COMMON_1();                                       \
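+       /* cr0 still holds the MSR_PR test from earlier in the prolog */  \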
+       kuap_save_amr_and_lock r9, r10, cr1, cr0;                          \
        beq     4f;                     /* if from kernel mode          */ \
        ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);                              \
        SAVE_PPR(area, r9);                                                \
  */
 #define EXCEPTION_COMMON_NORET_STACK(area, trap, label, hdlr, additions) \
        EXCEPTION_PROLOG_COMMON_1();                            \
+       kuap_save_amr_and_lock r9, r10, cr1;                    \
        EXCEPTION_PROLOG_COMMON_2(area);                        \
        EXCEPTION_PROLOG_COMMON_3(trap);                        \
        /* Volatile regs are potentially clobbered here */      \
 
 #define END_MMU_FTR_SECTION(msk, val)          \
        END_MMU_FTR_SECTION_NESTED(msk, val, 97)
 
+#define END_MMU_FTR_SECTION_NESTED_IFSET(msk, label)   \
+       END_MMU_FTR_SECTION_NESTED((msk), (msk), label)
+
 #define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk))
 #define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0)
 
 
 #ifndef _ASM_POWERPC_KUP_H_
 #define _ASM_POWERPC_KUP_H_
 
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/kup-radix.h>
+#endif
+
 #ifndef __ASSEMBLY__
 
 #include <asm/pgtable.h>
 
  */
 #define MMU_FTR_1T_SEGMENT             ASM_CONST(0x40000000)
 
+/*
+ * Supports KUAP (key 0 controlling userspace addresses) on radix
+ */
+#define MMU_FTR_RADIX_KUAP             ASM_CONST(0x80000000)
+
 /* MMU feature bit sets for various CPUs */
 #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2  \
        MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
 #endif
 #ifdef CONFIG_PPC_RADIX_MMU
                MMU_FTR_TYPE_RADIX |
-#endif
+#ifdef CONFIG_PPC_KUAP
+               MMU_FTR_RADIX_KUAP |
+#endif /* CONFIG_PPC_KUAP */
+#endif /* CONFIG_PPC_RADIX_MMU */
                0,
 };
 
 
 #include <asm/exception-64e.h>
 #endif
 #include <asm/feature-fixups.h>
+#include <asm/kup.h>
 
 /*
  * System calls.
        addi    r9,r1,STACK_FRAME_OVERHEAD
        ld      r11,exception_marker@toc(r2)
        std     r11,-16(r9)             /* "regshere" marker */
+
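+       /* With CONFIG_PPC_KUAP_DEBUG, warn if the AMR is not blocked here */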
+       kuap_check_amr r10, r11
+
 #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
 BEGIN_FW_FTR_SECTION
        beq     33f
        andi.   r6,r8,MSR_PR
        ld      r4,_LINK(r1)
 
+       kuap_check_amr r10, r11
+
 #ifdef CONFIG_PPC_BOOK3S
        /*
         * Clear MSR_RI, MSR_EE is already and remains disabled. We could do
        std     r8, PACATMSCRATCH(r13)
 #endif
 
+       /*
+        * We don't need to restore AMR on the way back to userspace for KUAP.
+        * The value of AMR only matters while we're in the kernel.
+        */
        ld      r13,GPR13(r1)   /* only restore r13 if returning to usermode */
        ld      r2,GPR2(r1)
        ld      r1,GPR1(r1)
        RFI_TO_USER
        b       .       /* prevent speculative execution */
 
-       /* exit to kernel */
-1:     ld      r2,GPR2(r1)
+1:     /* exit to kernel */
+       kuap_restore_amr r2
+
+       ld      r2,GPR2(r1)
        ld      r1,GPR1(r1)
        mtlr    r4
        mtcr    r5
        std     r23,_CCR(r1)
        std     r1,KSP(r3)      /* Set old stack pointer */
 
+       kuap_check_amr r9, r10
+
        FLUSH_COUNT_CACHE
 
        /*
        ld      r4,_XER(r1)
        mtspr   SPRN_XER,r4
 
+       kuap_check_amr r5, r6
+
        REST_8GPRS(5, r1)
 
        andi.   r0,r3,MSR_RI
        ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
        REST_GPR(13, r1)
 
+       /*
+        * We don't need to restore AMR on the way back to userspace for KUAP.
+        * The value of AMR only matters while we're in the kernel.
+        */
        mtspr   SPRN_SRR1,r3
 
        ld      r2,_CCR(r1)
        ld      r0,GPR0(r1)
        ld      r2,GPR2(r1)
        ld      r3,GPR3(r1)
+
+       kuap_restore_amr r4
+
        ld      r4,GPR4(r1)
        ld      r1,GPR1(r1)
        RFI_TO_KERNEL
 
 #include <asm/cpuidle.h>
 #include <asm/head-64.h>
 #include <asm/feature-fixups.h>
+#include <asm/kup.h>
 
 /*
  * There are a few constraints to be concerned with.
        mfspr   r11,SPRN_DSISR          /* Save DSISR */
        std     r11,_DSISR(r1)
        std     r9,_CCR(r1)             /* Save CR in stackframe */
+       kuap_save_amr_and_lock r9, r10, cr1
        /* Save r9 through r13 from EXMC save area to stack frame. */
        EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
        mfmsr   r11                     /* get MSR value */
        mfspr   r11,SPRN_HSRR0          /* Save HSRR0 */
        mfspr   r12,SPRN_HSRR1          /* Save HSRR1 */
        EXCEPTION_PROLOG_COMMON_1()
+       /* We don't touch AMR here, we never go to virtual mode */
        EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
        EXCEPTION_PROLOG_COMMON_3(0xe60)
        addi    r3,r1,STACK_FRAME_OVERHEAD
 
 #include <asm/powernv.h>
 #include <asm/sections.h>
 #include <asm/trace.h>
+#include <asm/uaccess.h>
 
 #include <trace/events/thp.h>
 
 }
 #endif
 
+#ifdef CONFIG_PPC_KUAP
+void setup_kuap(bool disabled)
+{
+       if (disabled || !early_radix_enabled())
+               return;
+
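+       /* cur_cpu_spec->mmu_features is global, so only update/print it once */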
+       if (smp_processor_id() == boot_cpuid) {
+               pr_info("Activating Kernel Userspace Access Prevention\n");
+               cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
+       }
+
+       /* Make sure userspace can't change the AMR */
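+       /* (a zero UAMOR means no AMR bits are modifiable in problem state) */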
+       mtspr(SPRN_UAMOR, 0);
+       mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
+       isync();
+}
+#endif
+
 void __init radix__early_init_mmu(void)
 {
        unsigned long lpcr;
 
 
 #include <asm/mman.h>
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
 #include <asm/setup.h>
 #include <linux/pkeys.h>
 #include <linux/of_device.h>
 
        depends on PPC_BOOK3S_64
        select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
        select PPC_HAVE_KUEP
+       select PPC_HAVE_KUAP
        default y
        help
          Enable support for the Power ISA 3.0 Radix style MMU. Currently this
 
          If you're unsure, say Y.
 
+config PPC_KUAP_DEBUG
+       bool "Extra debugging for Kernel Userspace Access Protection"
+       depends on PPC_HAVE_KUAP && PPC_RADIX_MMU
+       help
+         Add extra debugging for Kernel Userspace Access Protection (KUAP).
+
+         If you're unsure, say N.
+
 config ARCH_ENABLE_HUGEPAGE_MIGRATION
        def_bool y
        depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION