config CC_HAS_ELFV2
        def_bool PPC64 && $(cc-option, -mabi=elfv2)
 
+config CC_HAS_PREFIXED
+       def_bool PPC64 && $(cc-option, -mcpu=power10 -mprefixed)
+
 config 32BIT
        bool
        default y if PPC32
 
 endif
 
-# No prefix or pcrel
+# No pcrel; -mprefixed is used only when CONFIG_PPC_KERNEL_PREFIXED is set
+ifdef CONFIG_PPC_KERNEL_PREFIXED
+KBUILD_CFLAGS += $(call cc-option,-mprefixed)
+else
 KBUILD_CFLAGS += $(call cc-option,-mno-prefixed)
+endif
 KBUILD_CFLAGS += $(call cc-option,-mno-pcrel)
 
 # No AltiVec or VSX or MMA instructions when building kernel
 
 {
        int t;
 
-       __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+       /* -mprefixed can generate offsets beyond range, fall back hack */
+       if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+               __asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+       else
+               __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
 
        return t;
 }
 
 static __inline__ void arch_atomic_set(atomic_t *v, int i)
 {
-       __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+       /* -mprefixed can generate offsets beyond range, fall back hack */
+       if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+               /*
+                * Explicit D-form store: the "b" constraint forces the
+                * address into a base register (any GPR except r0), so the
+                * compiler cannot emit a prefixed store with an offset the
+                * fixed "0(%2)" template cannot express. The "=m" output
+                * keeps the write to v->counter visible to the compiler.
+                */
+               __asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+       else
+               /* "m<>" with %U/%X lets gcc pick update/indexed forms. */
+               __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
 }
 
 #define ATOMIC_OP(op, asm_op, suffix, sign, ...)                       \
 {
        s64 t;
 
-       __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+       /* -mprefixed can generate offsets beyond range, fall back hack */
+       if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+               __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+       else
+               __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
 
        return t;
 }
 
 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-       __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+       /* -mprefixed can generate offsets beyond range, fall back hack */
+       if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+               /*
+                * Explicit D-form store via a "b" (base register, not r0)
+                * operand, so the compiler cannot emit a prefixed store
+                * with an offset the fixed "0(%2)" template cannot express.
+                * The "=m" output keeps the write to v->counter visible.
+                */
+               __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+       else
+               /* "m<>" with %U/%X lets gcc pick update/indexed forms. */
+               __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
 }
 
 #define ATOMIC64_OP(op, asm_op)                                                \
 
  *
  */
 
+/* -mprefixed can generate offsets beyond range, fall back hack */
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
+#define DEF_MMIO_IN_X(name, size, insn)                                \
+static inline u##size name(const volatile u##size __iomem *addr)       \
+{                                                                      \
+       u##size ret;                                                    \
+       __asm__ __volatile__("sync;"#insn" %0,0,%1;twi 0,%0,0;isync"    \
+               : "=r" (ret) : "r" (addr) : "memory");                  \
+       return ret;                                                     \
+}
+
+#define DEF_MMIO_OUT_X(name, size, insn)                               \
+static inline void name(volatile u##size __iomem *addr, u##size val)   \
+{                                                                      \
+       __asm__ __volatile__("sync;"#insn" %1,0,%0"                     \
+               : : "r" (addr), "r" (val) : "memory");                  \
+       mmiowb_set_pending();                                           \
+}
+
+#define DEF_MMIO_IN_D(name, size, insn)                                \
+static inline u##size name(const volatile u##size __iomem *addr)       \
+{                                                                      \
+       u##size ret;                                                    \
+       __asm__ __volatile__("sync;"#insn" %0,0(%1);twi 0,%0,0;isync"\
+               : "=r" (ret) : "b" (addr) : "memory");  \
+       return ret;                                                     \
+}
+
+#define DEF_MMIO_OUT_D(name, size, insn)                               \
+static inline void name(volatile u##size __iomem *addr, u##size val)   \
+{                                                                      \
+       __asm__ __volatile__("sync;"#insn" %1,0(%0)"                    \
+               : : "b" (addr), "r" (val) : "memory");  \
+       mmiowb_set_pending();                                           \
+}
+#else
 #define DEF_MMIO_IN_X(name, size, insn)                                \
 static inline u##size name(const volatile u##size __iomem *addr)       \
 {                                                                      \
                : "=m<>" (*addr) : "r" (val) : "memory");       \
        mmiowb_set_pending();                                           \
 }
+#endif
 
 DEF_MMIO_IN_D(in_8,     8, lbz);
 DEF_MMIO_OUT_D(out_8,   8, stb);
 
  * because we do not write to any memory gcc knows about, so there
  * are no aliasing issues.
  */
+/* -mprefixed can generate offsets beyond range, fall back hack */
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
+#define __put_user_asm_goto(x, addr, label, op)                        \
+       asm_volatile_goto(                                      \
+               "1:     " op " %0,0(%1) # put_user\n"           \
+               EX_TABLE(1b, %l2)                               \
+               :                                               \
+               : "r" (x), "b" (addr)                           \
+               :                                               \
+               : label)
+#else
 #define __put_user_asm_goto(x, addr, label, op)                        \
        asm_volatile_goto(                                      \
                "1:     " op "%U1%X1 %0,%1      # put_user\n"   \
                EX_TABLE(1b, %l2)                               \
                :                                               \
-               : "r" (x), "m<>" (*addr)                \
+               : "r" (x), "m<>" (*addr)                        \
                :                                               \
                : label)
+#endif
 
 #ifdef __powerpc64__
 #define __put_user_asm2_goto(x, ptr, label)                    \
 
 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 
+/* -mprefixed can generate offsets beyond range, fall back hack */
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
+#define __get_user_asm_goto(x, addr, label, op)                        \
+       asm_volatile_goto(                                      \
+               "1:     "op" %0,0(%1)   # get_user\n"           \
+               EX_TABLE(1b, %l2)                               \
+               : "=r" (x)                                      \
+               : "b" (addr)                                    \
+               :                                               \
+               : label)
+#else
 #define __get_user_asm_goto(x, addr, label, op)                        \
        asm_volatile_goto(                                      \
                "1:     "op"%U1%X1 %0, %1       # get_user\n"   \
                EX_TABLE(1b, %l2)                               \
                : "=r" (x)                                      \
-               : "m<>" (*addr)                         \
+               : "m<>" (*addr)                                 \
                :                                               \
                : label)
+#endif
 
 #ifdef __powerpc64__
 #define __get_user_asm2_goto(x, addr, label)                   \
 
         * get corrupted.
         *
         * Use a b +8 to jump over the load.
+        * XXX: could make PCREL depend on MPROFILE_KERNEL
+        * XXX: check PCREL && MPROFILE_KERNEL calling sequence
         */
        if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
                pop = ppc_inst(PPC_RAW_NOP());
 
        bool "POWER10"
        depends on PPC_BOOK3S_64
        select ARCH_HAS_FAST_MULTIPLIER
+       select PPC_HAVE_PREFIXED_SUPPORT
 
 config E5500_CPU
        bool "Freescale e5500"
 
          If you're unsure, say Y.
 
+config PPC_KERNEL_PREFIXED
+       depends on PPC_HAVE_PREFIXED_SUPPORT
+       depends on CC_HAS_PREFIXED
+       default n
+       bool "Build Kernel with Prefixed Instructions"
+       help
+         POWER10 and later CPUs support prefixed instructions: 8 byte
+         instructions that provide large immediates, PC-relative
+         addressing, and various floating point, vector and MMA operations.
+
+         This option builds the kernel with prefixed instructions, and
+         allows a PC-relative addressing option to be selected.
+
+         Kernel support for prefixed instructions in applications and guests
+         is not affected by this option.
+
 config PPC_KUEP
        bool "Kernel Userspace Execution Prevention" if !40x
        default y if !40x
 config PPC_HAVE_PMU_SUPPORT
        bool
 
+config PPC_HAVE_PREFIXED_SUPPORT
+       bool
+
 config PMU_SYSFS
        bool "Create PMU SPRs sysfs file"
        default n