--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POINTER_AUTH_H
+#define __ASM_POINTER_AUTH_H
+
+#include <linux/random.h>
+
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+/*
+ * Each key is a 128-bit quantity which is split across a pair of 64-bit
+ * registers (Lo and Hi).
+ */
+struct ptrauth_key {
+       unsigned long lo, hi;
+};
+
+/*
+ * We give each process its own keys, which are shared by all threads. The keys
+ * are inherited upon fork(), and reinitialised upon exec*().
+ */
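+/*
+ * apia/apib are the instruction address keys (used by the PACIA/AUTIA and
+ * PACIB/AUTIB instruction families), apda/apdb the data address keys
+ * (PACDA/AUTDA, PACDB/AUTDB), and apga the key used by the PACGA generic
+ * authentication instruction.
+ */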
+struct ptrauth_keys {
+       struct ptrauth_key apia;
+       struct ptrauth_key apib;
+       struct ptrauth_key apda;
+       struct ptrauth_key apdb;
+       struct ptrauth_key apga;
+};
+
+static inline void ptrauth_keys_init(struct ptrauth_keys *keys)
+{
+       if (system_supports_address_auth()) {
+               get_random_bytes(&keys->apia, sizeof(keys->apia));
+               get_random_bytes(&keys->apib, sizeof(keys->apib));
+               get_random_bytes(&keys->apda, sizeof(keys->apda));
+               get_random_bytes(&keys->apdb, sizeof(keys->apdb));
+       }
+
+       if (system_supports_generic_auth())
+               get_random_bytes(&keys->apga, sizeof(keys->apga));
+}
+
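+/*
+ * Installing a key writes its two halves to the corresponding pair of system
+ * registers; e.g. __ptrauth_key_install(APIA, key) expands to write_sysreg_s()
+ * calls on SYS_APIAKEYLO_EL1 and SYS_APIAKEYHI_EL1.
+ */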
+#define __ptrauth_key_install(k, v)                            \
+do {                                                           \
+       struct ptrauth_key __pki_v = (v);                       \
+       write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1);     \
+       write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1);     \
+} while (0)
+
+static inline void ptrauth_keys_switch(struct ptrauth_keys *keys)
+{
+       if (system_supports_address_auth()) {
+               __ptrauth_key_install(APIA, keys->apia);
+               __ptrauth_key_install(APIB, keys->apib);
+               __ptrauth_key_install(APDA, keys->apda);
+               __ptrauth_key_install(APDB, keys->apdb);
+       }
+
+       if (system_supports_generic_auth())
+               __ptrauth_key_install(APGA, keys->apga);
+}
+
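+/*
+ * ptrauth_thread_init_user() both randomises and installs the keys, so it is
+ * expected to be called with tsk == current (as in arch_setup_new_exec()),
+ * since ptrauth_keys_switch() writes the key registers of the executing CPU.
+ */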
+#define ptrauth_thread_init_user(tsk)                                  \
+do {                                                                   \
+       struct task_struct *__ptiu_tsk = (tsk);                         \
+       ptrauth_keys_init(&__ptiu_tsk->thread_info.keys_user);          \
+       ptrauth_keys_switch(&__ptiu_tsk->thread_info.keys_user);        \
+} while (0)
+
+#define ptrauth_thread_switch(tsk)     \
+       ptrauth_keys_switch(&(tsk)->thread_info.keys_user)
+
+#else /* CONFIG_ARM64_PTR_AUTH */
+#define ptrauth_thread_init_user(tsk)
+#define ptrauth_thread_switch(tsk)
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#endif /* __ASM_POINTER_AUTH_H */
 
 #endif /* CONFIG_ARM64_RAS_EXTN */
 
 #ifdef CONFIG_ARM64_PTR_AUTH
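+/*
+ * Enable address authentication for all four address keys (instruction A/B
+ * and data A/B) by setting the EnIA/EnIB/EnDA/EnDB bits in SCTLR_EL1.
+ */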
+static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
+{
+       sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+                                      SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
+}
+
 static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
                             int __unused)
 {
                .capability = ARM64_HAS_ADDRESS_AUTH,
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .matches = has_address_auth,
+               .cpu_enable = cpu_enable_address_auth,
        },
        {
                .desc = "Generic authentication (architected algorithm)",
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
 #endif
        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
+#ifdef CONFIG_ARM64_PTR_AUTH
+       { .desc = "HWCAP_PACA", .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_address_auth,
+               .hwcap_type = CAP_HWCAP, .hwcap = HWCAP_PACA },
+       { .desc = "HWCAP_PACG", .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_generic_auth,
+               .hwcap_type = CAP_HWCAP, .hwcap = HWCAP_PACG },
+#endif
        {},
 };
 
 
 #include <asm/fpsimd.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
+#include <asm/pointer_auth.h>
 #include <asm/stacktrace.h>
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
        contextidr_thread_switch(next);
        entry_task_switch(next);
        uao_thread_switch(next);
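+       /* install the incoming task's userspace pointer authentication keys */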
+       ptrauth_thread_switch(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
 void arch_setup_new_exec(void)
 {
        current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+
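+       /* a new executable image starts with freshly randomised ptrauth keys */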
+       ptrauth_thread_init_user(current);
 }