DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
+DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+
 static bool vgic_present;
 
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
        return 0;
 }
 
+/*
+ * Seed the nVHE hypervisor's pointer-authentication keys.
+ *
+ * In protected mode the hypervisor runs with its own ptrauth keys,
+ * separate from the host kernel's: on host exit the EL2 entry path saves
+ * the host keys and installs these per-CPU values from kvm_hyp_ctxt (see
+ * the corresponding host.S changes), restoring the host keys on return.
+ * Fill every possible CPU's hyp context with fresh random key material
+ * before the hypervisor is brought up.
+ *
+ * Called from init_hyp_mode() only when protected KVM is enabled and the
+ * CPUs implement address authentication.
+ */
+static void pkvm_hyp_init_ptrauth(void)
+{
+       struct kvm_cpu_context *hyp_ctxt;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
+               /* Instruction keys A and B. */
+               hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
+               hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
+               hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
+               hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
+               /* Data keys A and B. */
+               hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
+               hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
+               hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
+               hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
+               /* Generic (PACGA) key. */
+               hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
+               hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
+       }
+}
+
 /* Inits Hyp-mode on all online CPUs */
 static int __init init_hyp_mode(void)
 {
        kvm_hyp_init_symbols();
 
        if (is_protected_kvm_enabled()) {
+               if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
+                   cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH))
+                       pkvm_hyp_init_ptrauth();
+
                init_cpu_logical_map();
 
                if (!init_psci_relay()) {
 
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_ptrauth.h>
 
        .text
 
 
        /* Save the host context pointer in x29 across the function call */
        mov     x29, x0
+
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+b __skip_pauth_save
+alternative_else_nop_endif
+
+alternative_if ARM64_KVM_PROTECTED_MODE
+       /* Save kernel ptrauth keys. */
+       add x18, x29, #CPU_APIAKEYLO_EL1
+       ptrauth_save_state x18, x19, x20
+
+       /* Use hyp keys. */
+       adr_this_cpu x18, kvm_hyp_ctxt, x19
+       add x18, x18, #CPU_APIAKEYLO_EL1
+       ptrauth_restore_state x18, x19, x20
+       isb
+alternative_else_nop_endif
+__skip_pauth_save:
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
        bl      handle_trap
 
-       /* Restore host regs x0-x17 */
 __host_enter_restore_full:
+       /* Restore kernel keys. */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+b __skip_pauth_restore
+alternative_else_nop_endif
+
+alternative_if ARM64_KVM_PROTECTED_MODE
+       add x18, x29, #CPU_APIAKEYLO_EL1
+       ptrauth_restore_state x18, x19, x20
+alternative_else_nop_endif
+__skip_pauth_restore:
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+       /* Restore host regs x0-x17 */
        ldp     x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
        ldp     x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
        ldp     x4, x5,   [x29, #CPU_XREG_OFFSET(4)]