KVM: arm64: Prepare non-protected nVHE hypervisor stacktrace
author Kalesh Singh <kaleshsingh@google.com>
Tue, 26 Jul 2022 07:37:42 +0000 (00:37 -0700)
committer Marc Zyngier <maz@kernel.org>
Tue, 26 Jul 2022 09:49:27 +0000 (10:49 +0100)
In non-protected nVHE mode (non-pKVM) the host can directly access
hypervisor memory, so unwinding of the hypervisor stacktrace is done
from EL1, saving the memory that would otherwise be needed for shared
buffers.

To unwind the hypervisor stack from EL1 the host needs to know the
starting point for the unwind and information that will allow it to
translate hypervisor stack addresses to the corresponding kernel
addresses. This patch sets up this bookkeeping; it is made use of
later in the series.
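
For illustration only (not part of this patch), a minimal sketch of the
hyp-to-kernel address translation the host-side unwinder will need. The
helper name and the kernel-side base parameter are placeholders; the real
helper only arrives later in the series:

static unsigned long hyp_stack_addr_to_kern(unsigned long hyp_addr,
					    unsigned long hyp_stack_base,
					    unsigned long kern_stack_base)
{
	/*
	 * Both mappings cover the same underlying stack page, so the
	 * offset within the page is identical in either address space.
	 */
	return kern_stack_base + (hyp_addr - hyp_stack_base);
}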

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220726073750.3219117-10-kaleshsingh@google.com
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kvm/hyp/nvhe/stacktrace.c
arch/arm64/kvm/hyp/nvhe/switch.c

index 2e277f2ed6712f7bf4f2ba73a80151beaa503dde..53035763e48e891e6e2c0ff43c740c8a26e3d3af 100644 (file)
@@ -176,6 +176,22 @@ struct kvm_nvhe_init_params {
        unsigned long vtcr;
 };
 
+/*
+ * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
+ * hyp_panic() in non-protected mode.
+ *
+ * @stack_base:                 hyp VA of the hyp_stack base.
+ * @overflow_stack_base:        hyp VA of the hyp_overflow_stack base.
+ * @fp:                         hyp FP where the backtrace begins.
+ * @pc:                         hyp PC where the backtrace begins.
+ */
+struct kvm_nvhe_stacktrace_info {
+       unsigned long stack_base;
+       unsigned long overflow_stack_base;
+       unsigned long fp;
+       unsigned long pc;
+};
+
 /* Translate a kernel address @ptr into its equivalent linear mapping */
 #define kvm_ksym_ref(ptr)                                              \
        ({                                                              \
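
As a rough illustration of how the new struct is meant to be consumed
(this helper is hypothetical and not part of the patch), the two bases let
the host bound the valid stack ranges: the hyp stack is one page and the
overflow stack is OVERFLOW_STACK_SIZE bytes, matching the hyp-side sizes
used below:

static bool hyp_fp_on_hyp_stacks(struct kvm_nvhe_stacktrace_info *info,
				 unsigned long fp)
{
	unsigned long stack_top = info->stack_base + PAGE_SIZE;
	unsigned long ovf_top = info->overflow_stack_base + OVERFLOW_STACK_SIZE;

	/* A valid frame pointer must lie on one of the two hyp stacks. */
	return (fp >= info->stack_base && fp < stack_top) ||
	       (fp >= info->overflow_stack_base && fp < ovf_top);
}
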
index a3d5b34e12499a0e90ff6df2934d22c30744f640..b8a280aa026a8d56baea7b8719f7c80dbc4afc0a 100644 (file)
@@ -4,8 +4,49 @@
  *
  * Copyright (C) 2022 Google LLC
  */
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
 #include <asm/memory.h>
 #include <asm/percpu.h>
 
 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
        __aligned(16);
+
+DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
+
+/*
+ * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Save the information needed by the host to unwind the non-protected
+ * nVHE hypervisor stack in EL1.
+ */
+static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
+{
+       struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
+       struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
+
+       stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
+       stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
+       stacktrace_info->fp = fp;
+       stacktrace_info->pc = pc;
+}
+
+/*
+ * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Saves the information needed by the host to dump the nVHE hypervisor
+ * backtrace.
+ */
+void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
+{
+       if (is_protected_kvm_enabled())
+               return;
+       else
+               hyp_prepare_backtrace(fp, pc);
+}
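
A sketch of the eventual host-side consumer, for context only: reading the
per-CPU bookkeeping back from EL1. This assumes a host-visible
DECLARE_KVM_NVHE_PER_CPU() declaration for kvm_stacktrace_info (added
later in the series) and uses the existing this_cpu_ptr_nvhe_sym()
accessor from kvm_asm.h:

static void host_dump_hyp_unwind_anchor(void)
{
	struct kvm_nvhe_stacktrace_info *info;

	/*
	 * Resolve the hyp per-CPU symbol from the host via its nVHE alias;
	 * caller is assumed to have preemption disabled.
	 */
	info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	kvm_err("nVHE hyp backtrace anchor: fp=0x%lx pc=0x%lx\n",
		info->fp, info->pc);
}
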
index 6db801db8f271939ca7329f03ce300715e163dfd..64e13445d0d913d63af4aa779c683753531083c4 100644 (file)
@@ -34,6 +34,8 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
 DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
 
+extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
        u64 val;
@@ -375,6 +377,10 @@ asmlinkage void __noreturn hyp_panic(void)
                __sysreg_restore_state_nvhe(host_ctxt);
        }
 
+       /* Prepare to dump kvm nvhe hyp stacktrace */
+       kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
+                                  _THIS_IP_);
+
        __hyp_do_panic(host_ctxt, spsr, elr, par);
        unreachable();
 }
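
Finally, a hypothetical sketch (not part of this patch) of the
frame-pointer walk the host can perform from the saved fp/pc once address
translation is in place. Per AAPCS64 a frame record holds the previous fp
and the return address; re-translating each new fp to a host-dereferenceable
address, and range-checking it against the reported stack bases, is elided
here:

static void walk_hyp_frames(unsigned long fp, unsigned long pc,
			    bool (*consume_entry)(unsigned long pc))
{
	while (consume_entry(pc)) {
		unsigned long *frame;

		if (!fp || (fp & 0x7))	/* end of stack or misaligned record */
			break;

		frame = (unsigned long *)fp;
		fp = frame[0];		/* previous frame record */
		pc = frame[1];		/* return address saved in that frame */
	}
}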