arm64: stacktrace: rework stack boundary discovery
author	Mark Rutland <mark.rutland@arm.com>
Thu, 1 Sep 2022 13:06:43 +0000 (14:06 +0100)
committer	Catalin Marinas <catalin.marinas@arm.com>
Fri, 9 Sep 2022 11:30:07 +0000 (12:30 +0100)
In subsequent patches we'll want to acquire the stack boundaries
ahead of time, and we'll need to be able to acquire the relevant
stack_info regardless of whether we have an object that happens to be
on the stack.

This patch replaces the on_XXX_stack() helpers with stackinfo_get_XXX()
helpers, with the caller being responsible for checking whether an
object is on a relevant stack. For the moment this check is moved into
the on_accessible_stack() functions, making these slightly larger;
subsequent patches will remove the on_accessible_stack() functions and
simplify the logic.
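
For example, a caller that previously tested an object against the task
stack with on_task_stack(tsk, sp, size, &info) now acquires the stack
boundaries and performs the check itself. An illustrative sketch of the
pattern, using the helpers added below:

	struct stack_info info = stackinfo_get_task(tsk);

	if (stackinfo_on_stack(&info, sp, size)) {
		/* [sp, sp + size) lies entirely within tsk's task stack */
	}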

The on_irq_stack() and on_task_stack() helpers are kept, as these are
used by IRQ entry sequences and stackleak respectively. As they're only
used as predicates, the stack_info pointer parameter is removed in both
cases.

As the on_accessible_stack() functions are always passed a non-NULL
info pointer, these now update info unconditionally. When updating the
type to STACK_TYPE_UNKNOWN, the low/high bounds are also modified, but
as these will not be consumed this should have no adverse effect.
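
In other words, when no stack matches, on_accessible_stack() now does
the equivalent of the following (sketch, matching the
stackinfo_get_unknown() helper added below):

	*info = stackinfo_get_unknown();	/* .low = 0, .high = 0, .type = STACK_TYPE_UNKNOWN */
	return false;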

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Kalesh Singh <kaleshsingh@google.com>
Reviewed-by: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20220901130646.1316937-7-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/include/asm/stacktrace/common.h
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kvm/hyp/nvhe/stacktrace.c
arch/arm64/kvm/stacktrace.c

index 86eb0bfe3b380a562299216e8862a58edf2ca9b7..61883518fc50b1d69e90c77c7cd9117fc54657da 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -410,7 +410,7 @@ long get_tagged_addr_ctrl(struct task_struct *task);
  * The top of the current task's task stack
  */
 #define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE)
-#define on_thread_stack()      (on_task_stack(current, current_stack_pointer, 1, NULL))
+#define on_thread_stack()      (on_task_stack(current, current_stack_pointer, 1))
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
index fa2df1ea22ebc462bb6b2393956672fbaf660813..aad0c6258721d8f04af4f1ddd0a1612f7fdde46c 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -22,77 +22,91 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 
 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-static inline bool on_irq_stack(unsigned long sp, unsigned long size,
-                               struct stack_info *info)
+static inline struct stack_info stackinfo_get_irq(void)
 {
        unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
        unsigned long high = low + IRQ_STACK_SIZE;
 
-       return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+               .type = STACK_TYPE_IRQ,
+       };
 }
 
-static inline bool on_task_stack(const struct task_struct *tsk,
-                                unsigned long sp, unsigned long size,
-                                struct stack_info *info)
+static inline bool on_irq_stack(unsigned long sp, unsigned long size)
+{
+       struct stack_info info = stackinfo_get_irq();
+       return stackinfo_on_stack(&info, sp, size);
+}
+
+static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk)
 {
        unsigned long low = (unsigned long)task_stack_page(tsk);
        unsigned long high = low + THREAD_SIZE;
 
-       return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+               .type = STACK_TYPE_TASK,
+       };
+}
+
+static inline bool on_task_stack(const struct task_struct *tsk,
+                                unsigned long sp, unsigned long size)
+{
+       struct stack_info info = stackinfo_get_task(tsk);
+       return stackinfo_on_stack(&info, sp, size);
 }
 
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 
-static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
-                               struct stack_info *info)
+static inline struct stack_info stackinfo_get_overflow(void)
 {
        unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
        unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-       return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+               .type = STACK_TYPE_OVERFLOW,
+       };
 }
 #else
-static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
-                                    struct stack_info *info)
-{
-       return false;
-}
+#define stackinfo_get_overflow()       stackinfo_get_unknown()
 #endif
 
 #if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK)
 DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
 DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
 
-static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
-                                       struct stack_info *info)
+static inline struct stack_info stackinfo_get_sdei_normal(void)
 {
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
-       return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+               .type = STACK_TYPE_SDEI_NORMAL,
+       };
 }
 
-static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
-                                         struct stack_info *info)
+static inline struct stack_info stackinfo_get_sdei_critical(void)
 {
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
-       return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+               .type = STACK_TYPE_SDEI_CRITICAL,
+       };
 }
 #else
-static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
-                                       struct stack_info *info)
-{
-       return false;
-}
-
-static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
-                                         struct stack_info *info)
-{
-       return false;
-}
+#define stackinfo_get_sdei_normal()    stackinfo_get_unknown()
+#define stackinfo_get_sdei_critical()  stackinfo_get_unknown()
 #endif
 
 #endif /* __ASM_STACKTRACE_H */
index 81c21378b1ac9e2f1b6b398a70b813ab7696e75f..3c3bda34bb8724f4c2b65e1be10ae2364d1f3ede 100644
--- a/arch/arm64/include/asm/stacktrace/common.h
+++ b/arch/arm64/include/asm/stacktrace/common.h
@@ -65,6 +65,15 @@ struct unwind_state {
        struct task_struct *task;
 };
 
+static inline struct stack_info stackinfo_get_unknown(void)
+{
+       return (struct stack_info) {
+               .low = 0,
+               .high = 0,
+               .type = STACK_TYPE_UNKNOWN,
+       };
+}
+
 static inline bool stackinfo_on_stack(const struct stack_info *info,
                                      unsigned long sp, unsigned long size)
 {
@@ -77,25 +86,6 @@ static inline bool stackinfo_on_stack(const struct stack_info *info,
        return true;
 }
 
-static inline bool on_stack(unsigned long sp, unsigned long size,
-                           unsigned long low, unsigned long high,
-                           enum stack_type type, struct stack_info *info)
-{
-       struct stack_info tmp = {
-               .low = low,
-               .high = high,
-               .type = type,
-       };
-
-       if (!stackinfo_on_stack(&tmp, sp, size))
-               return false;
-
-       if (info)
-               *info = tmp;
-
-       return true;
-}
-
 static inline void unwind_init_common(struct unwind_state *state,
                                      struct task_struct *task)
 {
index eb7c08dfb8348e29175e0076eb5c783bea24f8d3..00c05da7112cbe43c3d03bf6b4e2d61b0bc154f3 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -121,7 +121,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
 {
        return ((addr & ~(THREAD_SIZE - 1))  ==
                (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
-               on_irq_stack(addr, sizeof(unsigned long), NULL);
+               on_irq_stack(addr, sizeof(unsigned long));
 }
 
 /**
index edf9edca205528a556a75863a9fc578095f7d181..ca56fd732c2a9fe66f9618cb6ed0f2bd5af4fbf7 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -67,36 +67,55 @@ static inline void unwind_init_from_task(struct unwind_state *state,
        state->pc = thread_saved_pc(task);
 }
 
-/*
- * We can only safely access per-cpu stacks from current in a non-preemptible
- * context.
- */
 static bool on_accessible_stack(const struct task_struct *tsk,
                                unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
-       if (info)
-               info->type = STACK_TYPE_UNKNOWN;
+       struct stack_info tmp;
 
-       if (on_task_stack(tsk, sp, size, info))
-               return true;
-       if (tsk != current || preemptible())
-               return false;
-       if (on_irq_stack(sp, size, info))
-               return true;
-       if (on_overflow_stack(sp, size, info))
-               return true;
-
-       if (IS_ENABLED(CONFIG_VMAP_STACK) &&
-           IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) &&
-           in_nmi()) {
-               if (on_sdei_critical_stack(sp, size, info))
-                       return true;
-               if (on_sdei_normal_stack(sp, size, info))
-                       return true;
-       }
+       tmp = stackinfo_get_task(tsk);
+       if (stackinfo_on_stack(&tmp, sp, size))
+               goto found;
 
+       /*
+        * We can only safely access per-cpu stacks when unwinding the current
+        * task in a non-preemptible context.
+        */
+       if (tsk != current || preemptible())
+               goto not_found;
+
+       tmp = stackinfo_get_irq();
+       if (stackinfo_on_stack(&tmp, sp, size))
+               goto found;
+
+       tmp = stackinfo_get_overflow();
+       if (stackinfo_on_stack(&tmp, sp, size))
+               goto found;
+
+       /*
+        * We can only safely access SDEI stacks when unwinding the current
+        * task in an NMI context.
+        */
+       if (!IS_ENABLED(CONFIG_VMAP_STACK) ||
+           !IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) ||
+           !in_nmi())
+               goto not_found;
+
+       tmp = stackinfo_get_sdei_normal();
+       if (stackinfo_on_stack(&tmp, sp, size))
+               goto found;
+
+       tmp = stackinfo_get_sdei_critical();
+       if (stackinfo_on_stack(&tmp, sp, size))
+               goto found;
+
+not_found:
+       *info = stackinfo_get_unknown();
        return false;
+
+found:
+       *info = tmp;
+       return true;
 }
 
 /*
index 579b46aa9a553358439eaf40a89fb788b99b73c3..5da0d44f61b73b6ca8a31d846e234b1f5dffe339 100644
--- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
+++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -39,34 +39,51 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
 
 DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
 
-static bool on_overflow_stack(unsigned long sp, unsigned long size,
-                             struct stack_info *info)
+static struct stack_info stackinfo_get_overflow(void)
 {
        unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
        unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-       return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+               .type = STACK_TYPE_OVERFLOW,
+       };
 }
 
-static bool on_hyp_stack(unsigned long sp, unsigned long size,
-                             struct stack_info *info)
+static struct stack_info stackinfo_get_hyp(void)
 {
        struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
        unsigned long high = params->stack_hyp_va;
        unsigned long low = high - PAGE_SIZE;
 
-       return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+               .type = STACK_TYPE_HYP,
+       };
 }
 
 static bool on_accessible_stack(const struct task_struct *tsk,
                                unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
-       if (info)
-               info->type = STACK_TYPE_UNKNOWN;
+       struct stack_info tmp;
 
-       return (on_overflow_stack(sp, size, info) ||
-               on_hyp_stack(sp, size, info));
+       tmp = stackinfo_get_overflow();
+       if (stackinfo_on_stack(&tmp, sp, size))
+               goto found;
+
+       tmp = stackinfo_get_hyp();
+       if (stackinfo_on_stack(&tmp, sp, size))
+               goto found;
+
+       *info = stackinfo_get_unknown();
+       return false;
+
+found:
+       *info = tmp;
+       return true;
 }
 
 static int unwind_next(struct unwind_state *state)
index b69c18a26567d1cdd95774fd99a36d45b163282f..26927344a263298ce0640bdeebf73631850db75f 100644
--- a/arch/arm64/kvm/stacktrace.c
+++ b/arch/arm64/kvm/stacktrace.c
@@ -62,37 +62,54 @@ static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
        return true;
 }
 
-static bool on_overflow_stack(unsigned long sp, unsigned long size,
-                             struct stack_info *info)
+static struct stack_info stackinfo_get_overflow(void)
 {
        struct kvm_nvhe_stacktrace_info *stacktrace_info
                                = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
        unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
        unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-       return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+               .type = STACK_TYPE_OVERFLOW,
+       };
 }
 
-static bool on_hyp_stack(unsigned long sp, unsigned long size,
-                        struct stack_info *info)
+static struct stack_info stackinfo_get_hyp(void)
 {
        struct kvm_nvhe_stacktrace_info *stacktrace_info
                                = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
        unsigned long low = (unsigned long)stacktrace_info->stack_base;
        unsigned long high = low + PAGE_SIZE;
 
-       return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+               .type = STACK_TYPE_HYP,
+       };
 }
 
 static bool on_accessible_stack(const struct task_struct *tsk,
                                unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
-       if (info)
-               info->type = STACK_TYPE_UNKNOWN;
+       struct stack_info tmp;
 
-       return (on_overflow_stack(sp, size, info) ||
-               on_hyp_stack(sp, size, info));
+       tmp = stackinfo_get_overflow();
+       if (stackinfo_on_stack(&tmp, sp, size))
+               goto found;
+
+       tmp = stackinfo_get_hyp();
+       if (stackinfo_on_stack(&tmp, sp, size))
+               goto found;
+
+       *info = stackinfo_get_unknown();
+       return false;
+
+found:
+       *info = tmp;
+       return true;
 }
 
 static int unwind_next(struct unwind_state *state)