perf: arm64: Add SVE vector granule register to user regs
authorJames Clark <james.clark@arm.com>
Thu, 1 Sep 2022 13:26:57 +0000 (14:26 +0100)
committerWill Deacon <will@kernel.org>
Thu, 22 Sep 2022 14:06:02 +0000 (15:06 +0100)
Dwarf based unwinding in a function that pushes SVE registers onto
the stack requires the unwinder to know the length of the SVE register
to calculate the stack offsets correctly. This was added to the Arm
specific Dwarf spec as the VG pseudo register[1].

Add the vector granule (VG) pseudo register at position 46 if it's
requested by userspace and SVE is supported. If SVE isn't supported
then fail to open the event.

The vector length must be recorded on each sample because it can be changed
at runtime via a prctl or ptrace call. Also by adding it as a register
rather than a separate attribute, minimal changes will be required in an
unwinder that already indexes into the register list.

[1]: https://github.com/ARM-software/abi-aa/blob/main/aadwarf64/aadwarf64.rst

Reviewed-by: Mark Brown <broonie@kernel.org>
Signed-off-by: James Clark <james.clark@arm.com>
Link: https://lore.kernel.org/r/20220901132658.1024635-2-james.clark@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/uapi/asm/perf_regs.h
arch/arm64/kernel/perf_regs.c
drivers/perf/arm_pmu.c

index d54daafa89e3ff21782fff617d52d89b9dd388b6..0d4b40c78e47f21919b2a2d2e04e3110dfe06c3b 100644 (file)
@@ -37,5 +37,12 @@ enum perf_event_arm_regs {
        PERF_REG_ARM64_SP,
        PERF_REG_ARM64_PC,
        PERF_REG_ARM64_MAX,
+
+       /* Extended/pseudo registers */
+       PERF_REG_ARM64_VG = 46, // SVE Vector Granule
+       PERF_REG_ARM64_EXTENDED_MAX
 };
+
+#define PERF_REG_EXTENDED_MASK (1ULL << PERF_REG_ARM64_VG)
+
 #endif /* _ASM_ARM64_PERF_REGS_H */
index f6f58e6265df89d8c1ce7a49ff052728cbdb9c8c..b4eece3eb17d0336d4a735579f8a958c25198f96 100644 (file)
@@ -9,9 +9,27 @@
 #include <asm/perf_regs.h>
 #include <asm/ptrace.h>
 
+/*
+ * Read the value of an extended/pseudo register, i.e. an index at or
+ * above PERF_REG_ARM64_MAX. Only PERF_REG_ARM64_VG (the SVE vector
+ * granule) is currently defined; any other index returns 0 with a
+ * one-shot warning.
+ */
+static u64 perf_ext_regs_value(int idx)
+{
+       switch (idx) {
+       case PERF_REG_ARM64_VG:
+               /*
+                * perf_reg_validate() rejects the VG bit on non-SVE
+                * systems, so reaching here without SVE is a bug.
+                */
+               if (WARN_ON_ONCE(!system_supports_sve()))
+                       return 0;
+
+               /*
+                * Vector granule is current length in bits of SVE registers
+                * divided by 64.
+                */
+               return (task_get_sve_vl(current) * 8) / 64;
+       default:
+               WARN_ON_ONCE(true);
+               return 0;
+       }
+}
+
 u64 perf_reg_value(struct pt_regs *regs, int idx)
 {
-       if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX))
+       if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_EXTENDED_MAX))
                return 0;
 
        /*
@@ -51,6 +69,9 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
        if ((u32)idx == PERF_REG_ARM64_PC)
                return regs->pc;
 
+       if ((u32)idx >= PERF_REG_ARM64_MAX)
+               return perf_ext_regs_value(idx);
+
        return regs->regs[idx];
 }
 
@@ -58,7 +79,12 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 
 int perf_reg_validate(u64 mask)
 {
-       if (!mask || mask & REG_RESERVED)
+       u64 reserved_mask = REG_RESERVED;
+
+       /*
+        * The VG pseudo register is only sampleable when the system
+        * implements SVE; otherwise its bit stays reserved and opening
+        * the event fails with -EINVAL.
+        */
+       if (system_supports_sve())
+               reserved_mask &= ~(1ULL << PERF_REG_ARM64_VG);
+
+       if (!mask || mask & reserved_mask)
                return -EINVAL;
 
        return 0;
index 59d3980b8ca2a201d3e1510468364104ec75da63..3f07df5a7e950af47545b45718303460a28438ca 100644 (file)
@@ -894,7 +894,7 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
                 * pmu::filter_match callback and pmu::event_init group
                 * validation).
                 */
-               .capabilities   = PERF_PMU_CAP_HETEROGENEOUS_CPUS,
+               .capabilities   = PERF_PMU_CAP_HETEROGENEOUS_CPUS | PERF_PMU_CAP_EXTENDED_REGS,
        };
 
        pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =