arm_pmu: acpi: factor out PMU<->CPU association
author    Mark Rutland <mark.rutland@arm.com>
          Fri, 30 Sep 2022 11:18:42 +0000 (12:18 +0100)
committer Will Deacon <will@kernel.org>
          Mon, 7 Nov 2022 16:16:19 +0000 (16:16 +0000)
A subsequent patch will rework the ACPI probing of PMUs, and we'll need
to associate a CPU with a PMU in two separate paths.

Factor out the association logic into a helper function so that it can
be reused.

There should be no functional change as a result of this patch.
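For illustration only (not part of this patch): a hedged sketch of how a second,
hypothetical caller might reuse the factored-out helper once the ACPI probing is
reworked. The function name example_associate_remaining_cpus() is invented here;
only arm_pmu_acpi_associate_pmu_cpu() and the probed_pmus per-CPU variable come
from the patch below.

/*
 * Hypothetical sketch only: a later probe path could walk the possible
 * CPUs and reuse the factored-out helper for any CPU that has not yet
 * been associated with a PMU.
 */
static void example_associate_remaining_cpus(struct arm_pmu *pmu)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		/* Skip CPUs that already have a PMU associated */
		if (per_cpu(probed_pmus, cpu))
			continue;

		arm_pmu_acpi_associate_pmu_cpu(pmu, cpu);
	}
}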

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Pierre Gondois <pierre.gondois@arm.com>
Cc: Will Deacon <will@kernel.org>
Reviewed-and-tested-by: Pierre Gondois <pierre.gondois@arm.com>
Link: https://lore.kernel.org/r/20220930111844.1522365-2-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/perf/arm_pmu_acpi.c

index 96ffadd654ff137b30a1ac5bf5520f959b93e4e8..a52a4aafd629f7bdee663a0433e0145492b5e241 100644 (file)
@@ -242,6 +242,22 @@ static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
        return true;
 }
 
+static void arm_pmu_acpi_associate_pmu_cpu(struct arm_pmu *pmu,
+                                          unsigned int cpu)
+{
+       int irq = per_cpu(pmu_irqs, cpu);
+
+       per_cpu(probed_pmus, cpu) = pmu;
+
+       if (pmu_irq_matches(pmu, irq)) {
+               struct pmu_hw_events __percpu *hw_events;
+               hw_events = pmu->hw_events;
+               per_cpu(hw_events->irq, cpu) = irq;
+       }
+
+       cpumask_set_cpu(cpu, &pmu->supported_cpus);
+}
+
 /*
  * This must run before the common arm_pmu hotplug logic, so that we can
  * associate a CPU and its interrupt before the common code tries to manage the
@@ -254,27 +270,16 @@ static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
 static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
 {
        struct arm_pmu *pmu;
-       struct pmu_hw_events __percpu *hw_events;
-       int irq;
 
        /* If we've already probed this CPU, we have nothing to do */
        if (per_cpu(probed_pmus, cpu))
                return 0;
 
-       irq = per_cpu(pmu_irqs, cpu);
-
        pmu = arm_pmu_acpi_find_alloc_pmu();
        if (!pmu)
                return -ENOMEM;
 
-       per_cpu(probed_pmus, cpu) = pmu;
-
-       if (pmu_irq_matches(pmu, irq)) {
-               hw_events = pmu->hw_events;
-               per_cpu(hw_events->irq, cpu) = irq;
-       }
-
-       cpumask_set_cpu(cpu, &pmu->supported_cpus);
+       arm_pmu_acpi_associate_pmu_cpu(pmu, cpu);
 
        /*
         * Ideally, we'd probe the PMU here when we find the first matching