A subsequent patch will rework the ACPI probing of PMUs, and we'll need
to match a CPU with a known cpuid in two separate paths.
Factor out the matching logic into a helper function so that it can be
reused.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Pierre Gondois <pierre.gondois@arm.com>
Cc: Will Deacon <will@kernel.org>
Reviewed-and-tested-by: Pierre Gondois <pierre.gondois@arm.com>
Link: https://lore.kernel.org/r/20220930111844.1522365-3-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
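---

A note on the shape of the change, for readers skimming the diff below: the cpuid-matching loop is moved into a helper that returns NULL on a miss, and the existing find-or-allocate path becomes a thin wrapper that tries the helper first. The stand-alone sketch below shows the same find/find-or-alloc split in plain C; it is only an illustration of the pattern, not kernel code, and the fake_pmu/probed/find_pmu names are invented here rather than taken from drivers/perf/arm_pmu_acpi.c.

/* Illustrative sketch only: a find helper plus a find-or-alloc wrapper. */
#include <stdio.h>
#include <stdlib.h>

struct fake_pmu {
	unsigned long cpuid;
};

#define NR_FAKE_CPUS 4

/* Stands in for the per-CPU table of already-probed PMUs. */
static struct fake_pmu *probed[NR_FAKE_CPUS];

/* Matching logic only: return an already-probed PMU for @cpuid, or NULL. */
static struct fake_pmu *find_pmu(unsigned long cpuid)
{
	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
		if (probed[cpu] && probed[cpu]->cpuid == cpuid)
			return probed[cpu];
	}
	return NULL;
}

/* Find-or-allocate path: reuse a match when one exists, else allocate. */
static struct fake_pmu *find_alloc_pmu(unsigned long cpuid)
{
	struct fake_pmu *pmu = find_pmu(cpuid);

	if (pmu)
		return pmu;

	pmu = calloc(1, sizeof(*pmu));
	if (!pmu)
		return NULL;

	pmu->cpuid = cpuid;
	return pmu;
}

int main(void)
{
	struct fake_pmu *first = find_alloc_pmu(0x410fd490);

	probed[0] = first;	/* pretend CPU0 recorded this PMU when it was probed */

	/* A second caller with the same cpuid now takes the match path. */
	printf("reused: %s\n", find_alloc_pmu(0x410fd490) == first ? "yes" : "no");

	free(first);
	return 0;
}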
 	return err;
 }
 
-static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
+static struct arm_pmu *arm_pmu_acpi_find_pmu(void)
 {
 	unsigned long cpuid = read_cpuid_id();
 	struct arm_pmu *pmu;
 		return pmu;
 	}
 
+	return NULL;
+}
+
+static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
+{
+	struct arm_pmu *pmu;
+
+	pmu = arm_pmu_acpi_find_pmu();
+	if (pmu)
+		return pmu;
+
 	pmu = armpmu_alloc_atomic();
 	if (!pmu) {
 		pr_warn("Unable to allocate PMU for CPU%d\n",
 		return NULL;
 	}
 
-	pmu->acpi_cpuid = cpuid;
+	pmu->acpi_cpuid = read_cpuid_id();
 
 	return pmu;
 }