sprintf(pmu->name, "uncore_%s", type->name);
                else
                        sprintf(pmu->name, "uncore");
-       } else
-               sprintf(pmu->name, "uncore_%s_%d", type->name, pmu->pmu_idx);
-
+       } else {
+               /*
+                * Use the box ID from the discovery table if applicable.
+                */
+               sprintf(pmu->name, "uncore_%s_%d", type->name,
+                       type->box_ids ? type->box_ids[pmu->pmu_idx] : pmu->pmu_idx);
+       }
 }
 
 static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
        void    (*cpu_init)(void);
        int     (*pci_init)(void);
        void    (*mmio_init)(void);
+       bool    use_discovery;
 };
 
 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
        .mmio_init = snr_uncore_mmio_init,
 };
 
+/*
+ * SPR init callbacks. use_discovery marks this platform as depending on
+ * the self-describing discovery tables (checked during module init).
+ */
+static const struct intel_uncore_init_fun spr_uncore_init __initconst = {
+       .cpu_init = spr_uncore_cpu_init,
+       .pci_init = spr_uncore_pci_init,
+       .mmio_init = spr_uncore_mmio_init,
+       .use_discovery = true,
+};
+
 static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
        .cpu_init = intel_uncore_generic_uncore_cpu_init,
        .pci_init = intel_uncore_generic_uncore_pci_init,
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &rkl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &adl_uncore_init),
+       X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &spr_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      &snr_uncore_init),
        {},
 };
                        uncore_init = (struct intel_uncore_init_fun *)&generic_uncore_init;
                else
                        return -ENODEV;
-       } else
+       } else {
                uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
+               if (uncore_no_discover && uncore_init->use_discovery)
+                       return -ENODEV;
+               if (uncore_init->use_discovery && !intel_uncore_has_discovery_tables())
+                       return -ENODEV;
+       }
 
        if (uncore_init->pci_init) {
                pret = uncore_init->pci_init();
 
 // SPDX-License-Identifier: GPL-2.0
 /* SandyBridge-EP/IvyTown uncore support */
 #include "uncore.h"
+#include "uncore_discovery.h"
 
 /* SNB-EP pci bus to socket mapping */
 #define SNBEP_CPUNODEID                        0x40
 }
 
 /* end of ICX uncore support */
+
+/* SPR uncore support */
+
+#define UNCORE_SPR_NUM_UNCORE_TYPES            12
+
+/*
+ * Per-type customization templates, indexed by the unit type ID from the
+ * discovery table. A NULL slot means the generic (discovery-derived) type
+ * is used as-is; non-NULL entries override selected fields via
+ * uncore_type_customized_copy().
+ */
+static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+       NULL,
+};
+
+/*
+ * Overlay platform-specific fields from a customization template onto a
+ * generic (discovery-table derived) uncore type. Only fields that are set
+ * in @from_type are copied; everything else keeps the generic value.
+ * Either pointer may be NULL, in which case nothing is copied.
+ */
+static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
+                                       struct intel_uncore_type *from_type)
+{
+       if (!to_type || !from_type)
+               return;
+
+       if (from_type->name)
+               to_type->name = from_type->name;
+       if (from_type->fixed_ctr_bits)
+               to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
+       if (from_type->event_mask)
+               to_type->event_mask = from_type->event_mask;
+       if (from_type->event_mask_ext)
+               to_type->event_mask_ext = from_type->event_mask_ext;
+       if (from_type->fixed_ctr)
+               to_type->fixed_ctr = from_type->fixed_ctr;
+       if (from_type->fixed_ctl)
+               to_type->fixed_ctl = from_type->fixed_ctl;
+       if (from_type->num_shared_regs)
+               to_type->num_shared_regs = from_type->num_shared_regs;
+       if (from_type->constraints)
+               to_type->constraints = from_type->constraints;
+       if (from_type->ops)
+               to_type->ops = from_type->ops;
+       if (from_type->event_descs)
+               to_type->event_descs = from_type->event_descs;
+       if (from_type->format_group)
+               to_type->format_group = from_type->format_group;
+}
+
+/*
+ * Build the uncore type list for one access method (MSR/PCI/MMIO) from the
+ * discovery tables, then apply any SPR-specific customizations from
+ * spr_uncores[]. Returns the NULL-terminated type array.
+ */
+static struct intel_uncore_type **
+uncore_get_uncores(enum uncore_access_type type_id)
+{
+       struct intel_uncore_type **types, **start_types;
+
+       start_types = types = intel_uncore_generic_init_uncores(type_id);
+
+       /* Only copy the customized features */
+       for (; *types; types++) {
+               /* Type IDs beyond the known SPR set have no template; keep generic. */
+               if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
+                       continue;
+               uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
+       }
+
+       return start_types;
+}
+
+/* Set up the MSR-access uncore types (generic discovery + SPR overrides). */
+void spr_uncore_cpu_init(void)
+{
+       uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR);
+}
+
+/*
+ * Set up the PCI-access uncore types (generic discovery + SPR overrides).
+ * Always returns 0; list construction has no failure path surfaced here.
+ */
+int spr_uncore_pci_init(void)
+{
+       uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI);
+       return 0;
+}
+
+/* Set up the MMIO-access uncore types (generic discovery + SPR overrides). */
+void spr_uncore_mmio_init(void)
+{
+       uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO);
+}
+
+/* end of SPR uncore support */