{
        struct perf_pmu *pmu = NULL;
 
-       while ((pmu = perf_pmus__scan(pmu))) {
-               if (!is_pmu_core(pmu->name))
-                       continue;
-
+       while ((pmu = perf_pmus__scan_core(pmu))) {
                /*
                 * The cpumap should cover all CPUs. Otherwise, some CPUs may
                 * not support some events or have different event IDs.
 
                        continue;
                }
 
-               while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+               while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                        struct perf_cpu_map *cpus;
                        struct evsel *evsel;
 
-                       if (!pmu->is_core)
-                               continue;
-
                        evsel = evsel__new(attrs + i);
                        if (evsel == NULL)
                                goto out_delete_partial_list;
 
                 * The same register set is supported among different hybrid PMUs.
                 * Only check the first available one.
                 */
-               while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-                       if (pmu->is_core) {
-                               type = pmu->type;
-                               break;
-                       }
+               while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+                       type = pmu->type;
+                       break;
                }
                attr.config |= type << PERF_PMU_TYPE_SHIFT;
        }
 
        int nr_aliases;
        int nr_formats;
        int nr_caps;
+       bool is_core;
 };
 
 static const struct option options[] = {
                r = results + nr_pmus;
 
                r->name = strdup(pmu->name);
+               r->is_core = pmu->is_core;
                r->nr_caps = pmu->nr_caps;
 
                r->nr_aliases = 0;
        return 0;
 }
 
-static int check_result(void)
+static int check_result(bool core_only)
 {
        struct pmu_scan_result *r;
        struct perf_pmu *pmu;
 
        for (int i = 0; i < nr_pmus; i++) {
                r = &results[i];
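+               /* A core-only scan does not load non-core PMUs; skip their results. */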
+               if (core_only && !r->is_core)
+                       continue;
+
                pmu = perf_pmus__find(r->name);
                if (pmu == NULL) {
                        pr_err("Cannot find PMU %s\n", r->name);
        struct timeval start, end, diff;
        double time_average, time_stddev;
        u64 runtime_us;
-       unsigned int i;
        int ret;
 
        init_stats(&stats);
                return -1;
        }
 
-       for (i = 0; i < iterations; i++) {
-               gettimeofday(&start, NULL);
-               perf_pmus__scan(NULL);
-               gettimeofday(&end, NULL);
-
-               timersub(&end, &start, &diff);
-               runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
-               update_stats(&stats, runtime_us);
-
-               ret = check_result();
-               perf_pmus__destroy();
-               if (ret < 0)
-                       break;
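+       /* Pass 0 measures scanning only core PMUs, pass 1 scanning all PMUs. */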
+       for (int j = 0; j < 2; j++) {
+               bool core_only = (j == 0);
+
+               /* Reset the stats so each pass reports only its own timings. */
+               init_stats(&stats);
+
+               for (unsigned int i = 0; i < iterations; i++) {
+                       gettimeofday(&start, NULL);
+                       if (core_only)
+                               perf_pmus__scan_core(NULL);
+                       else
+                               perf_pmus__scan(NULL);
+                       gettimeofday(&end, NULL);
+                       timersub(&end, &start, &diff);
+                       runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+                       update_stats(&stats, runtime_us);
+
+                       ret = check_result(core_only);
+                       perf_pmus__destroy();
+                       if (ret < 0)
+                               break;
+               }
+               time_average = avg_stats(&stats);
+               time_stddev = stddev_stats(&stats);
+               pr_info("  Average%s PMU scanning took: %.3f usec (+- %.3f usec)\n",
+                       core_only ? " core" : "", time_average, time_stddev);
        }
-
-       time_average = avg_stats(&stats);
-       time_stddev = stddev_stats(&stats);
-       pr_info("  Average PMU scanning took: %.3f usec (+- %.3f usec)\n",
-               time_average, time_stddev);
-
        delete_result();
        return 0;
 }
 
        struct perf_pmu *pmu = NULL;
        unsigned long i;
 
-       while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+       while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                int count = 0;
 
-               if (!is_pmu_core(pmu->name))
-                       continue;
-
                if (list_empty(&pmu->format)) {
                        pr_debug2("skipping testing core PMU %s\n", pmu->name);
                        continue;
 
        if (!perf_pmus__has_hybrid())
                return NULL;
 
-       while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-               if (pmu->is_core)
-                       nr++;
-       }
+       while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
+               nr++;
+
        if (nr == 0)
                return NULL;
 
                return NULL;
 
        tp->nr = nr;
-       while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-               if (!pmu->is_core)
-                       continue;
-
+       while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                if (load_hybrid_node(&tp->nodes[i], pmu)) {
                        hybrid_topology__delete(tp);
                        return NULL;
 
         */
        if (perf_pmus__has_hybrid()) {
                pmu = NULL;
-               while ((pmu = perf_pmus__scan(pmu))) {
-                       if (!pmu->is_core)
-                               continue;
-
+               while ((pmu = perf_pmus__scan_core(pmu))) {
                        ret = __write_pmu_caps(ff, pmu, true);
                        if (ret < 0)
                                return ret;
 
                } else {
                        struct perf_pmu *pmu = NULL;
 
-                       while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-                               if (!pmu->is_core)
-                                       continue;
-
+                       while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                                scnprintf(sysfs_name, sizeof(sysfs_name),
                                          e->sysfs_name, pmu->name);
                                e->supported |= perf_mem_event__supported(mnt, sysfs_name);
        char sysfs_name[100];
        struct perf_pmu *pmu = NULL;
 
-       while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-               if (!pmu->is_core)
-                       continue;
-
+       while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
                          pmu->name);
                if (!perf_mem_event__supported(mnt, sysfs_name)) {
                                return -1;
                        }
 
-                       while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-                               if (!pmu->is_core)
-                                       continue;
+                       while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                                rec_argv[i++] = "-e";
                                s = perf_mem_events__name(j, pmu->name);
                                if (s) {
 
        const char *config_name = get_config_name(head_config);
        const char *metric_id = get_config_metric_id(head_config);
 
-       while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+       /* Legacy cache events are only supported by core PMUs. */
+       while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                LIST_HEAD(config_terms);
                struct perf_event_attr attr;
                int ret;
 
-               /* Skip unsupported PMUs. */
-               if (!perf_pmu__supports_legacy_cache(pmu))
-                       continue;
-
                if (parse_events__filter_pmu(parse_state, pmu))
                        continue;
 
                return __parse_events_add_numeric(parse_state, list, /*pmu=*/NULL,
                                                  type, config, head_config);
 
-       while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+       /* Wildcards on numeric values are only supported by core PMUs. */
+       while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                int ret;
 
-               if (!perf_pmu__supports_wildcard_numeric(pmu))
-                       continue;
-
                if (parse_events__filter_pmu(parse_state, pmu))
                        continue;
 
 
        return pmu->is_core;
 }
 
-bool perf_pmu__supports_wildcard_numeric(const struct perf_pmu *pmu)
-{
-       return pmu->is_core;
-}
-
 bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu)
 {
        return !is_pmu_hybrid(pmu->name);
 }
 
-bool perf_pmu__is_mem_pmu(const struct perf_pmu *pmu)
-{
-       return pmu->is_core;
-}
-
 bool perf_pmu__have_event(const struct perf_pmu *pmu, const char *name)
 {
        struct perf_pmu_alias *alias;
 
 bool is_pmu_core(const char *name);
 bool is_pmu_hybrid(const char *name);
 bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu);
-bool perf_pmu__supports_wildcard_numeric(const struct perf_pmu *pmu);
 bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu);
-bool perf_pmu__is_mem_pmu(const struct perf_pmu *pmu);
 bool perf_pmu__have_event(const struct perf_pmu *pmu, const char *name);
 
 FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name);
 
 }
 
-/* Add all pmus in sysfs to pmu list: */
-static void pmu_read_sysfs(void)
+/* Add all pmus in sysfs to pmu list, or only core pmus when core_only: */
+static void pmu_read_sysfs(bool core_only)
 {
        int fd;
        DIR *dir;
        while ((dent = readdir(dir))) {
                if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
                        continue;
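+               /* When only core PMUs are wanted, skip the non-core sysfs entries. */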
+               if (core_only && !is_pmu_core(dent->d_name))
+                       continue;
                /* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
                perf_pmu__find2(fd, dent->d_name);
        }
        bool use_core_pmus = !pmu || pmu->is_core;
 
        if (!pmu) {
-               pmu_read_sysfs();
+               pmu_read_sysfs(/*core_only=*/false);
                pmu = list_prepare_entry(pmu, &core_pmus, list);
        }
        if (use_core_pmus) {
        return NULL;
 }
 
+struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
+{
+       if (!pmu) {
+               pmu_read_sysfs(/*core_only=*/true);
+               pmu = list_prepare_entry(pmu, &core_pmus, list);
+       }
+       list_for_each_entry_continue(pmu, &core_pmus, list)
+               return pmu;
+
+       return NULL;
+}
+
 const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
 {
        struct perf_pmu *pmu = NULL;
        struct perf_pmu *pmu = NULL;
        int count = 0;
 
-       while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-               if (perf_pmu__is_mem_pmu(pmu))
-                       count++;
-       }
+       /* All core PMUs are for mem events. */
+       while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
+               count++;
+
        return count;
 }
 
        if (!hybrid_scanned) {
                struct perf_pmu *pmu = NULL;
 
-               while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-                       if (pmu->is_core && is_pmu_hybrid(pmu->name)) {
+               while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+                       if (is_pmu_hybrid(pmu->name)) {
                                has_hybrid = true;
                                break;
                        }
 
 struct perf_pmu *perf_pmus__find_by_type(unsigned int type);
 
 struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu);
+struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu);
 
 const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str);
 
 
        struct perf_pmu *pmu = NULL;
        const char *event_type_descriptor = event_type_descriptors[PERF_TYPE_HW_CACHE];
 
-       while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-               /*
-                * Skip uncore PMUs for performance. PERF_TYPE_HW_CACHE type
-                * attributes can accept software PMUs in the extended type, so
-                * also skip.
-                */
+       /*
+        * Only print core PMUs: uncore PMUs are skipped for performance, and
+        * the software PMU is skipped because it can also succeed in opening
+        * legacy cache events.
+        */
+       while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                if (pmu->is_uncore || pmu->type == PERF_TYPE_SOFTWARE)
                        continue;