        for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
                if (cpu_map__is_dummy(cpus) ||
-                   test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
+                   test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
                        if (thread_data->maps) {
                                thread_data->maps[tm] = &mmap[m];
                                pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
 
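This hunk (in builtin-record.c) replaces the raw cpus->map[m].cpu dereference with the libperf accessor perf_cpu_map__cpu(). For reference, the accessor is roughly the following sketch (based on tools/lib/perf/cpumap.c); unlike the raw array access, it bounds-checks the index and returns a cpu of -1 on failure:

struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
        /* An out-of-range index or NULL map yields an invalid cpu. */
        struct perf_cpu result = { .cpu = -1 };

        if (cpus && idx < cpus->nr)
                return cpus->map[idx];

        return result;
}
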
 static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
 {
-       int c;
+       struct perf_cpu cpu;
+       int idx;
 
        if (cpu_map__is_dummy(cpus))
                return;
 
-       for (c = 0; c < cpus->nr; c++)
-               set_bit(cpus->map[c].cpu, mask->bits);
+       perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+               set_bit(cpu.cpu, mask->bits);
 }
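
record__mmap_cpu_mask_init() now walks the map with perf_cpu_map__for_each_cpu(), which yields both the map index and the struct perf_cpu at that index. The macro in tools/lib/perf/include/perf/cpumap.h is roughly:

#define perf_cpu_map__for_each_cpu(cpu, idx, cpus)              \
        for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx);   \
             (idx) < perf_cpu_map__nr(cpus);                    \
             (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))

Nothing in the loop body touches cpus->nr or cpus->map directly any more, which is the point of the conversion.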
 
 static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
        pr_debug("nr_threads: %d\n", rec->nr_threads);
 
        for (t = 0; t < rec->nr_threads; t++) {
-               set_bit(cpus->map[t].cpu, rec->thread_masks[t].maps.bits);
-               set_bit(cpus->map[t].cpu, rec->thread_masks[t].affinity.bits);
+               set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
+               set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
                if (verbose) {
                        pr_debug("thread_masks[%d]: ", t);
                        mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
 
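In the thread-mask hunk above, thread t is pinned to the t-th cpu of the map, so each maps/affinity mask gets exactly one bit set; with a map of {0, 2, 4}, thread 1 maps to cpu 2. An equivalent form of that loop (a hypothetical refactoring, not part of the patch) would hoist the repeated accessor call into a local:

        for (t = 0; t < rec->nr_threads; t++) {
                struct perf_cpu cpu = perf_cpu_map__cpu(cpus, t);

                /* One cpu per thread: the same bit goes into both masks. */
                set_bit(cpu.cpu, rec->thread_masks[t].maps.bits);
                set_bit(cpu.cpu, rec->thread_masks[t].affinity.bits);
        }

The remaining hunks make the same conversion in util/bpf_counter_cgroup.c.
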
        struct bpf_link *link;
        struct evsel *evsel;
        struct cgroup *cgrp, *leader_cgrp;
-       __u32 i, cpu;
-       __u32 nr_cpus = evlist->core.all_cpus->nr;
+       int i, j;
+       struct perf_cpu cpu;
        int total_cpus = cpu__max_cpu().cpu;
        int map_size, map_fd;
        int prog_fd, err;
                goto out;
        }
 
-       for (i = 0; i < nr_cpus; i++) {
+       perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
                link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
-                                                     FD(cgrp_switch, i));
+                                                     FD(cgrp_switch, cpu.cpu));
                if (IS_ERR(link)) {
                        pr_err("Failed to attach cgroup program\n");
                        err = PTR_ERR(link);
                        }
 
                        map_fd = bpf_map__fd(skel->maps.events);
-                       for (cpu = 0; cpu < nr_cpus; cpu++) {
-                               int fd = FD(evsel, cpu);
-                               __u32 idx = evsel->core.idx * total_cpus +
-                                       evlist->core.all_cpus->map[cpu].cpu;
+                       perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
+                               int fd = FD(evsel, cpu.cpu);
+                               __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
 
                                err = bpf_map_update_elem(map_fd, &idx, &fd,
                                                          BPF_ANY);
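
The events map key above flattens an (event, cpu) pair into one __u32: each event owns total_cpus consecutive slots, and total_cpus comes from cpu__max_cpu().cpu (the highest possible cpu number, not the count of online cpus), so sparse cpu numbering still yields a unique slot per pair. As a sketch, with a hypothetical helper name:

/* Hypothetical helper, only to illustrate the key layout used above. */
static __u32 event_cpu_key(int evsel_idx, int total_cpus, struct perf_cpu cpu)
{
        /* e.g. evsel_idx = 2, total_cpus = 8, cpu.cpu = 5 -> key 21 */
        return evsel_idx * total_cpus + cpu.cpu;
}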
  */
 static int bperf_cgrp__sync_counters(struct evlist *evlist)
 {
-       int i, cpu;
-       int nr_cpus = evlist->core.all_cpus->nr;
+       struct perf_cpu cpu;
+       int idx;
        int prog_fd = bpf_program__fd(skel->progs.trigger_read);
 
-       for (i = 0; i < nr_cpus; i++) {
-               cpu = evlist->core.all_cpus->map[i].cpu;
-               bperf_trigger_reading(prog_fd, cpu);
-       }
+       perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
+               bperf_trigger_reading(prog_fd, cpu.cpu);
 
        return 0;
 }
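
bperf_cgrp__sync_counters() flushes the per-cpu BPF counters into the readings map before userspace reads them. For context, bperf_trigger_reading() (in util/bpf_counter.c) runs the trigger_read program on one specific cpu via BPF_PROG_TEST_RUN with BPF_F_TEST_RUN_ON_CPU; its body is roughly:

void bperf_trigger_reading(int prog_fd, int cpu)
{
        /* Run the BPF program once, pinned to the requested cpu. */
        DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
                            .ctx_in = NULL,
                            .ctx_size_in = 0,
                            .flags = BPF_F_TEST_RUN_ON_CPU,
                            .cpu = cpu,
                            .retval = 0,
        );

        bpf_prog_test_run_opts(prog_fd, &opts);
}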
 static int bperf_cgrp__read(struct evsel *evsel)
 {
        struct evlist *evlist = evsel->evlist;
-       int i, cpu, nr_cpus = evlist->core.all_cpus->nr;
        int total_cpus = cpu__max_cpu().cpu;
        struct perf_counts_values *counts;
        struct bpf_perf_event_value *values;
        int reading_map_fd, err = 0;
-       __u32 idx;
 
        if (evsel->core.idx)
                return 0;
        reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);
 
        evlist__for_each_entry(evlist, evsel) {
-               idx = evsel->core.idx;
+               __u32 idx = evsel->core.idx;
+               int i;
+               struct perf_cpu cpu;
+
                err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
                if (err) {
                        pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
                        goto out;
                }
 
-               for (i = 0; i < nr_cpus; i++) {
-                       cpu = evlist->core.all_cpus->map[i].cpu;
-
+               perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
                        counts = perf_counts(evsel->counts, i, 0);
-                       counts->val = values[cpu].counter;
-                       counts->ena = values[cpu].enabled;
-                       counts->run = values[cpu].running;
+                       counts->val = values[cpu.cpu].counter;
+                       counts->ena = values[cpu.cpu].enabled;
+                       counts->run = values[cpu.cpu].running;
                }
        }
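
Note the two indices in the final loop: perf_counts() is addressed by the dense map index i, while values[] (sized for total_cpus entries) is addressed by the system cpu number cpu.cpu. The iterator keeps the two in step even when the map is sparse:

/*
 * Assuming a hypothetical sparse cpu map {0, 2}:
 *   i = 0 -> cpu.cpu = 0: counts slot 0 <- values[0]
 *   i = 1 -> cpu.cpu = 2: counts slot 1 <- values[2]
 * Indexing values[] with i instead of cpu.cpu would read the wrong
 * per-cpu slot whenever cpu numbers are not contiguous from 0.
 */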