int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
-       int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
+       int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
        int nr_threads = perf_thread_map__nr(evlist->threads);
        int nfds = 0;
        struct perf_evsel *evsel;
               int idx, struct perf_mmap_param *mp, int cpu_idx,
               int thread, int *_output, int *_output_overwrite)
 {
-       struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->user_requested_cpus, cpu_idx);
+       struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
        struct perf_evsel *evsel;
        int revent;
 
             struct perf_mmap_param *mp)
 {
        int nr_threads = perf_thread_map__nr(evlist->threads);
-       int nr_cpus    = perf_cpu_map__nr(evlist->user_requested_cpus);
+       int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
        int cpu, thread;
 
        for (cpu = 0; cpu < nr_cpus; cpu++) {
 {
        int nr_mmaps;
 
-       nr_mmaps = perf_cpu_map__nr(evlist->user_requested_cpus);
-       if (perf_cpu_map__empty(evlist->user_requested_cpus))
+       nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
+       if (perf_cpu_map__empty(evlist->all_cpus))
                nr_mmaps = perf_thread_map__nr(evlist->threads);
 
        return nr_mmaps;
                          struct perf_mmap_param *mp)
 {
        struct perf_evsel *evsel;
-       const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
+       const struct perf_cpu_map *cpus = evlist->all_cpus;
 
        if (!ops || !ops->get || !ops->mmap)
                return -EINVAL;
 
        }
 }
 
+/*
+ * True when the evlist's user-requested CPU map is the dummy map, i.e. the
+ * session was set up per-thread (--per-thread) rather than per-CPU; callers
+ * use this instead of testing cpu_map__is_dummy() on the map directly.
+ */
+static bool evlist__per_thread(struct evlist *evlist)
+{
+       return cpu_map__is_dummy(evlist->core.user_requested_cpus);
+}
+
 static int record__thread_data_init_maps(struct record_thread *thread_data, struct evlist *evlist)
 {
        int m, tm, nr_mmaps = evlist->core.nr_mmaps;
        struct mmap *mmap = evlist->mmap;
        struct mmap *overwrite_mmap = evlist->overwrite_mmap;
-       struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
+       struct perf_cpu_map *cpus = evlist->core.all_cpus;
+       bool per_thread = evlist__per_thread(evlist);
 
-       if (cpu_map__is_dummy(cpus))
+       if (per_thread)
                thread_data->nr_mmaps = nr_mmaps;
        else
                thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
                 thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
 
        for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
-               if (cpu_map__is_dummy(cpus) ||
+               if (per_thread ||
                    test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
                        if (thread_data->maps) {
                                thread_data->maps[tm] = &mmap[m];
                return err;
        }
 
-       err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.user_requested_cpus,
+       err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
                                             process_synthesized_event, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize cpu map.\n");
 static int record__init_thread_masks(struct record *rec)
 {
        int ret = 0;
-       struct perf_cpu_map *cpus = rec->evlist->core.user_requested_cpus;
+       struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
 
        if (!record__threads_enabled(rec))
                return record__init_thread_default_masks(rec, cpus);
 
-       if (cpu_map__is_dummy(cpus)) {
+       if (evlist__per_thread(rec->evlist)) {
                pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
                return -EINVAL;
        }