                goto out_delete_filters;
        }
 
-       ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
+       ret = evlist__create_maps(ftrace.evlist, &ftrace.target);
        if (ret < 0)
                goto out_delete_evlist;
 
 
                goto out;
        }
 
-       if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
+       if (evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
                usage_with_options(live_usage, live_options);
 
        /*
 
        rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
 
        err = -ENOMEM;
-       if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
+       if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
                usage_with_options(record_usage, record_options);
 
        err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
 
        if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
                target.per_thread = true;
 
-       if (perf_evlist__create_maps(evsel_list, &target) < 0) {
+       if (evlist__create_maps(evsel_list, &target) < 0) {
                if (target__has_task(&target)) {
                        pr_err("Problems finding threads of monitor\n");
                        parse_options_usage(stat_usage, stat_options, "p", 1);
 
        if (target__none(target))
                target->system_wide = true;
 
-       if (perf_evlist__create_maps(top.evlist, target) < 0) {
+       if (evlist__create_maps(top.evlist, target) < 0) {
                ui__error("Couldn't create thread/CPU maps: %s\n",
                          errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
                goto out_delete_evlist;
 
        if (trace->cgroup)
                evlist__set_default_cgroup(trace->evlist, trace->cgroup);
 
-       err = perf_evlist__create_maps(evlist, &trace->opts.target);
+       err = evlist__create_maps(evlist, &trace->opts.target);
        if (err < 0) {
                fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
                goto out_delete_evlist;
 
                return TEST_FAIL;
        }
 
-       err = perf_evlist__create_maps(evlist, &opts.target);
+       err = evlist__create_maps(evlist, &opts.target);
        if (err < 0) {
                pr_debug("Not enough memory to create thread/cpu maps\n");
                goto out_delete_evlist;
 
                return TEST_FAIL;
        }
 
-       err = perf_evlist__create_maps(evlist, &opts.target);
+       err = evlist__create_maps(evlist, &opts.target);
        if (err < 0) {
                pr_debug("Not enough memory to create thread/cpu maps\n");
                goto out_delete_evlist;
 
 
        pr_debug("attaching to spawned child, enable on exec\n");
 
-       err = perf_evlist__create_maps(evlist, &target);
+       err = evlist__create_maps(evlist, &target);
        if (err < 0) {
                pr_debug("Not enough memory to create thread/cpu maps\n");
                return err;
 
 
        evlist__add(evlist, evsel);
 
-       err = perf_evlist__create_maps(evlist, &opts.target);
+       err = evlist__create_maps(evlist, &opts.target);
        if (err < 0) {
-               pr_debug("%s: perf_evlist__create_maps\n", __func__);
+               pr_debug("%s: evlist__create_maps\n", __func__);
                goto out_delete_evlist;
        }
 
 
         * evlist__prepare_workload we'll fill in the only thread
         * we're monitoring, the one forked there.
         */
-       err = perf_evlist__create_maps(evlist, &opts.target);
+       err = evlist__create_maps(evlist, &opts.target);
        if (err < 0) {
                pr_debug("Not enough memory to create thread/cpu maps\n");
                goto out_delete_evlist;
 
        return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
 }
 
-int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
+int evlist__create_maps(struct evlist *evlist, struct target *target)
 {
        bool all_threads = (target->per_thread && target->system_wide);
        struct perf_cpu_map *cpus;
        }
 }
 
-static int perf_evlist__create_syswide_maps(struct evlist *evlist)
+static int evlist__create_syswide_maps(struct evlist *evlist)
 {
        struct perf_cpu_map *cpus;
        struct perf_thread_map *threads;
         * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
         */
        if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
-               err = perf_evlist__create_syswide_maps(evlist);
+               err = evlist__create_syswide_maps(evlist);
                if (err < 0)
                        goto out_err;
        }
 
 void perf_evlist__set_selected(struct evlist *evlist,
                               struct evsel *evsel);
 
-int perf_evlist__create_maps(struct evlist *evlist, struct target *target);
+int evlist__create_maps(struct evlist *evlist, struct target *target);
 int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel);
 
 void __evlist__set_leader(struct list_head *list);
 
        if (!evlist)
                return 0;
 
-       if (perf_evlist__create_maps(evlist, target))
+       if (evlist__create_maps(evlist, target))
                goto out_delete_evlist;
 
        if (evlist->core.nr_entries > 1) {