}
 
 /*
- * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
+ * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
  * we asked by setting its exec_error to the function below,
  * ftrace__workload_exec_failed_signal.
  *
        if (write_tracing_file("trace", "0") < 0)
                goto out;
 
-       if (argc && perf_evlist__prepare_workload(ftrace->evlist,
-                               &ftrace->target, argv, false,
-                               ftrace__workload_exec_failed_signal) < 0) {
+       if (argc && evlist__prepare_workload(ftrace->evlist, &ftrace->target, argv, false,
+                                            ftrace__workload_exec_failed_signal) < 0) {
                goto out;
        }
 
                }
        }
 
-       perf_evlist__start_workload(ftrace->evlist);
+       evlist__start_workload(ftrace->evlist);
 
        if (ftrace->initial_delay) {
                usleep(ftrace->initial_delay * 1000);
 
 static volatile int workload_exec_errno;
 
 /*
- * perf_evlist__prepare_workload will send a SIGUSR1
+ * evlist__prepare_workload will send a SIGUSR1
  * if the fork fails, since we asked by setting its
  * want_signal to true.
  */
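
(Aside: the SIGUSR1/want_signal wiring that this comment and workload_exec_errno refer to boils down to the forked child shipping its errno back to the parent via sigqueue() when the exec fails, and an SA_SIGINFO handler in the parent stashing it in a volatile int. A standalone illustration of that pattern follows; everything in it except SIGUSR1, sigqueue() and sigaction() is made up for the example and is not perf's actual code.)

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static volatile int sketch_exec_errno;	/* stands in for workload_exec_errno */

	static void sketch_exec_failed(int signo, siginfo_t *info, void *ucontext)
	{
		(void)signo; (void)ucontext;
		sketch_exec_errno = info->si_value.sival_int;	/* errno shipped by the child */
	}

	int main(int argc, char *argv[])
	{
		struct sigaction act;
		pid_t child;

		if (argc < 2)
			return 1;

		memset(&act, 0, sizeof(act));
		act.sa_sigaction = sketch_exec_failed;
		act.sa_flags = SA_SIGINFO;
		sigaction(SIGUSR1, &act, NULL);

		child = fork();
		if (child == 0) {
			execvp(argv[1], &argv[1]);
			/* exec failed: tell the parent which errno we hit, then bail out */
			sigqueue(getppid(), SIGUSR1, (union sigval){ .sival_int = errno });
			_exit(127);
		}

		while (waitpid(child, NULL, 0) < 0 && errno == EINTR)
			;	/* the SIGUSR1 above may interrupt the wait */

		if (sketch_exec_errno)
			fprintf(stderr, "workload failed: %s\n", strerror(sketch_exec_errno));
		return sketch_exec_errno ? 1 : 0;
	}
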
        record__init_features(rec);
 
        if (forks) {
-               err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
-                                                   argv, data->is_pipe,
-                                                   workload_exec_failed_signal);
+               err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
+                                              workload_exec_failed_signal);
                if (err < 0) {
                        pr_err("Couldn't run the workload!\n");
                        status = err;
                                                  machine);
                free(event);
 
-               perf_evlist__start_workload(rec->evlist);
+               evlist__start_workload(rec->evlist);
        }
 
        if (evlist__initialize_ctlfd(rec->evlist, opts->ctl_fd, opts->ctl_fd_ack))
  * XXX Will stay a global variable till we fix builtin-script.c to stop messing
  * with it and switch to use the library functions in perf_evlist that came
  * from builtin-record.c, i.e. use record_opts,
- * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
+ * evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
  * using pipes, etc.
  */
 static struct option __record_options[] = {
 
 static volatile int workload_exec_errno;
 
 /*
- * perf_evlist__prepare_workload will send a SIGUSR1
+ * evlist__prepare_workload will send a SIGUSR1
  * if the fork fails, since we asked by setting its
  * want_signal to true.
  */
        bool second_pass = false;
 
        if (forks) {
-               if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
-                                                 workload_exec_failed_signal) < 0) {
+               if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
                        perror("failed to prepare workload");
                        return -1;
                }
        clock_gettime(CLOCK_MONOTONIC, &ref_time);
 
        if (forks) {
-               perf_evlist__start_workload(evsel_list);
+               evlist__start_workload(evsel_list);
                enable_counters();
 
                if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
 
         * Better not use !target__has_task() here because we need to cover the
         * case where no threads were specified in the command line, but a
         * workload was, and in that case we will fill in the thread_map when
-        * we fork the workload in perf_evlist__prepare_workload.
+        * we fork the workload in evlist__prepare_workload.
         */
        if (trace->filter_pids.nr > 0) {
                err = perf_evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
        signal(SIGINT, sig_handler);
 
        if (forks) {
-               err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
-                                                   argv, false, NULL);
+               err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
                if (err < 0) {
                        fprintf(trace->output, "Couldn't run the workload!\n");
                        goto out_delete_evlist;
                evlist__enable(evlist);
 
        if (forks)
-               perf_evlist__start_workload(evlist);
+               evlist__start_workload(evlist);
 
        if (trace->opts.initial_delay) {
                usleep(trace->opts.initial_delay * 1000);
 
                return err;
        }
 
-       err = perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
+       err = evlist__prepare_workload(evlist, &target, argv, false, NULL);
        if (err < 0) {
                pr_debug("Couldn't run the workload!\n");
                return err;
                return err;
        }
 
-       return perf_evlist__start_workload(evlist) == 1 ? TEST_OK : TEST_FAIL;
+       return evlist__start_workload(evlist) == 1 ? TEST_OK : TEST_FAIL;
 }
 
 static int detach__enable_on_exec(struct evlist *evlist)
 
        /*
         * Create maps of threads and cpus to monitor. In this case
         * we start with all threads and cpus (-1, -1) but then in
-        * perf_evlist__prepare_workload we'll fill in the only thread
+        * evlist__prepare_workload we'll fill in the only thread
         * we're monitoring, the one forked there.
         */
        err = perf_evlist__create_maps(evlist, &opts.target);
 
        /*
         * Prepare the workload in argv[] to run, it'll fork it, and then wait
-        * for perf_evlist__start_workload() to exec it. This is done this way
+        * for evlist__start_workload() to exec it. This is done this way
         * so that we have time to open the evlist (calling sys_perf_event_open
         * on all the fds) and then mmap them.
         */
-       err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
+       err = evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
        if (err < 0) {
                pr_debug("Couldn't run the workload!\n");
                goto out_delete_evlist;
        /*
         * Now!
         */
-       perf_evlist__start_workload(evlist);
+       evlist__start_workload(evlist);
 
        while (1) {
                int before = total_events;
 
 }
 
 /*
- * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
+ * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
  * we asked by setting its exec_error to this handler.
  */
 static void workload_exec_failed_signal(int signo __maybe_unused,
        /*
         * Create maps of threads and cpus to monitor. In this case
         * we start with all threads and cpus (-1, -1) but then in
-        * perf_evlist__prepare_workload we'll fill in the only thread
+        * evlist__prepare_workload we'll fill in the only thread
         * we're monitoring, the one forked there.
         */
        cpus = perf_cpu_map__dummy_new();
        cpus    = NULL;
        threads = NULL;
 
-       err = perf_evlist__prepare_workload(evlist, &target, argv, false,
-                                           workload_exec_failed_signal);
+       err = evlist__prepare_workload(evlist, &target, argv, false, workload_exec_failed_signal);
        if (err < 0) {
                pr_debug("Couldn't run the workload!\n");
                goto out_delete_evlist;
                goto out_delete_evlist;
        }
 
-       perf_evlist__start_workload(evlist);
+       evlist__start_workload(evlist);
 
 retry:
        md = &evlist->mmap[0];
 
        return err;
 }
 
-int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
-                                 const char *argv[], bool pipe_output,
-                                 void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
+int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
+                            bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
 {
        int child_ready_pipe[2], go_pipe[2];
        char bf;
                /*
                 * The parent will ask for the execvp() to be performed by
                 * writing exactly one byte, in workload.cork_fd, usually via
-                * perf_evlist__start_workload().
+                * evlist__start_workload().
                 *
                 * For cancelling the workload without actually running it,
                 * the parent will just close workload.cork_fd, without writing
        return -1;
 }
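
(Aside: the comment in the hunk above describes the "cork": the prepared workload blocks reading a single byte from a pipe, evlist__start_workload() writes that byte to let execvp() run, and closing the fd without writing cancels the workload. A standalone illustration of that hand-off follows; go_pipe here plays the role of evlist->workload.cork_fd and nothing else is perf's actual code.)

	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(int argc, char *argv[])
	{
		int go_pipe[2];
		char bf = 0;

		if (argc < 2 || pipe(go_pipe) < 0)
			return 1;

		if (fork() == 0) {		/* the prepared, still corked, workload */
			close(go_pipe[1]);
			/* wait for the parent to write exactly one byte... */
			if (read(go_pipe[0], &bf, 1) == 1)
				execvp(argv[1], &argv[1]);
			/* ...a plain close() instead means "cancelled", so just exit */
			_exit(1);
		}

		close(go_pipe[0]);
		/* the real callers open and mmap the events here, *then* uncork */
		if (write(go_pipe[1], &bf, 1) != 1)	/* evlist__start_workload() */
			perror("uncork");

		wait(NULL);
		return 0;
	}
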
 
-int perf_evlist__start_workload(struct evlist *evlist)
+int evlist__start_workload(struct evlist *evlist)
 {
        if (evlist->workload.cork_fd > 0) {
                char bf = 0;
 
                         struct callchain_param *callchain);
 int record_opts__config(struct record_opts *opts);
 
-int perf_evlist__prepare_workload(struct evlist *evlist,
-                                 struct target *target,
-                                 const char *argv[], bool pipe_output,
-                                 void (*exec_error)(int signo, siginfo_t *info,
-                                                    void *ucontext));
-int perf_evlist__start_workload(struct evlist *evlist);
+int evlist__prepare_workload(struct evlist *evlist, struct target *target,
+                            const char *argv[], bool pipe_output,
+                            void (*exec_error)(int signo, siginfo_t *info, void *ucontext));
+int evlist__start_workload(struct evlist *evlist);
 
 struct option;
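
(Aside: taken together, the two prototypes above are all a tool needs: fork the workload early, do the evsel open/mmap setup while it sits corked, then uncork it. A condensed caller-side sketch of that sequence, based on the call sites in the hunks above; it assumes it lives inside a perf builtin where these declarations, a filled-in evlist/target and a SIGUSR1 handler like workload_exec_failed_signal are already in scope, and the helper name is made up.)

	static int sketch_run_workload(struct evlist *evlist, struct target *target,
				       const char *argv[])
	{
		/* fork now; the child blocks on the cork pipe until we are ready */
		if (evlist__prepare_workload(evlist, target, argv, false,
					     workload_exec_failed_signal) < 0)
			return -1;

		/* ... open the events, mmap the ring buffers, enable counters ... */

		/* write the single "go" byte; returns 1 when the workload was uncorked */
		return evlist__start_workload(evlist) == 1 ? 0 : -1;
	}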