libperf cpumap: Rename perf_cpu_map__empty() to perf_cpu_map__has_any_cpu_or_is_empty()
authorIan Rogers <irogers@google.com>
Wed, 29 Nov 2023 06:02:00 +0000 (22:02 -0800)
committerArnaldo Carvalho de Melo <acme@redhat.com>
Tue, 12 Dec 2023 17:55:13 +0000 (14:55 -0300)
The name perf_cpu_map__empty() is misleading as true is also returned
when the map contains an "any" CPU (aka dummy) map.

Rename to perf_cpu_map__has_any_cpu_or_is_empty(), later changes will
(re)introduce perf_cpu_map__empty() and perf_cpu_map__has_any_cpu().

Reviewed-by: James Clark <james.clark@arm.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Andrew Jones <ajones@ventanamicro.com>
Cc: André Almeida <andrealmeid@igalia.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Atish Patra <atishp@rivosinc.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paran Lee <p4ranlee@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Steinar H. Gunderson <sesse@google.com>
Cc: Suzuki Poulose <suzuki.poulose@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Yang Li <yang.lee@linux.alibaba.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Cc: bpf@vger.kernel.org
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20231129060211.1890454-4-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
14 files changed:
tools/lib/perf/Documentation/libperf.txt
tools/lib/perf/cpumap.c
tools/lib/perf/evlist.c
tools/lib/perf/include/perf/cpumap.h
tools/lib/perf/libperf.map
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm64/util/arm-spe.c
tools/perf/arch/x86/util/intel-bts.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/builtin-c2c.c
tools/perf/builtin-stat.c
tools/perf/util/auxtrace.c
tools/perf/util/record.c
tools/perf/util/stat.c

index a256a26598b040c73752091e31ab4505fa020ce9..fcfb9499ef9cdfbdfa7903c13f32a060b49e19c2 100644 (file)
@@ -46,7 +46,7 @@ SYNOPSIS
   void perf_cpu_map__put(struct perf_cpu_map *map);
   int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
   int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
-  bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+  bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
   int perf_cpu_map__max(struct perf_cpu_map *map);
   bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);
 
index 3aa80d0d26e86949e9b37706654ff1f4c29bf3e5..4adcd7920d033dfa5f99ec6c4f9bcb3d7a7d2d10 100644 (file)
@@ -311,7 +311,7 @@ int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
        return cpus ? __perf_cpu_map__nr(cpus) : 1;
 }
 
-bool perf_cpu_map__empty(const struct perf_cpu_map *map)
+bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
 {
        return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
 }
index 3acbbccc19019c4baa11f82ea253daacd73f7603..75f36218fdd98ed53aa4e4f5bcfff579f64623ec 100644 (file)
@@ -619,7 +619,7 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
 
        /* One for each CPU */
        nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
-       if (perf_cpu_map__empty(evlist->all_cpus)) {
+       if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) {
                /* Plus one for each thread */
                nr_mmaps += perf_thread_map__nr(evlist->threads);
                /* Minus the per-thread CPU (-1) */
@@ -653,7 +653,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
        if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;
 
-       if (perf_cpu_map__empty(cpus))
+       if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
                return mmap_per_thread(evlist, ops, mp);
 
        return mmap_per_cpu(evlist, ops, mp);
index b24bd8b8f34e1c32758a6eb3765ab409949f842b..9cf361fc5edcb4a8c3648efefd4eaa2ec1395d34 100644 (file)
@@ -47,9 +47,9 @@ LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
 LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
 LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
 /**
- * perf_cpu_map__empty - is map either empty or the "any CPU"/dummy value.
+ * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value.
  */
-LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+LIBPERF_API bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
 LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
 LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
 LIBPERF_API bool perf_cpu_map__equal(const struct perf_cpu_map *lhs,
index 8a71f841498e4960a32347d02d750c8bde8291d6..10b3f372264264ff35e77e529ac3387836ba09e0 100644 (file)
@@ -9,7 +9,7 @@ LIBPERF_0.0.1 {
                perf_cpu_map__read;
                perf_cpu_map__nr;
                perf_cpu_map__cpu;
-               perf_cpu_map__empty;
+               perf_cpu_map__has_any_cpu_or_is_empty;
                perf_cpu_map__max;
                perf_cpu_map__has;
                perf_thread_map__new_array;
index 2cf873d71dff03730e62b31c299ed90c2c0a975e..c6b7b3066324ee2853b63de1f61585aefa8f21fe 100644 (file)
@@ -211,7 +211,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr,
                 * program can run on any CPUs in this case, thus don't skip
                 * validation.
                 */
-               if (!perf_cpu_map__empty(event_cpus) &&
+               if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus) &&
                    !perf_cpu_map__has(event_cpus, cpu))
                        continue;
 
@@ -435,7 +435,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
         * Also the case of per-cpu mmaps, need the contextID in order to be notified
         * when a context switch happened.
         */
-       if (!perf_cpu_map__empty(cpus)) {
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
                evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
                                           "timestamp", 1);
                evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
@@ -461,7 +461,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
        evsel->core.attr.sample_period = 1;
 
        /* In per-cpu case, always need the time of mmap events etc */
-       if (!perf_cpu_map__empty(cpus))
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
                evsel__set_sample_bit(evsel, TIME);
 
        err = cs_etm_validate_config(itr, cs_etm_evsel);
@@ -539,7 +539,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
        struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 
        /* cpu map is not empty, we have specific CPUs to work with */
-       if (!perf_cpu_map__empty(event_cpus)) {
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
                for (i = 0; i < cpu__max_cpu().cpu; i++) {
                        struct perf_cpu cpu = { .cpu = i, };
 
@@ -814,7 +814,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
                return -EINVAL;
 
        /* If the cpu_map is empty all online CPUs are involved */
-       if (perf_cpu_map__empty(event_cpus)) {
+       if (perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
                cpu_map = online_cpus;
        } else {
                /* Make sure all specified CPUs are online */
index e3acc739bd0027b214a4aa5296e81bfcac3afba7..51ccbfd3d246d484400c9a83220efaa8833f9f95 100644 (file)
@@ -232,7 +232,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
         * In the case of per-cpu mmaps, sample CPU for AUX event;
         * also enable the timestamp tracing for samples correlation.
         */
-       if (!perf_cpu_map__empty(cpus)) {
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
                evsel__set_sample_bit(arm_spe_evsel, CPU);
                evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel,
                                           "ts_enable", 1);
@@ -265,7 +265,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
        tracking_evsel->core.attr.sample_period = 1;
 
        /* In per-cpu case, always need the time of mmap events etc */
-       if (!perf_cpu_map__empty(cpus)) {
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
                evsel__set_sample_bit(tracking_evsel, TIME);
                evsel__set_sample_bit(tracking_evsel, CPU);
 
index d2c8cac1147021dbf51185db50409d1c078bf9e9..af8ae4647585b460c5f3ef381cfea29f3fa966bd 100644 (file)
@@ -143,7 +143,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
        if (!opts->full_auxtrace)
                return 0;
 
-       if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) {
+       if (opts->full_auxtrace && !perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
                pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
                return -EINVAL;
        }
@@ -224,7 +224,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
                 * In the case of per-cpu mmaps, we need the CPU on the
                 * AUX event.
                 */
-               if (!perf_cpu_map__empty(cpus))
+               if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
                        evsel__set_sample_bit(intel_bts_evsel, CPU);
        }
 
index fa0c718b9e7277f0374356bf5d46b603f19ed7ca..d199619df3abe1b22c70fbfa1eea485eb095ae6f 100644 (file)
@@ -369,7 +369,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
                        ui__warning("Intel Processor Trace: TSC not available\n");
        }
 
-       per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);
+       per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);
 
        auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
        auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -774,7 +774,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
         * Per-cpu recording needs sched_switch events to distinguish different
         * threads.
         */
-       if (have_timing_info && !perf_cpu_map__empty(cpus) &&
+       if (have_timing_info && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
            !record_opts__no_switch_events(opts)) {
                if (perf_can_record_switch_events()) {
                        bool cpu_wide = !target__none(&opts->target) &&
@@ -832,7 +832,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                 * In the case of per-cpu mmaps, we need the CPU on the
                 * AUX event.
                 */
-               if (!perf_cpu_map__empty(cpus))
+               if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
                        evsel__set_sample_bit(intel_pt_evsel, CPU);
        }
 
@@ -858,7 +858,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                        tracking_evsel->immediate = true;
 
                /* In per-cpu case, always need the time of mmap events etc */
-               if (!perf_cpu_map__empty(cpus)) {
+               if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
                        evsel__set_sample_bit(tracking_evsel, TIME);
                        /* And the CPU for switch events */
                        evsel__set_sample_bit(tracking_evsel, CPU);
@@ -870,7 +870,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
         * Warn the user when we do not have enough information to decode i.e.
         * per-cpu with no sched_switch (except workload-only).
         */
-       if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
+       if (!ptr->have_sched_switch && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
            !target__none(&opts->target) &&
            !intel_pt_evsel->core.attr.exclude_user)
                ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
index a4cf9de7a7b5a9d6ae416f1a65f6a5c47c6e2e4f..f78eea9e21539352e96c68f37c4b0001c84054e4 100644 (file)
@@ -2320,7 +2320,7 @@ static int setup_nodes(struct perf_session *session)
                nodes[node] = set;
 
                /* empty node, skip */
-               if (perf_cpu_map__empty(map))
+               if (perf_cpu_map__has_any_cpu_or_is_empty(map))
                        continue;
 
                perf_cpu_map__for_each_cpu(cpu, idx, map) {
index 0bfa70791cfcfd5b4db7d5a7fd54d90873f1bd0e..bda020c0b9d52fcc4298eeff7ee7b2b2d8dbc171 100644 (file)
@@ -1316,7 +1316,7 @@ static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
         * be the first online CPU in the cache domain else use the
         * first online CPU of the cache domain as the ID.
         */
-       if (perf_cpu_map__empty(cpu_map))
+       if (perf_cpu_map__has_any_cpu_or_is_empty(cpu_map))
                id = cpu.cpu;
        else
                id = perf_cpu_map__cpu(cpu_map, 0).cpu;
@@ -1622,7 +1622,7 @@ static int perf_stat_init_aggr_mode(void)
         * taking the highest cpu number to be the size of
         * the aggregation translate cpumap.
         */
-       if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus))
                nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
        else
                nr = 0;
@@ -2289,7 +2289,7 @@ int process_stat_config_event(struct perf_session *session,
 
        perf_event__read_stat_config(&stat_config, &event->stat_config);
 
-       if (perf_cpu_map__empty(st->cpus)) {
+       if (perf_cpu_map__has_any_cpu_or_is_empty(st->cpus)) {
                if (st->aggr_mode != AGGR_UNSET)
                        pr_warning("warning: processing task data, aggregation mode not set\n");
        } else if (st->aggr_mode != AGGR_UNSET) {
index f528c4364d23588c12d96ff9caa746c559ca5563..3684e6009b635076c8171d68b4b9edb89bfcf1f6 100644 (file)
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
                                   struct evlist *evlist,
                                   struct evsel *evsel, int idx)
 {
-       bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+       bool per_cpu = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
 
        mp->mmap_needed = evsel->needs_auxtrace_mmap;
 
@@ -648,7 +648,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
 
 static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
 {
-       bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+       bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
 
        if (per_cpu_mmaps) {
                struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
index 9eb5c6a08999e83bb1ef05117ba8ce926d93dc16..40290382b2d7092e0e8525ebd03ee18c87154a9d 100644 (file)
@@ -237,7 +237,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 
        evsel = evlist__last(temp_evlist);
 
-       if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
+       if (!evlist || perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
                struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 
                if (cpus)
index ec350604221736783df773d40e1ea66bb6983283..012c4946b9c495131a1e2e5ccc2860eb6658d228 100644 (file)
@@ -315,7 +315,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
        if (!counter->per_pkg)
                return 0;
 
-       if (perf_cpu_map__empty(cpus))
+       if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
                return 0;
 
        if (!mask) {