perf tools: Use dedicated non-atomic clear/set bit helpers
authorSean Christopherson <seanjc@google.com>
Sat, 19 Nov 2022 01:34:46 +0000 (01:34 +0000)
committerPaolo Bonzini <pbonzini@redhat.com>
Fri, 2 Dec 2022 18:22:33 +0000 (13:22 -0500)
Use the dedicated non-atomic helpers for {clear,set}_bit() and their
test variants, i.e. the double-underscore versions.  Despite being
defined in atomic.h, and despite the kernel versions being atomic in the
kernel, tools' {clear,set}_bit() helpers aren't actually atomic.  Move
to the double-underscore versions so that the versions that are expected
to be atomic (for kernel developers) can be made atomic without affecting
users that don't want atomic operations.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Message-Id: <20221119013450.2643007-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
15 files changed:
tools/perf/bench/find-bit-bench.c
tools/perf/builtin-c2c.c
tools/perf/builtin-kwork.c
tools/perf/builtin-record.c
tools/perf/builtin-sched.c
tools/perf/tests/bitmap.c
tools/perf/tests/mem2node.c
tools/perf/util/affinity.c
tools/perf/util/header.c
tools/perf/util/mmap.c
tools/perf/util/pmu.c
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/svghelper.c

index 22b5cfe9702370c3d7f6b04c0eb572965fb7b7e1..d103c3136983d55aad738fd8a078a76cd418cb60 100644 (file)
@@ -70,7 +70,7 @@ static int do_for_each_set_bit(unsigned int num_bits)
                bitmap_zero(to_test, num_bits);
                skip = num_bits / set_bits;
                for (i = 0; i < num_bits; i += skip)
-                       set_bit(i, to_test);
+                       __set_bit(i, to_test);
 
                for (i = 0; i < outer_iterations; i++) {
                        old = accumulator;
index a9190458d2d50015cbe997ed9cab63b8b51d984a..52d94c7dd8366c244a026fe0daa214462c0530f0 100644 (file)
@@ -230,7 +230,7 @@ static void c2c_he__set_cpu(struct c2c_hist_entry *c2c_he,
                      "WARNING: no sample cpu value"))
                return;
 
-       set_bit(sample->cpu, c2c_he->cpuset);
+       __set_bit(sample->cpu, c2c_he->cpuset);
 }
 
 static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
@@ -247,7 +247,7 @@ static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
        if (WARN_ONCE(node < 0, "WARNING: failed to find node\n"))
                return;
 
-       set_bit(node, c2c_he->nodeset);
+       __set_bit(node, c2c_he->nodeset);
 
        if (c2c_he->paddr != sample->phys_addr) {
                c2c_he->paddr_cnt++;
@@ -2318,7 +2318,7 @@ static int setup_nodes(struct perf_session *session)
                        continue;
 
                perf_cpu_map__for_each_cpu(cpu, idx, map) {
-                       set_bit(cpu.cpu, set);
+                       __set_bit(cpu.cpu, set);
 
                        if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug"))
                                return -EINVAL;
index fb8c63656ad897674b34ec895784b145b39f2b20..1f63e24f704e2d0b32618e074cf0efb67ccd1e4a 100644 (file)
@@ -216,7 +216,7 @@ static struct kwork_atom *atom_new(struct perf_kwork *kwork,
        list_add_tail(&page->list, &kwork->atom_page_list);
 
 found_atom:
-       set_bit(i, page->bitmap);
+       __set_bit(i, page->bitmap);
        atom->time = sample->time;
        atom->prev = NULL;
        atom->page_addr = page;
@@ -229,8 +229,8 @@ static void atom_free(struct kwork_atom *atom)
        if (atom->prev != NULL)
                atom_free(atom->prev);
 
-       clear_bit(atom->bit_inpage,
-                 ((struct kwork_atom_page *)atom->page_addr)->bitmap);
+       __clear_bit(atom->bit_inpage,
+                   ((struct kwork_atom_page *)atom->page_addr)->bitmap);
 }
 
 static void atom_del(struct kwork_atom *atom)
index e128b855dddec15e28eb183c9db52bf1bb74d194..2711c141c5bf0167e58e1b4364b31f68976369d8 100644 (file)
@@ -3555,7 +3555,7 @@ static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cp
                /* Return ENODEV is input cpu is greater than max cpu */
                if ((unsigned long)cpu.cpu > mask->nbits)
                        return -ENODEV;
-               set_bit(cpu.cpu, mask->bits);
+               __set_bit(cpu.cpu, mask->bits);
        }
 
        return 0;
@@ -3627,8 +3627,8 @@ static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map
        pr_debug("nr_threads: %d\n", rec->nr_threads);
 
        for (t = 0; t < rec->nr_threads; t++) {
-               set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
-               set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
+               __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
+               __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
                if (verbose) {
                        pr_debug("thread_masks[%d]: ", t);
                        mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
index f93737eef07ba0fcda59cccd415963459cb1715d..86e18575c9beee8eab168a088b716df37066d64d 100644 (file)
@@ -1573,7 +1573,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
 
        if (sched->map.comp) {
                cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
-               if (!test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
+               if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
                        sched->map.comp_cpus[cpus_nr++] = this_cpu;
                        new_cpu = true;
                }
index 4965dd6669566125d38ca3ef61714994aa1addb4..0173f5402a35b9c2e58a38f3515f7e138586f192 100644 (file)
@@ -18,7 +18,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
 
        if (map && bm) {
                for (i = 0; i < perf_cpu_map__nr(map); i++)
-                       set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
+                       __set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
        }
 
        if (map)
index 4c96829510c916c89c64c4ee97e87d02d52035a5..a0e88c49610746d4686002d0aaa17de741b178a9 100644 (file)
@@ -33,7 +33,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
                int i;
 
                perf_cpu_map__for_each_cpu(cpu, i, map)
-                       set_bit(cpu.cpu, bm);
+                       __set_bit(cpu.cpu, bm);
        }
 
        if (map)
index 4ee96b3c755b73d5bdddaad55ae290254329cec1..38dc4524b7e862776ba1cb3934a2d2cb72ff1932 100644 (file)
@@ -58,14 +58,14 @@ void affinity__set(struct affinity *a, int cpu)
                return;
 
        a->changed = true;
-       set_bit(cpu, a->sched_cpus);
+       __set_bit(cpu, a->sched_cpus);
        /*
         * We ignore errors because affinity is just an optimization.
         * This could happen for example with isolated CPUs or cpusets.
         * In this case the IPIs inside the kernel's perf API still work.
         */
        sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
-       clear_bit(cpu, a->sched_cpus);
+       __clear_bit(cpu, a->sched_cpus);
 }
 
 static void __affinity__cleanup(struct affinity *a)
index 98dfaf84bd13798f69b25c87a8b67e289225a2a0..dc2ae397d400ea6e5941ed3d5205fc00521af6bc 100644 (file)
@@ -79,12 +79,12 @@ struct perf_file_attr {
 
 void perf_header__set_feat(struct perf_header *header, int feat)
 {
-       set_bit(feat, header->adds_features);
+       __set_bit(feat, header->adds_features);
 }
 
 void perf_header__clear_feat(struct perf_header *header, int feat)
 {
-       clear_bit(feat, header->adds_features);
+       __clear_bit(feat, header->adds_features);
 }
 
 bool perf_header__has_feat(const struct perf_header *header, int feat)
@@ -1358,7 +1358,7 @@ static int memory_node__read(struct memory_node *n, unsigned long idx)
        rewinddir(dir);
 
        for_each_memory(phys, dir) {
-               set_bit(phys, n->set);
+               __set_bit(phys, n->set);
        }
 
        closedir(dir);
@@ -3952,7 +3952,7 @@ int perf_file_header__read(struct perf_file_header *header,
 
                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
                        bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
-                       set_bit(HEADER_BUILD_ID, header->adds_features);
+                       __set_bit(HEADER_BUILD_ID, header->adds_features);
                }
        }
 
index a4dff881be39b65f4d2b1f878b76ecb30f5b85d8..49093b21ee2da034e6634ab7dbf34dd08045faeb 100644 (file)
@@ -111,7 +111,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, i
                        pr_err("Failed to allocate node mask for mbind: error %m\n");
                        return -1;
                }
-               set_bit(node_index, node_mask);
+               __set_bit(node_index, node_mask);
                if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
                        pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
                                data, data + mmap_len, node_index);
@@ -256,7 +256,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
        for (idx = 0; idx < nr_cpus; idx++) {
                cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
                if (cpu__get_node(cpu) == node)
-                       set_bit(cpu.cpu, mask->bits);
+                       __set_bit(cpu.cpu, mask->bits);
        }
 }
 
@@ -270,7 +270,7 @@ static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *
        if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
                build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
        else if (mp->affinity == PERF_AFFINITY_CPU)
-               set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
+               __set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
 
        return 0;
 }
index 03284059175f7f43b29826f57d13790e9a3891c3..371d8f7a3de3c49a1d25094d26cd9d26cd9d22d5 100644 (file)
@@ -1513,7 +1513,7 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
 
        memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));
        for (b = from; b <= to; b++)
-               set_bit(b, bits);
+               __set_bit(b, bits);
 }
 
 void perf_pmu__del_formats(struct list_head *formats)
index a5d945415bbc119968cb897674e30ee6d2961833..5b602b6d4685413300a39ac54e96a77356277a80 100644 (file)
@@ -365,7 +365,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
 
        sprintf(handler, "%s::%s", event->system, event->name);
 
-       if (!test_and_set_bit(event->id, events_defined))
+       if (!__test_and_set_bit(event->id, events_defined))
                define_event_symbols(event, handler, event->print_fmt.args);
 
        s = nsecs / NSEC_PER_SEC;
index 1f2040f36d4e937193d26cca1b2d9b5afa4e8b81..0f229fa29163eacd2719777e863a87891954eeae 100644 (file)
@@ -933,7 +933,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
 
        sprintf(handler_name, "%s__%s", event->system, event->name);
 
-       if (!test_and_set_bit(event->id, events_defined))
+       if (!__test_and_set_bit(event->id, events_defined))
                define_event_symbols(event, handler_name, event->print_fmt.args);
 
        handler = get_handler(handler_name);
index 1a4f10de29ffebd29b4c6d99512d022a38bdd8b2..873fd51ec1b21fb84ddeab5f017708a1dced88f8 100644 (file)
@@ -2748,7 +2748,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
                        goto out_delete_map;
                }
 
-               set_bit(cpu.cpu, cpu_bitmap);
+               __set_bit(cpu.cpu, cpu_bitmap);
        }
 
        err = 0;
index 1e0c731fc5396e31a1c1e315e15bb7793d0fcacb..5c62d3118c41fd75cb827c102a8946ac79c4ed7e 100644 (file)
@@ -741,7 +741,7 @@ static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
                        break;
                }
 
-               set_bit(c.cpu, cpumask_bits(b));
+               __set_bit(c.cpu, cpumask_bits(b));
        }
 
        perf_cpu_map__put(m);