libperf: Lazily allocate/size mmap event copy
authorIan Rogers <irogers@google.com>
Mon, 27 Nov 2023 22:08:14 +0000 (14:08 -0800)
committerArnaldo Carvalho de Melo <acme@redhat.com>
Thu, 30 Nov 2023 22:25:19 +0000 (19:25 -0300)
The event copy in the mmap provides storage for reading an event. Not
all users of mmaps read the events; perf record, for example, does
not. The buffer was also statically sized to PERF_SAMPLE_MAX_SIZE
rather than to the size actually required by the header's event size.

Switch to a model where the event_copy is reallocated to the event's
size whenever it is too small. This adds the potential for the event to
move, so a previously stored copy of the event pointer could be left
dangling. All the current users do:

  while (event = perf_mmap__read_event()) { ... }

and so they would be broken due to the event being overwritten if they
had stored the pointer. Manual inspection and address sanitizer testing
also shows the event pointer not being stored.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Colin Ian King <colin.i.king@gmail.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: German Gomez <german.gomez@arm.com>
Cc: Guilherme Amadio <amadio@gentoo.org>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Li Dong <lidong@vivo.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Nick Terrell <terrelln@fb.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Steinar H. Gunderson <sesse@google.com>
Cc: Vincent Whitchurch <vincent.whitchurch@axis.com>
Cc: Wenyu Liu <liuwenyu7@huawei.com>
Cc: Yang Jihong <yangjihong1@huawei.com>
Link: https://lore.kernel.org/r/20231127220902.1315692-3-irogers@google.com
[ Replace two lines with equivalent zfree(&map->event_copy) ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/lib/perf/include/internal/mmap.h
tools/lib/perf/mmap.c

index 5a062af8e9d8e2200aaecc3e110b7a750c2f78e7..5f08cab61ecec6d25cd1a80e4a1c5bd83a0d12e3 100644 (file)
@@ -33,7 +33,8 @@ struct perf_mmap {
        bool                     overwrite;
        u64                      flush;
        libperf_unmap_cb_t       unmap_cb;
-       char                     event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+       void                    *event_copy;
+       size_t                   event_copy_sz;
        struct perf_mmap        *next;
 };
 
index 2184814b37dd393e3a6a0fb10c6ccb4db99df042..0c903c2372c97850ab7d3fddea63ddd67bfd40c9 100644 (file)
@@ -19,6 +19,7 @@
 void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
                     bool overwrite, libperf_unmap_cb_t unmap_cb)
 {
+       /* Assume fields were zero initialized. */
        map->fd = -1;
        map->overwrite = overwrite;
        map->unmap_cb  = unmap_cb;
@@ -51,13 +52,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
 
 void perf_mmap__munmap(struct perf_mmap *map)
 {
-       if (map && map->base != NULL) {
+       if (!map)
+               return;
+
+       zfree(&map->event_copy);
+       map->event_copy_sz = 0;
+       if (map->base) {
                munmap(map->base, perf_mmap__mmap_len(map));
                map->base = NULL;
                map->fd = -1;
                refcount_set(&map->refcnt, 0);
        }
-       if (map && map->unmap_cb)
+       if (map->unmap_cb)
                map->unmap_cb(map);
 }
 
@@ -223,9 +229,17 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
                 */
                if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
                        unsigned int offset = *startp;
-                       unsigned int len = min(sizeof(*event), size), cpy;
+                       unsigned int len = size, cpy;
                        void *dst = map->event_copy;
 
+                       if (size > map->event_copy_sz) {
+                               dst = realloc(map->event_copy, size);
+                               if (!dst)
+                                       return NULL;
+                               map->event_copy = dst;
+                               map->event_copy_sz = size;
+                       }
+
                        do {
                                cpy = min(map->mask + 1 - (offset & map->mask), len);
                                memcpy(dst, &data[offset & map->mask], cpy);