tracing: Allow for max buffer data size trace_marker writes
author    Steven Rostedt (Google) <rostedt@goodmis.org>
          Tue, 12 Dec 2023 18:19:01 +0000 (13:19 -0500)
committer Steven Rostedt (Google) <rostedt@goodmis.org>
          Tue, 19 Dec 2023 04:14:16 +0000 (23:14 -0500)
Allow a trace_marker write to be as big as the ring buffer's event data
size allows. Currently, writes are capped at 1KB, but there is no reason
they cannot be as large as what a single ring buffer event can hold.
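
A minimal user-space sketch of what this enables, assuming tracefs is
mounted at /sys/kernel/tracing (the size accepted now depends on the
ring buffer's sub-buffer size rather than a fixed 1KB cap):

  /* Sketch: write a trace_marker message larger than the old 1KB cap. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
      char buf[4096];
      int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);

      if (fd < 0) {
          perror("open trace_marker");
          return 1;
      }

      /* 4KB payload: previously capped at 1KB, now only truncated if
       * it exceeds the ring buffer's maximum event data size. */
      memset(buf, 'x', sizeof(buf) - 1);
      buf[sizeof(buf) - 1] = '\n';

      if (write(fd, buf, sizeof(buf)) < 0)
          perror("write trace_marker");

      close(fd);
      return 0;
  }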

Link: https://lore.kernel.org/linux-trace-kernel/20231212131901.5f501e72@gandalf.local.home
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
include/linux/ring_buffer.h
kernel/trace/ring_buffer.c
kernel/trace/trace.c

diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 782e14f62201f7c76e911886b3ef263d116a3298..b1b03b2c0f08e1362d3678d01f6fa47ad8e34966 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -141,6 +141,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
 
 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);
 
 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bfe2697a92ee4ad13ba257ece9255aa9c69560e0..16b640d824f92800ea0c2ca7d4618f7b838e182a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -5190,6 +5190,21 @@ unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_size);
 
+/**
+ * ring_buffer_max_event_size - return the max data size of an event
+ * @buffer: The ring buffer.
+ *
+ * Returns the maximum size an event can be.
+ */
+unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
+{
+       /* If abs timestamp is requested, events have a timestamp too */
+       if (ring_buffer_time_stamp_abs(buffer))
+               return BUF_MAX_DATA_SIZE - RB_LEN_TIME_EXTEND;
+       return BUF_MAX_DATA_SIZE;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
+
 static void rb_clear_buffer_page(struct buffer_page *page)
 {
        local_set(&page->write, 0);
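
A sketch of how a hypothetical in-kernel caller could use the new helper
to size a reservation (the caller and its truncation policy are made up;
ring_buffer_lock_reserve()/ring_buffer_unlock_commit() are used with
their signatures as of this kernel version):

  #include <linux/errno.h>
  #include <linux/ring_buffer.h>
  #include <linux/string.h>

  /* Hypothetical caller: truncate a payload so it fits in one event. */
  static int write_one_event(struct trace_buffer *buffer,
                             const void *data, size_t len)
  {
      struct ring_buffer_event *event;
      unsigned long max = ring_buffer_max_event_size(buffer);

      if (len > max)
          len = max;              /* truncate rather than fail */

      event = ring_buffer_lock_reserve(buffer, len);
      if (!event)
          return -EBADF;          /* buffer disabled or out of space */

      memcpy(ring_buffer_event_data(event), data, len);
      return ring_buffer_unlock_commit(buffer);
  }
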
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 59e39b652afb5b374a03e3f6c40242f7f94e3b04..dba1328e454b35f690e9fdd1be214fc7752cdac8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -7278,8 +7278,9 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        enum event_trigger_type tt = ETT_NONE;
        struct trace_buffer *buffer;
        struct print_entry *entry;
+       int meta_size;
        ssize_t written;
-       int size;
+       size_t size;
        int len;
 
 /* Used in tracing_mark_raw_write() as well */
@@ -7292,12 +7293,12 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        if (!(tr->trace_flags & TRACE_ITER_MARKERS))
                return -EINVAL;
 
-       if (cnt > TRACE_BUF_SIZE)
-               cnt = TRACE_BUF_SIZE;
-
-       BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
+       if ((ssize_t)cnt < 0)
+               return -EINVAL;
 
-       size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
+       meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
+ again:
+       size = cnt + meta_size;
 
        /* If less than "<faulted>", then make sure we can still add that */
        if (cnt < FAULTED_SIZE)
@@ -7306,9 +7307,25 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        buffer = tr->array_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
                                            tracing_gen_ctx());
-       if (unlikely(!event))
+       if (unlikely(!event)) {
+               /*
+                * If the size was greater than what was allowed, then
+                * make it smaller and try again.
+                */
+               if (size > ring_buffer_max_event_size(buffer)) {
+                       /* cnt < FAULTED size should never be bigger than max */
+                       if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
+                               return -EBADF;
+                       cnt = ring_buffer_max_event_size(buffer) - meta_size;
+                       /* The above should only happen once */
+                       if (WARN_ON_ONCE(cnt + meta_size == size))
+                               return -EBADF;
+                       goto again;
+               }
+
                /* Ring buffer disabled, return as if not open for write */
                return -EBADF;
+       }
 
        entry = ring_buffer_event_data(event);
        entry->ip = _THIS_IP_;
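
As a reading aid, a stand-alone user-space model of the shrink-and-retry
path above (MAX_EVENT_SIZE and the meta_size value are stand-ins; the
real code only takes this path after __trace_buffer_lock_reserve() fails):

  #include <stdio.h>

  #define FAULTED_STR    "<faulted>"
  #define FAULTED_SIZE   (sizeof(FAULTED_STR) - 1)  /* as in trace.c */
  #define MAX_EVENT_SIZE 4072   /* stand-in for ring_buffer_max_event_size() */

  /* Return how many payload bytes would actually be recorded, or -1. */
  static long clamp_marker_write(size_t cnt, size_t meta_size)
  {
      size_t size;

  again:
      size = cnt + meta_size;
      /* keep room to record "<faulted>" for very short writes */
      if (cnt < FAULTED_SIZE)
          size += FAULTED_SIZE - cnt;

      if (size > MAX_EVENT_SIZE) {
          /* a short write can never be what pushed us over the max */
          if (cnt < FAULTED_SIZE)
              return -1;
          cnt = MAX_EVENT_SIZE - meta_size;
          /* if shrinking made no progress, bail instead of looping */
          if (cnt + meta_size == size)
              return -1;
          goto again;
      }
      return (long)cnt;
  }

  int main(void)
  {
      /* a 64KB request is truncated to fit a single event;
       * 18 approximates sizeof(struct print_entry) + 2 on 64-bit */
      printf("%ld\n", clamp_marker_write(65536, 18));
      return 0;
  }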