{
struct io_uring_cqe *cqe;
+ trace_io_uring_complete(ctx, ki_user_data, res);
+
/*
* If we can't get a cq entry, userspace overflowed the
* submission (by quite a lot). Increment the overflow count in
s.has_user = *mm != NULL;
s.in_async = true;
s.needs_fixed_file = true;
- trace_io_uring_submit_sqe(ctx, true, true);
+ trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, true);
io_submit_sqe(ctx, &s, statep, &link);
submitted++;
}
s.needs_fixed_file = false;
s.ring_fd = ring_fd;
submit++;
- trace_io_uring_submit_sqe(ctx, true, false);
+ trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, false);
io_submit_sqe(ctx, &s, statep, &link);
}
TP_printk("request %p, link %p", __entry->req, __entry->link)
);
+/**
+ * io_uring_complete - called when completing an SQE
+ *
+ * @ctx:	pointer to a ring context structure
+ * @user_data:	user data associated with the request
+ * @res:	result of the request
+ */
+TRACE_EVENT(io_uring_complete,
+
+	TP_PROTO(void *ctx, u64 user_data, long res),
+
+	TP_ARGS(ctx, user_data, res),
+
+	TP_STRUCT__entry (
+		__field(  void *,	ctx		)
+		__field(  u64,		user_data	)
+		__field(  long,		res		)
+	),
+
+	TP_fast_assign(
+		__entry->ctx		= ctx;
+		__entry->user_data	= user_data;
+		__entry->res		= res;
+	),
+
+	/* cast needed: u64 is not unsigned long long on all arches */
+	TP_printk("ring %p, user_data 0x%llx, result %ld",
+		  __entry->ctx, (unsigned long long)__entry->user_data,
+		  __entry->res)
+);
+
/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
- * @ctx: pointer to a ring context structure
+ * @ctx:	pointer to a ring context structure
+ * @user_data:	user data associated with the request
 * @force_nonblock: whether a context blocking or not
 * @sq_thread: true if sq_thread has submitted this SQE
 *
 */
TRACE_EVENT(io_uring_submit_sqe,
-	TP_PROTO(void *ctx, bool force_nonblock, bool sq_thread),
+	TP_PROTO(void *ctx, u64 user_data, bool force_nonblock, bool sq_thread),
-	TP_ARGS(ctx, force_nonblock, sq_thread),
+	TP_ARGS(ctx, user_data, force_nonblock, sq_thread),
	TP_STRUCT__entry (
-		__field(  void *,	ctx		)
+		__field(  void *,	ctx		)
+		__field(  u64,		user_data	)
		__field(  bool,		force_nonblock	)
-		__field(  bool,		sq_thread	)
+		__field(  bool,		sq_thread	)
	),
	TP_fast_assign(
-		__entry->ctx		= ctx;
+		__entry->ctx		= ctx;
+		__entry->user_data	= user_data;
		__entry->force_nonblock	= force_nonblock;
-		__entry->sq_thread	= sq_thread;
+		__entry->sq_thread	= sq_thread;
	),
-	TP_printk("ring %p, non block %d, sq_thread %d",
-		  __entry->ctx, __entry->force_nonblock, __entry->sq_thread)
+	TP_printk("ring %p, user_data 0x%llx, non block %d, sq_thread %d",
+		  __entry->ctx, (unsigned long long) __entry->user_data,
+		  __entry->force_nonblock, __entry->sq_thread)
);
#endif /* _TRACE_IO_URING_H */