svcrdma: Clean up chunk tracepoints
author	Chuck Lever <chuck.lever@oracle.com>
Thu, 11 Jun 2020 17:28:28 +0000 (13:28 -0400)
committer	Chuck Lever <chuck.lever@oracle.com>
Mon, 30 Nov 2020 18:00:23 +0000 (13:00 -0500)
We already have trace_svcrdma_decode_rseg(), which records each
ingress Read segment. Instead of reporting those again when they
are about to be posted as RDMA Reads, let's fire one tracepoint
before posting each type of chunk.

So we'll get:

        nfsd-1998  [002]   321.666615: svcrdma_decode_rseg:  cq.id=4 cid=42 segno=0 position=0 192@0x013ca9ebfae14000:0xb0010b05
        nfsd-1998  [002]   321.666615: svcrdma_decode_rseg:  cq.id=4 cid=42 segno=1 position=0 7688@0x013ca9ebf914e000:0xb0010a05
        nfsd-1998  [002]   321.666615: svcrdma_decode_rseg:  cq.id=4 cid=42 segno=2 position=0 28@0x013ca9ebfae15000:0xb0010905
        nfsd-1998  [002]   321.666622: svcrdma_decode_rqst:  cq.id=4 cid=42 xid=0x013ca9eb vers=1 credits=128 proc=RDMA_NOMSG hdrlen=100

        nfsd-1998  [002]   321.666642: svcrdma_post_read_chunk: cq.id=3 cid=112 sqecount=3

kworker/2:1H-221   [002]   321.673949: svcrdma_wc_read:      cq.id=3 cid=112 status=SUCCESS (0/0x0)
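
For context, DEFINE_POST_CHUNK_EVENT() below is just a DEFINE_EVENT()
wrapper around the shared svcrdma_post_chunk_class, so all three new
events emit the same "cq.id=... cid=... sqecount=..." format shown
above. Hand-expanded, the read variant amounts to roughly the sketch
below (illustration only; the real definitions are generated by the
macro added in include/trace/events/rpcrdma.h):

	/* Sketch: what DEFINE_POST_CHUNK_EVENT(read) expands to.  The
	 * entry fields, assignments, and print format all come from the
	 * event class, so no per-event TP_printk is needed.
	 */
	DEFINE_EVENT(svcrdma_post_chunk_class, svcrdma_post_read_chunk,
		TP_PROTO(
			const struct rpc_rdma_cid *cid,
			int sqecount
		),
		TP_ARGS(cid, sqecount)
	);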

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
include/trace/events/rpcrdma.h
net/sunrpc/xprtrdma/svc_rdma_rw.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c

index 054dedd0280cb44ef6f4712891a94bd6bbbb3d1a..896aafc37b090930d7aea3b274631cf87bdfaa7d 100644 (file)
@@ -1410,45 +1410,6 @@ DEFINE_BADREQ_EVENT(drop);
 DEFINE_BADREQ_EVENT(badproc);
 DEFINE_BADREQ_EVENT(parse);
 
-DECLARE_EVENT_CLASS(svcrdma_segment_event,
-       TP_PROTO(
-               u32 handle,
-               u32 length,
-               u64 offset
-       ),
-
-       TP_ARGS(handle, length, offset),
-
-       TP_STRUCT__entry(
-               __field(u32, handle)
-               __field(u32, length)
-               __field(u64, offset)
-       ),
-
-       TP_fast_assign(
-               __entry->handle = handle;
-               __entry->length = length;
-               __entry->offset = offset;
-       ),
-
-       TP_printk("%u@0x%016llx:0x%08x",
-               __entry->length, (unsigned long long)__entry->offset,
-               __entry->handle
-       )
-);
-
-#define DEFINE_SEGMENT_EVENT(name)                                     \
-               DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\
-                               TP_PROTO(                               \
-                                       u32 handle,                     \
-                                       u32 length,                     \
-                                       u64 offset                      \
-                               ),                                      \
-                               TP_ARGS(handle, length, offset))
-
-DEFINE_SEGMENT_EVENT(send_rseg);
-DEFINE_SEGMENT_EVENT(send_wseg);
-
 TRACE_EVENT(svcrdma_encode_wseg,
        TP_PROTO(
                const struct svc_rdma_send_ctxt *ctxt,
@@ -1558,62 +1519,6 @@ TRACE_EVENT(svcrdma_decode_wseg,
        )
 );
 
-DECLARE_EVENT_CLASS(svcrdma_chunk_event,
-       TP_PROTO(
-               u32 length
-       ),
-
-       TP_ARGS(length),
-
-       TP_STRUCT__entry(
-               __field(u32, length)
-       ),
-
-       TP_fast_assign(
-               __entry->length = length;
-       ),
-
-       TP_printk("length=%u",
-               __entry->length
-       )
-);
-
-#define DEFINE_CHUNK_EVENT(name)                                       \
-               DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name,       \
-                               TP_PROTO(                               \
-                                       u32 length                      \
-                               ),                                      \
-                               TP_ARGS(length))
-
-DEFINE_CHUNK_EVENT(send_pzr);
-DEFINE_CHUNK_EVENT(encode_write_chunk);
-DEFINE_CHUNK_EVENT(send_write_chunk);
-DEFINE_CHUNK_EVENT(encode_read_chunk);
-DEFINE_CHUNK_EVENT(send_reply_chunk);
-
-TRACE_EVENT(svcrdma_send_read_chunk,
-       TP_PROTO(
-               u32 length,
-               u32 position
-       ),
-
-       TP_ARGS(length, position),
-
-       TP_STRUCT__entry(
-               __field(u32, length)
-               __field(u32, position)
-       ),
-
-       TP_fast_assign(
-               __entry->length = length;
-               __entry->position = position;
-       ),
-
-       TP_printk("length=%u position=%u",
-               __entry->length, __entry->position
-       )
-);
-
 DECLARE_EVENT_CLASS(svcrdma_error_event,
        TP_PROTO(
                __be32 xid
@@ -1936,7 +1841,7 @@ TRACE_EVENT(svcrdma_rq_post_err,
        )
 );
 
-TRACE_EVENT(svcrdma_post_chunk,
+DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
        TP_PROTO(
                const struct rpc_rdma_cid *cid,
                int sqecount
@@ -1962,6 +1867,19 @@ TRACE_EVENT(svcrdma_post_chunk,
        )
 );
 
+#define DEFINE_POST_CHUNK_EVENT(name)                                  \
+               DEFINE_EVENT(svcrdma_post_chunk_class,                  \
+                               svcrdma_post_##name##_chunk,            \
+                               TP_PROTO(                               \
+                                       const struct rpc_rdma_cid *cid, \
+                                       int sqecount                    \
+                               ),                                      \
+                               TP_ARGS(cid, sqecount))
+
+DEFINE_POST_CHUNK_EVENT(read);
+DEFINE_POST_CHUNK_EVENT(write);
+DEFINE_POST_CHUNK_EVENT(reply);
+
 DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
 DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
 
index 4a0ece9aa38583f16cc1e96813491bb466f3f7c0..b04c700862e930d9b9f2724a1f0689e70db38767 100644 (file)
@@ -358,7 +358,6 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
        do {
                if (atomic_sub_return(cc->cc_sqecount,
                                      &rdma->sc_sq_avail) > 0) {
-                       trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount);
                        ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
                        if (ret)
                                break;
@@ -470,8 +469,6 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
                if (ret < 0)
                        return -EIO;
 
-               trace_svcrdma_send_wseg(seg->rs_handle, write_len, offset);
-
                list_add(&ctxt->rw_list, &cc->cc_rwctxts);
                cc->cc_sqecount += ret;
                if (write_len == seg->rs_length - info->wi_seg_off) {
@@ -590,21 +587,22 @@ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
                              const struct xdr_buf *xdr)
 {
        struct svc_rdma_write_info *info;
+       struct svc_rdma_chunk_ctxt *cc;
        int ret;
 
        info = svc_rdma_write_info_alloc(rdma, chunk);
        if (!info)
                return -ENOMEM;
+       cc = &info->wi_cc;
 
        ret = svc_rdma_xb_write(xdr, info);
        if (ret != xdr->len)
                goto out_err;
 
-       ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
+       trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
+       ret = svc_rdma_post_chunk_ctxt(cc);
        if (ret < 0)
                goto out_err;
-
-       trace_svcrdma_send_write_chunk(xdr->page_len);
        return xdr->len;
 
 out_err:
@@ -630,6 +628,7 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
                              const struct xdr_buf *xdr)
 {
        struct svc_rdma_write_info *info;
+       struct svc_rdma_chunk_ctxt *cc;
        struct svc_rdma_chunk *chunk;
        int ret;
 
@@ -640,17 +639,18 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
        info = svc_rdma_write_info_alloc(rdma, chunk);
        if (!info)
                return -ENOMEM;
+       cc = &info->wi_cc;
 
        ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
                                      svc_rdma_xb_write, info);
        if (ret < 0)
                goto out_err;
 
-       ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
+       trace_svcrdma_post_reply_chunk(&cc->cc_cid, cc->cc_sqecount);
+       ret = svc_rdma_post_chunk_ctxt(cc);
        if (ret < 0)
                goto out_err;
 
-       trace_svcrdma_send_reply_chunk(xdr->len);
        return xdr->len;
 
 out_err:
@@ -737,10 +737,8 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                if (ret < 0)
                        break;
 
-               trace_svcrdma_send_rseg(handle, length, offset);
                info->ri_chunklen += length;
        }
-
        return ret;
 }
 
@@ -762,8 +760,6 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
        if (ret < 0)
                goto out;
 
-       trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);
-
        head->rc_hdr_count = 0;
 
        /* Split the Receive buffer between the head and tail
@@ -818,8 +814,6 @@ static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
        if (ret < 0)
                goto out;
 
-       trace_svcrdma_send_pzr(info->ri_chunklen);
-
        head->rc_arg.len += info->ri_chunklen;
        head->rc_arg.buflen += info->ri_chunklen;
 
@@ -876,6 +870,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
                             struct svc_rdma_recv_ctxt *head, __be32 *p)
 {
        struct svc_rdma_read_info *info;
+       struct svc_rdma_chunk_ctxt *cc;
        int ret;
 
        /* The request (with page list) is constructed in
@@ -893,6 +888,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
        info = svc_rdma_read_info_alloc(rdma);
        if (!info)
                return -ENOMEM;
+       cc = &info->ri_cc;
        info->ri_readctxt = head;
        info->ri_pageno = 0;
        info->ri_pageoff = 0;
@@ -905,7 +901,8 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
        if (ret < 0)
                goto out_err;
 
-       ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
+       trace_svcrdma_post_read_chunk(&cc->cc_cid, cc->cc_sqecount);
+       ret = svc_rdma_post_chunk_ctxt(cc);
        if (ret < 0)
                goto out_err;
        svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
index 78fa57ce424f9dc00348fb6505408a96561b7839..68af79d4f04fc563a8419c6237cae19b8c4c737e 100644 (file)
@@ -411,8 +411,6 @@ static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
        unsigned int segno;
        ssize_t len, ret;
 
-       trace_svcrdma_encode_write_chunk(remaining);
-
        len = 0;
        ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
        if (ret < 0)