DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
-DECLARE_EVENT_CLASS(svcrdma_segment_event,
- TP_PROTO(
- u32 handle,
- u32 length,
- u64 offset
- ),
-
- TP_ARGS(handle, length, offset),
-
- TP_STRUCT__entry(
- __field(u32, handle)
- __field(u32, length)
- __field(u64, offset)
- ),
-
- TP_fast_assign(
- __entry->handle = handle;
- __entry->length = length;
- __entry->offset = offset;
- ),
-
- TP_printk("%u@0x%016llx:0x%08x",
- __entry->length, (unsigned long long)__entry->offset,
- __entry->handle
- )
-);
-
-#define DEFINE_SEGMENT_EVENT(name) \
- DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\
- TP_PROTO( \
- u32 handle, \
- u32 length, \
- u64 offset \
- ), \
- TP_ARGS(handle, length, offset))
-
-DEFINE_SEGMENT_EVENT(send_rseg);
-DEFINE_SEGMENT_EVENT(send_wseg);
-
TRACE_EVENT(svcrdma_encode_wseg,
 TP_PROTO(
 const struct svc_rdma_send_ctxt *ctxt,
 u32 segno,
 u32 handle,
 u32 length,
 u64 offset
 )
);
-DECLARE_EVENT_CLASS(svcrdma_chunk_event,
- TP_PROTO(
- u32 length
- ),
-
- TP_ARGS(length),
-
- TP_STRUCT__entry(
- __field(u32, length)
- ),
-
- TP_fast_assign(
- __entry->length = length;
- ),
-
- TP_printk("length=%u",
- __entry->length
- )
-);
-
-#define DEFINE_CHUNK_EVENT(name) \
- DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name, \
- TP_PROTO( \
- u32 length \
- ), \
- TP_ARGS(length))
-
-DEFINE_CHUNK_EVENT(send_pzr);
-DEFINE_CHUNK_EVENT(encode_write_chunk);
-DEFINE_CHUNK_EVENT(send_write_chunk);
-DEFINE_CHUNK_EVENT(encode_read_chunk);
-DEFINE_CHUNK_EVENT(send_reply_chunk);
-
-TRACE_EVENT(svcrdma_send_read_chunk,
- TP_PROTO(
- u32 length,
- u32 position
- ),
-
- TP_ARGS(length, position),
-
- TP_STRUCT__entry(
- __field(u32, length)
- __field(u32, position)
- ),
-
- TP_fast_assign(
- __entry->length = length;
- __entry->position = position;
- ),
-
- TP_printk("length=%u position=%u",
- __entry->length, __entry->position
- )
-);
-
DECLARE_EVENT_CLASS(svcrdma_error_event,
TP_PROTO(
__be32 xid
)
);
-TRACE_EVENT(svcrdma_post_chunk,
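+/*
+ * Event class shared by the chunk-post tracepoints below: each
+ * event records the chunk's completion ID (cid) and the number
+ * of Send Queue entries (sqecount) consumed by the post.
+ */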
+DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
TP_PROTO(
const struct rpc_rdma_cid *cid,
int sqecount
)
);
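+/*
+ * Instantiations of this macro define trace_svcrdma_post_read_chunk(),
+ * trace_svcrdma_post_write_chunk() and trace_svcrdma_post_reply_chunk(),
+ * each taking the (cid, sqecount) arguments of the class above.
+ */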
+#define DEFINE_POST_CHUNK_EVENT(name) \
+ DEFINE_EVENT(svcrdma_post_chunk_class, \
+ svcrdma_post_##name##_chunk, \
+ TP_PROTO( \
+ const struct rpc_rdma_cid *cid, \
+ int sqecount \
+ ), \
+ TP_ARGS(cid, sqecount))
+
+DEFINE_POST_CHUNK_EVENT(read);
+DEFINE_POST_CHUNK_EVENT(write);
+DEFINE_POST_CHUNK_EVENT(reply);
+
DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
do {
if (atomic_sub_return(cc->cc_sqecount,
&rdma->sc_sq_avail) > 0) {
- trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount);
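+ /* The chunk-specific tracepoints now fire in our callers,
+ * just before they invoke this helper.
+ */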
ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
if (ret)
break;
if (ret < 0)
return -EIO;
- trace_svcrdma_send_wseg(seg->rs_handle, write_len, offset);
-
list_add(&ctxt->rw_list, &cc->cc_rwctxts);
cc->cc_sqecount += ret;
if (write_len == seg->rs_length - info->wi_seg_off) {
const struct xdr_buf *xdr)
{
struct svc_rdma_write_info *info;
+ struct svc_rdma_chunk_ctxt *cc;
int ret;
info = svc_rdma_write_info_alloc(rdma, chunk);
if (!info)
return -ENOMEM;
+ cc = &info->wi_cc;
ret = svc_rdma_xb_write(xdr, info);
if (ret != xdr->len)
goto out_err;
- ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
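+ /* Record the Write chunk's cid and SQE count before posting it. */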
+ trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
+ ret = svc_rdma_post_chunk_ctxt(cc);
if (ret < 0)
goto out_err;
-
- trace_svcrdma_send_write_chunk(xdr->page_len);
return xdr->len;
out_err:
const struct xdr_buf *xdr)
{
struct svc_rdma_write_info *info;
+ struct svc_rdma_chunk_ctxt *cc;
struct svc_rdma_chunk *chunk;
int ret;
info = svc_rdma_write_info_alloc(rdma, chunk);
if (!info)
return -ENOMEM;
+ cc = &info->wi_cc;
ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
svc_rdma_xb_write, info);
if (ret < 0)
goto out_err;
- ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
+ trace_svcrdma_post_reply_chunk(&cc->cc_cid, cc->cc_sqecount);
+ ret = svc_rdma_post_chunk_ctxt(cc);
if (ret < 0)
goto out_err;
- trace_svcrdma_send_reply_chunk(xdr->len);
return xdr->len;
out_err:
if (ret < 0)
break;
- trace_svcrdma_send_rseg(handle, length, offset);
info->ri_chunklen += length;
}
-
return ret;
}
if (ret < 0)
goto out;
- trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);
-
head->rc_hdr_count = 0;
/* Split the Receive buffer between the head and tail
if (ret < 0)
goto out;
- trace_svcrdma_send_pzr(info->ri_chunklen);
-
head->rc_arg.len += info->ri_chunklen;
head->rc_arg.buflen += info->ri_chunklen;
struct svc_rdma_recv_ctxt *head, __be32 *p)
{
struct svc_rdma_read_info *info;
+ struct svc_rdma_chunk_ctxt *cc;
int ret;
/* The request (with page list) is constructed in
info = svc_rdma_read_info_alloc(rdma);
if (!info)
return -ENOMEM;
+ cc = &info->ri_cc;
info->ri_readctxt = head;
info->ri_pageno = 0;
info->ri_pageoff = 0;
if (ret < 0)
goto out_err;
- ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
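+ /* Likewise trace the Read chunk's cid and SQE count before posting. */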
+ trace_svcrdma_post_read_chunk(&cc->cc_cid, cc->cc_sqecount);
+ ret = svc_rdma_post_chunk_ctxt(cc);
if (ret < 0)
goto out_err;
svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);