IB/hfi1: Add traces for TID RDMA READ
authorKaike Wan <kaike.wan@intel.com>
Wed, 11 Sep 2019 11:30:41 +0000 (07:30 -0400)
committerJason Gunthorpe <jgg@mellanox.com>
Fri, 13 Sep 2019 19:59:55 +0000 (16:59 -0300)
This patch adds traces to help debug packet loss and retry for the TID RDMA
READ protocol.

Link: https://lore.kernel.org/r/20190911113041.126040.64541.stgit@awfm-01.aw.intel.com
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/trace_tid.h

index 024a7c2b6124563cbd317b10cc3c0210b9de1b2e..eeca08d3b47064f79ccf990d7de086337e3e7125 100644 (file)
@@ -1483,6 +1483,11 @@ static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
                        req->ack_pending = cur_seg - req->comp_seg;
                        priv->pending_tid_r_segs += req->ack_pending;
                        qp->s_num_rd_atomic += req->ack_pending;
+                       trace_hfi1_tid_req_update_num_rd_atomic(qp, 0,
+                                                               wqe->wr.opcode,
+                                                               wqe->psn,
+                                                               wqe->lpsn,
+                                                               req);
                } else {
                        priv->pending_tid_r_segs += req->total_segs;
                        qp->s_num_rd_atomic += req->total_segs;
index 6141f4edc6bfa00423b757631fa22aeb43485cd1..b4dcc4d29f84e0e10fcd312b97b520fda0507fd8 100644 (file)
@@ -2646,6 +2646,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
        u32 fpsn;
 
        lockdep_assert_held(&qp->r_lock);
+       trace_hfi1_rsp_read_kdeth_eflags(qp, ibpsn);
+       trace_hfi1_sender_read_kdeth_eflags(qp);
+       trace_hfi1_tid_read_sender_kdeth_eflags(qp, 0);
        spin_lock(&qp->s_lock);
        /* If the psn is out of valid range, drop the packet */
        if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
@@ -2710,6 +2713,8 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                goto s_unlock;
 
        req = wqe_to_tid_req(wqe);
+       trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn,
+                                            wqe->lpsn, req);
        switch (rcv_type) {
        case RHF_RCV_TYPE_EXPECTED:
                switch (rte) {
@@ -2724,6 +2729,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                         * packets that could be still in the fabric.
                         */
                        flow = &req->flows[req->clear_tail];
+                       trace_hfi1_tid_flow_read_kdeth_eflags(qp,
+                                                             req->clear_tail,
+                                                             flow);
                        if (priv->s_flags & HFI1_R_TID_SW_PSN) {
                                diff = cmp_psn(psn,
                                               flow->flow_state.r_next_psn);
index 4388b594ed1b7c3f6000b40e8b8254275cf364a3..343fb9894a820de96f3308cb4153b5e675e3bcf9 100644 (file)
@@ -627,6 +627,12 @@ DEFINE_EVENT(/* event */
        TP_ARGS(qp, index, flow)
 );
 
+DEFINE_EVENT(/* event */
+       hfi1_tid_flow_template, hfi1_tid_flow_read_kdeth_eflags,
+       TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+       TP_ARGS(qp, index, flow)
+);
+
 DECLARE_EVENT_CLASS(/* tid_node */
        hfi1_tid_node_template,
        TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
@@ -851,6 +857,12 @@ DEFINE_EVENT(/* event */
        TP_ARGS(qp, psn)
 );
 
+DEFINE_EVENT(/* event */
+       hfi1_responder_info_template, hfi1_rsp_read_kdeth_eflags,
+       TP_PROTO(struct rvt_qp *qp, u32 psn),
+       TP_ARGS(qp, psn)
+);
+
 DECLARE_EVENT_CLASS(/* sender_info */
        hfi1_sender_info_template,
        TP_PROTO(struct rvt_qp *qp),
@@ -955,6 +967,12 @@ DEFINE_EVENT(/* event */
        TP_ARGS(qp)
 );
 
+DEFINE_EVENT(/* event */
+       hfi1_sender_info_template, hfi1_sender_read_kdeth_eflags,
+       TP_PROTO(struct rvt_qp *qp),
+       TP_ARGS(qp)
+);
+
 DECLARE_EVENT_CLASS(/* tid_read_sender */
        hfi1_tid_read_sender_template,
        TP_PROTO(struct rvt_qp *qp, char newreq),
@@ -1015,6 +1033,12 @@ DEFINE_EVENT(/* event */
        TP_ARGS(qp, newreq)
 );
 
+DEFINE_EVENT(/* event */
+       hfi1_tid_read_sender_template, hfi1_tid_read_sender_kdeth_eflags,
+       TP_PROTO(struct rvt_qp *qp, char newreq),
+       TP_ARGS(qp, newreq)
+);
+
 DECLARE_EVENT_CLASS(/* tid_rdma_request */
        hfi1_tid_rdma_request_template,
        TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
@@ -1215,6 +1239,13 @@ DEFINE_EVENT(/* event */
        TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
 );
 
+DEFINE_EVENT(/* event */
+       hfi1_tid_rdma_request_template, hfi1_tid_req_read_kdeth_eflags,
+       TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+                struct tid_rdma_request *req),
+       TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
 DEFINE_EVENT(/* event */
        hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write,
        TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
@@ -1229,6 +1260,13 @@ DEFINE_EVENT(/* event */
        TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
 );
 
+DEFINE_EVENT(/* event */
+       hfi1_tid_rdma_request_template, hfi1_tid_req_update_num_rd_atomic,
+       TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+                struct tid_rdma_request *req),
+       TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
 DECLARE_EVENT_CLASS(/* rc_rcv_err */
        hfi1_rc_rcv_err_template,
        TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),