struct svc_rdma_send_ctxt *sctxt,
                                    struct svc_rdma_recv_ctxt *rctxt,
                                    int status);
+extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail);
 extern int svc_rdma_sendto(struct svc_rqst *);
 extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
                                   unsigned int length);
 
 
        trace_svcrdma_wc_write(wc, &cc->cc_cid);
 
-       atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
-       wake_up(&rdma->sc_send_wait);
+       svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
 
        if (unlikely(wc->status != IB_WC_SUCCESS))
                svc_xprt_deferred_close(&rdma->sc_xprt);
 
        trace_svcrdma_wc_read(wc, &cc->cc_cid);
 
-       atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
-       wake_up(&rdma->sc_send_wait);
-
+       svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
        cc->cc_status = wc->status;
        complete(&cc->cc_done);
        return;
 
        spin_unlock(&rdma->sc_send_lock);
 }
 
+/**
+ * svc_rdma_wake_send_waiters - manage Send Queue accounting
+ * @rdma: controlling transport
+ * @avail: Number of additional SQEs that are now available
+ *
+ * Add @avail newly-available Send Queue entries back to the
+ * transport's SQ accounting, and wake any sender that is
+ * waiting for SQ space.
+ */
+void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
+{
+       atomic_add(avail, &rdma->sc_sq_avail);
+
+       /* Pairs with the barrier implied when a waiter prepares
+        * to sleep: order the sc_sq_avail update before the
+        * waitqueue_active() check below.
+        */
+       smp_mb__after_atomic();
+       if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
+               wake_up(&rdma->sc_send_wait);
+}
+
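For reference, the waiter side that this helper wakes looks roughly like the SQ-full loop in svc_rdma_send() elsewhere in this file. The sketch below is illustrative rather than verbatim (the helper name example_reserve_sqe is made up); it shows why the smp_mb__after_atomic() above matters: it orders the sc_sq_avail update before the waitqueue_active() check, pairing with the barrier the waiter executes when wait_event() prepares to sleep.

/* Hypothetical sketch: reserve one Send Queue entry, sleeping
 * until a completion handler calls svc_rdma_wake_send_waiters().
 */
static int example_reserve_sqe(struct svcxprt_rdma *rdma)
{
        while (atomic_dec_return(&rdma->sc_sq_avail) < 0) {
                /* Undo the failed reservation before sleeping */
                atomic_inc(&rdma->sc_sq_avail);
                wait_event(rdma->sc_send_wait,
                           atomic_read(&rdma->sc_sq_avail) > 0);
                if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
                        return -ENOTCONN;
        }
        return 0;
}

Checking waitqueue_active() before wake_up() avoids taking the wait queue's spinlock on every completion; since completions vastly outnumber SQ-full stalls, the queue is almost always empty, hence the unlikely() hint.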
 /**
  * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
  * @cq: Completion Queue context
 
        trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
 
+       svc_rdma_wake_send_waiters(rdma, 1);
        complete(&ctxt->sc_done);
 
-       atomic_inc(&rdma->sc_sq_avail);
-       wake_up(&rdma->sc_send_wait);
-
        if (unlikely(wc->status != IB_WC_SUCCESS))
                svc_xprt_deferred_close(&rdma->sc_xprt);
 }
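For readers without the elided context: the top of svc_rdma_wc_send() recovers the transport and the send context from the Work Completion via the ib_cqe/container_of idiom used throughout this file. Here is a sketch of the complete handler after this patch, with the opening declarations reconstructed from that idiom rather than taken from the patch itself:

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        /* The Send CQ is allocated with the transport as context */
        struct svcxprt_rdma *rdma = cq->cq_context;

        /* The posted WR's ib_cqe is embedded in the send context;
         * container_of() recovers the enclosing structure.
         */
        struct svc_rdma_send_ctxt *ctxt =
                container_of(wc->wr_cqe, struct svc_rdma_send_ctxt,
                             sc_cqe);

        trace_svcrdma_wc_send(wc, &ctxt->sc_cid);

        /* Replenish SQ accounting before waking the ctxt's owner */
        svc_rdma_wake_send_waiters(rdma, 1);
        complete(&ctxt->sc_done);

        if (unlikely(wc->status != IB_WC_SUCCESS))
                svc_xprt_deferred_close(&rdma->sc_xprt);
}

Note that each Send WC returns exactly one SQE, while the Read and Write completion handlers above return cc_sqecount entries at once.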