The original purpose of this expensive cond_resched() call is to
prevent a long queue of requests from blocking other work. The call
is unnecessary, however, after just a single send operation.

For longer queues, instead of invoking the kernel scheduler, simply
release the transport send lock and return to the RPC scheduler.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
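
For reference, here is a sketch of how the transmit loop reads once
this patch is applied. It is reconstructed from the hunk below, so it
is illustrative rather than a verbatim copy of the resulting
xprt_transmit(); the status handling after xprt_unpin_rqst() and the
enclosing function body are elided.

        spin_lock(&xprt->queue_lock);
        counter = 0;
        while (!list_empty(&xprt->xmit_queue)) {
                /* Bound the number of sends per call; once the cap
                 * is reached, break out and let the RPC scheduler
                 * deal with the rest of the queue instead of
                 * calling cond_resched().
                 */
                if (++counter == 20)
                        break;
                next = list_first_entry(&xprt->xmit_queue,
                                struct rpc_rqst, rq_xmit);
                xprt_pin_rqst(next);
                spin_unlock(&xprt->queue_lock);
                status = xprt_request_transmit(next, task);
                if (status == -EBADMSG && next != req)
                        status = 0;
                spin_lock(&xprt->queue_lock);
                xprt_unpin_rqst(next);
                /* ... status handling elided ... */
        }
        spin_unlock(&xprt->queue_lock);
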
 {
        struct rpc_rqst *next, *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
-       int status;
+       int counter, status;
 
        spin_lock(&xprt->queue_lock);
+       counter = 0;
        while (!list_empty(&xprt->xmit_queue)) {
+               if (++counter == 20)
+                       break;
                next = list_first_entry(&xprt->xmit_queue,
                                struct rpc_rqst, rq_xmit);
                xprt_pin_rqst(next);
                spin_unlock(&xprt->queue_lock);
                status = xprt_request_transmit(next, task);
                if (status == -EBADMSG && next != req)
                        status = 0;
-               cond_resched();
                spin_lock(&xprt->queue_lock);
                xprt_unpin_rqst(next);
                if (status == 0) {