if (qp->valid) {
                qp->comp.timeout = 1;
-               rxe_run_task(&qp->comp.task, 1);
+               rxe_sched_task(&qp->comp.task);
        }
 }
 
        if (must_sched != 0)
                rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
 
-       rxe_run_task(&qp->comp.task, must_sched);
+       if (must_sched)
+               rxe_sched_task(&qp->comp.task);
+       else
+               rxe_run_task(&qp->comp.task);
 }
 
 static inline enum comp_state get_wqe(struct rxe_qp *qp,
                                        qp->comp.psn = pkt->psn;
                                        if (qp->req.wait_psn) {
                                                qp->req.wait_psn = 0;
-                                               rxe_run_task(&qp->req.task, 0);
+                                               rxe_run_task(&qp->req.task);
                                        }
                                }
                                return COMPST_ERROR_RETRY;
         */
        if (qp->req.wait_fence) {
                qp->req.wait_fence = 0;
-               rxe_run_task(&qp->req.task, 0);
+               rxe_run_task(&qp->req.task);
        }
 }
 
                if (qp->req.need_rd_atomic) {
                        qp->comp.timeout_retry = 0;
                        qp->req.need_rd_atomic = 0;
-                       rxe_run_task(&qp->req.task, 0);
+                       rxe_run_task(&qp->req.task);
                }
        }
 
 
                if (qp->req.wait_psn) {
                        qp->req.wait_psn = 0;
-                       rxe_run_task(&qp->req.task, 1);
+                       rxe_sched_task(&qp->req.task);
                }
        }
 
 
                        if (qp->req.wait_psn) {
                                qp->req.wait_psn = 0;
-                               rxe_run_task(&qp->req.task, 1);
+                               rxe_sched_task(&qp->req.task);
                        }
 
                        state = COMPST_DONE;
                                                        RXE_CNT_COMP_RETRY);
                                        qp->req.need_retry = 1;
                                        qp->comp.started_retry = 1;
-                                       rxe_run_task(&qp->req.task, 0);
+                                       rxe_run_task(&qp->req.task);
                                }
                                goto done;
 
 
 
        if (unlikely(qp->need_req_skb &&
                     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
-               rxe_run_task(&qp->req.task, 1);
+               rxe_sched_task(&qp->req.task);
 
        rxe_put(qp);
 }
        if ((qp_type(qp) != IB_QPT_RC) &&
            (pkt->mask & RXE_END_MASK)) {
                pkt->wqe->state = wqe_state_done;
-               rxe_run_task(&qp->comp.task, 1);
+               rxe_sched_task(&qp->comp.task);
        }
 
        rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
 
                if (qp->req.state != QP_STATE_DRAINED) {
                        qp->req.state = QP_STATE_DRAIN;
                        if (qp_type(qp) == IB_QPT_RC)
-                               rxe_run_task(&qp->comp.task, 1);
+                               rxe_sched_task(&qp->comp.task);
                        else
                                __rxe_do_task(&qp->comp.task);
-                       rxe_run_task(&qp->req.task, 1);
+                       rxe_sched_task(&qp->req.task);
                }
        }
 }
        qp->attr.qp_state = IB_QPS_ERR;
 
        /* drain work and packet queues */
-       rxe_run_task(&qp->resp.task, 1);
+       rxe_sched_task(&qp->resp.task);
 
        if (qp_type(qp) == IB_QPT_RC)
-               rxe_run_task(&qp->comp.task, 1);
+               rxe_sched_task(&qp->comp.task);
        else
                __rxe_do_task(&qp->comp.task);
-       rxe_run_task(&qp->req.task, 1);
+       rxe_sched_task(&qp->req.task);
 }
 
 /* called by the modify qp verb */
 
        /* request a send queue retry */
        qp->req.need_retry = 1;
        qp->req.wait_for_rnr_timer = 0;
-       rxe_run_task(&qp->req.task, 1);
+       rxe_sched_task(&qp->req.task);
 }
 
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
         * which can lead to a deadlock. So go ahead and complete
         * it now.
         */
-       rxe_run_task(&qp->comp.task, 1);
+       rxe_sched_task(&qp->comp.task);
 
        return 0;
 }
                                                       qp->req.wqe_index);
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
-                       rxe_run_task(&qp->comp.task, 0);
+                       rxe_run_task(&qp->comp.task);
                        goto done;
                }
                payload = mtu;
                rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
                if (err == -EAGAIN) {
-                       rxe_run_task(&qp->req.task, 1);
+                       rxe_sched_task(&qp->req.task);
                        goto exit;
                }
 
        qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
        wqe->state = wqe_state_error;
        qp->req.state = QP_STATE_ERROR;
-       rxe_run_task(&qp->comp.task, 0);
+       rxe_run_task(&qp->comp.task);
 exit:
        ret = -EAGAIN;
 out:
 
        must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
                        (skb_queue_len(&qp->req_pkts) > 1);
 
-       rxe_run_task(&qp->resp.task, must_sched);
+       if (must_sched)
+               rxe_sched_task(&qp->resp.task);
+       else
+               rxe_run_task(&qp->resp.task);
 }
 
 static inline enum resp_states get_req(struct rxe_qp *qp,
 
        tasklet_kill(&task->tasklet);
 }
 
-void rxe_run_task(struct rxe_task *task, int sched)
+/* run the task directly in the calling context; silently ignores the
+ * request if the task has already been destroyed
+ */
+void rxe_run_task(struct rxe_task *task)
 {
        if (task->destroyed)
                return;
 
-       if (sched)
-               tasklet_schedule(&task->tasklet);
-       else
-               rxe_do_task(&task->tasklet);
+       rxe_do_task(&task->tasklet);
+}
+
+/* schedule the task's tasklet to run later instead of running it
+ * inline; silently ignores the request if the task has already been
+ * destroyed
+ */
+void rxe_sched_task(struct rxe_task *task)
+{
+       if (task->destroyed)
+               return;
+
+       tasklet_schedule(&task->tasklet);
 }
 
 void rxe_disable_task(struct rxe_task *task)
 
  */
 void rxe_do_task(struct tasklet_struct *t);
 
-/* run a task, else schedule it to run as a tasklet, The decision
- * to run or schedule tasklet is based on the parameter sched.
- */
-void rxe_run_task(struct rxe_task *task, int sched);
+/* run a task directly in the calling context */
+void rxe_run_task(struct rxe_task *task);
+
+/* schedule a task to run later as a tasklet */
+void rxe_sched_task(struct rxe_task *task);
 
 /* keep a task from scheduling */
 void rxe_disable_task(struct rxe_task *task);
 
                wr = next;
        }
 
-       rxe_run_task(&qp->req.task, 1);
+       rxe_sched_task(&qp->req.task);
        if (unlikely(qp->req.state == QP_STATE_ERROR))
-               rxe_run_task(&qp->comp.task, 1);
+               rxe_sched_task(&qp->comp.task);
 
        return err;
 }
 
        if (qp->is_user) {
                /* Utilize process context to do protocol processing */
-               rxe_run_task(&qp->req.task, 0);
+               rxe_run_task(&qp->req.task);
                return 0;
        } else
                return rxe_post_send_kernel(qp, wr, bad_wr);
        spin_unlock_irqrestore(&rq->producer_lock, flags);
 
        if (qp->resp.state == QP_STATE_ERROR)
-               rxe_run_task(&qp->resp.task, 1);
+               rxe_sched_task(&qp->resp.task);
 
 err1:
        return err;