enum nvme_tcp_queue_flags {
        NVME_TCP_Q_ALLOCATED    = 0,
        NVME_TCP_Q_LIVE         = 1,
+       /* set while nvme_tcp_poll() is reaping the socket itself, so
+        * nvme_tcp_data_ready() knows not to also queue io_work */
+       NVME_TCP_Q_POLLING      = 2,
 };
 
 enum nvme_tcp_recv_state {
 
        read_lock_bh(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
-       if (likely(queue && queue->rd_enabled))
+       /*
+        * If a poller is busy reaping this queue (NVME_TCP_Q_POLLING set by
+        * nvme_tcp_poll()), don't schedule io_work for the same data — the
+        * polling context will consume it directly.
+        */
+       if (likely(queue && queue->rd_enabled) &&
+           !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
                queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
        read_unlock_bh(&sk->sk_callback_lock);
 }
        if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
                return 0;
 
+       /*
+        * Mark the queue as actively polled so the data_ready callback skips
+        * queuing io_work while we busy-loop and reap completions here;
+        * cleared again once nvme_tcp_try_recv() has run.
+        */
+       set_bit(NVME_TCP_Q_POLLING, &queue->flags);
        if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
                sk_busy_loop(sk, true);
        nvme_tcp_try_recv(queue);
+       clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
        return queue->nr_cqe;
 }