static struct list_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
 
+/* On-stack deferral record for a thread that chooses to sleep until
+ * the cache item it needs is updated, instead of queueing a deferred
+ * request.  Lives on the waiting thread's stack (see cache_defer_req).
+ */
+struct thread_deferred_req {
+       struct cache_deferred_req handle;
+       /* completed by cache_restart_thread() when ->handle is revisited */
+       struct completion completion;
+};
+/* ->revisit callback for a request deferred via struct
+ * thread_deferred_req: wake the sleeping thread by completing its
+ * on-stack completion.  @too_many is ignored here — the waiter
+ * performs its own cleanup on timeout in cache_defer_req().
+ */
+static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
+{
+       struct thread_deferred_req *dr =
+               container_of(dreq, struct thread_deferred_req, handle);
+       complete(&dr->completion);
+}
+
 static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 {
        struct cache_deferred_req *dreq, *discard;
        int hash = DFR_HASH(item);
+       /* on-stack record used when the caller can sleep
+        * (req->thread_wait != 0) instead of allocating via ->defer()
+        */
+       struct thread_deferred_req sleeper;
 
        if (cache_defer_cnt >= DFR_MAX) {
                /* too much in the cache, randomly drop this one,
                if (net_random()&1)
                        return -ENOMEM;
        }
-       dreq = req->defer(req);
+       if (req->thread_wait) {
+               /* Caller is willing to block: use the on-stack handle and
+                * have ->revisit wake us via the completion, rather than
+                * queueing an allocated deferred request.
+                */
+               dreq = &sleeper.handle;
+               sleeper.completion =
+                       COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
+               dreq->revisit = cache_restart_thread;
+       } else
+               dreq = req->defer(req);
+
+ retry:
        if (dreq == NULL)
                return -ENOMEM;
 
                cache_revisit_request(item);
                return -EAGAIN;
        }
+
+       if (dreq == &sleeper.handle) {
+               /* Synchronous path: block until cache_restart_thread()
+                * completes us, or until thread_wait jiffies elapse.
+                */
+               if (wait_for_completion_interruptible_timeout(
+                           &sleeper.completion, req->thread_wait) <= 0) {
+                       /* The completion wasn't completed, so we need
+                        * to clean up
+                        */
+                       spin_lock(&cache_defer_lock);
+                       if (!list_empty(&sleeper.handle.hash)) {
+                               /* still hashed: nobody else will touch it,
+                                * so unlink it ourselves
+                                */
+                               list_del_init(&sleeper.handle.recent);
+                               list_del_init(&sleeper.handle.hash);
+                               cache_defer_cnt--;
+                               spin_unlock(&cache_defer_lock);
+                       } else {
+                               /* cache_revisit_request already removed
+                                * this from the hash table, but hasn't
+                                * called ->revisit yet.  It will very soon
+                                * and we need to wait for it.
+                                */
+                               spin_unlock(&cache_defer_lock);
+                               wait_for_completion(&sleeper.completion);
+                       }
+               }
+               if (test_bit(CACHE_PENDING, &item->flags)) {
+                       /* item is still pending, try request
+                        * deferral
+                        */
+                       dreq = req->defer(req);
+                       goto retry;
+               }
+               /* only return success if we actually deferred the
+                * request.  In this case we waited until it was
+                * answered so no deferral has happened - rather
+                * an answer already exists.
+                */
+               return -EEXIST;
+       }
        return 0;
 }
 
 
        if (signalled() || kthread_should_stop())
                return -EINTR;
 
+       /* Normally we will wait up to 5 seconds for any required
+        * cache information to be provided.
+        */
+       rqstp->rq_chandle.thread_wait = 5*HZ;
+
        spin_lock_bh(&pool->sp_lock);
        xprt = svc_xprt_dequeue(pool);
        if (xprt) {
                svc_xprt_get(xprt);
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+
+               /* As there is a shortage of threads and this request
+        * had to be queued, don't allow the thread to wait so
+                * long for cache updates.
+                */
+               rqstp->rq_chandle.thread_wait = 1*HZ;
        } else {
                /* No data pending. Go to sleep */
                svc_thread_enqueue(pool, rqstp);