 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
- * @nonblock - if true don't block waiting for lock
  *
- * this lock must be held if modifying the page group list
+ * this lock must be held when traversing or modifying the page
+ * group list
  *
- * return 0 on success, < 0 on error: -EDELAY if nonblocking or the
- * result from wait_on_bit_lock
- *
- * NOTE: calling with nonblock=false should always have set the
- *       lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
- *       with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
+ * return 0 on success, < 0 on error
  */
 int
-nfs_page_group_lock(struct nfs_page *req, bool nonblock)
+nfs_page_group_lock(struct nfs_page *req)
 {
        struct nfs_page *head = req->wb_head;
 
        if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
                return 0;
 
-       if (!nonblock) {
-               set_bit(PG_CONTENDED1, &head->wb_flags);
-               smp_mb__after_atomic();
-               return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+       set_bit(PG_CONTENDED1, &head->wb_flags);
+       smp_mb__after_atomic();
+       return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
                                TASK_UNINTERRUPTIBLE);
-       }
-
-       return -EAGAIN;
 }
 
 /*
 {
        bool ret;
 
-       nfs_page_group_lock(req, false);
+       nfs_page_group_lock(req);
        ret = nfs_page_group_sync_on_bit_locked(req, bit);
        nfs_page_group_unlock(req);
 
        unsigned int bytes_left = 0;
        unsigned int offset, pgbase;
 
-       nfs_page_group_lock(req, false);
+       nfs_page_group_lock(req);
 
        subreq = req;
        bytes_left = subreq->wb_bytes;
                        if (mirror->pg_recoalesce)
                                return 0;
                        /* retry add_request for this subreq */
-                       nfs_page_group_lock(req, false);
+                       nfs_page_group_lock(req);
                        continue;
                }
 
 
        for (midx = 0; midx < desc->pg_mirror_count; midx++) {
                if (midx) {
-                       nfs_page_group_lock(req, false);
+                       nfs_page_group_lock(req);
 
                        /* find the last request */
                        for (lastreq = req->wb_head;
 
        unsigned int pos = 0;
        unsigned int len = nfs_page_length(req->wb_page);
 
-       nfs_page_group_lock(req, false);
+       nfs_page_group_lock(req);
 
        do {
                tmp = nfs_page_group_search_locked(req->wb_head, pos);
        }
        spin_unlock(&inode->i_lock);
 
-       ret = nfs_page_group_lock(head, false);
+       ret = nfs_page_group_lock(head);
        if (ret < 0) {
                nfs_unlock_and_release_request(head);
                return ERR_PTR(ret);
                        nfs_page_group_unlock(head);
                        ret = nfs_wait_on_request(subreq);
                        if (!ret)
-                               ret = nfs_page_group_lock(head, false);
+                               ret = nfs_page_group_lock(head);
                        if (ret < 0) {
                                nfs_unroll_locks(inode, head, subreq);
                                nfs_release_request(subreq);
 
 extern  int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern void nfs_unlock_and_release_request(struct nfs_page *);
-extern int nfs_page_group_lock(struct nfs_page *, bool);
+extern int nfs_page_group_lock(struct nfs_page *);
 extern void nfs_page_group_unlock(struct nfs_page *);
 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
 extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);