if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry_alloc;
 out_ret:
-       ret = block_page_mkwrite_return(err);
+       ret = vmf_fs_error(err);
 out:
        filemap_invalidate_unlock_shared(mapping);
        sb_end_pagefault(inode->i_sb);
 
 
        sb_end_pagefault(inode->i_sb);
 err:
-       return block_page_mkwrite_return(err);
+       return vmf_fs_error(err);
 }
 
 static const struct vm_operations_struct f2fs_file_vm_ops = {
 
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        err = gfs2_glock_nq(&gh);
        if (err) {
-               ret = block_page_mkwrite_return(err);
+               ret = vmf_fs_error(err);
                goto out_uninit;
        }
 
 
        err = gfs2_rindex_update(sdp);
        if (err) {
-               ret = block_page_mkwrite_return(err);
+               ret = vmf_fs_error(err);
                goto out_unlock;
        }
 
        ap.target = data_blocks + ind_blocks;
        err = gfs2_quota_lock_check(ip, &ap);
        if (err) {
-               ret = block_page_mkwrite_return(err);
+               ret = vmf_fs_error(err);
                goto out_unlock;
        }
        err = gfs2_inplace_reserve(ip, &ap);
        if (err) {
-               ret = block_page_mkwrite_return(err);
+               ret = vmf_fs_error(err);
                goto out_quota_unlock;
        }
 
        }
        err = gfs2_trans_begin(sdp, rblocks, 0);
        if (err) {
-               ret = block_page_mkwrite_return(err);
+               ret = vmf_fs_error(err);
                goto out_trans_fail;
        }
 
        if (gfs2_is_stuffed(ip)) {
                err = gfs2_unstuff_dinode(ip);
                if (err) {
-                       ret = block_page_mkwrite_return(err);
+                       ret = vmf_fs_error(err);
                        goto out_trans_end;
                }
        }
 
        err = gfs2_allocate_page_backing(page, length);
        if (err)
-               ret = block_page_mkwrite_return(err);
+               ret = vmf_fs_error(err);
 
 out_page_locked:
        if (ret != VM_FAULT_LOCKED)
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        err = gfs2_glock_nq(&gh);
        if (err) {
-               ret = block_page_mkwrite_return(err);
+               ret = vmf_fs_error(err);
                goto out_uninit;
        }
        ret = filemap_fault(vmf);
 
        return VM_FAULT_LOCKED;
 out_unlock:
        folio_unlock(folio);
-       return block_page_mkwrite_return(ret);
+       return vmf_fs_error(ret);
 }
 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
 
 
        wait_for_stable_page(page);
  out:
        sb_end_pagefault(inode->i_sb);
-       return block_page_mkwrite_return(ret);
+       return vmf_fs_error(ret);
 }
 
 static const struct vm_operations_struct nilfs_file_vm_ops = {
 
                err = block_commit_write(page, 0, end);
        if (err < 0) {
                unlock_page(page);
-               ret = block_page_mkwrite_return(err);
+               ret = vmf_fs_error(err);
                goto out_unlock;
        }
 out_dirty:
 
 int block_commit_write(struct page *page, unsigned from, unsigned to);
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                                get_block_t get_block);
-/* Convert errno to return value from ->page_mkwrite() call */
-static inline vm_fault_t block_page_mkwrite_return(int err)
-{
-       if (err == 0)
-               return VM_FAULT_LOCKED;
-       if (err == -EFAULT || err == -EAGAIN)
-               return VM_FAULT_NOPAGE;
-       if (err == -ENOMEM)
-               return VM_FAULT_OOM;
-       /* -ENOSPC, -EDQUOT, -EIO ... */
-       return VM_FAULT_SIGBUS;
-}
 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
 
 
        return VM_FAULT_SIGBUS;
 }
 
+/*
+ * Convert errno to return value for ->page_mkwrite() calls.
+ *
+ * Mapping: 0 -> VM_FAULT_LOCKED (success, page left locked);
+ * -EFAULT or -EAGAIN -> VM_FAULT_NOPAGE; -ENOMEM -> VM_FAULT_OOM;
+ * any other error (e.g. -ENOSPC, -EDQUOT, -EIO) -> VM_FAULT_SIGBUS.
+ *
+ * This should eventually be merged with vmf_error() above, but will need a
+ * careful audit of all vmf_error() callers.
+ */
+static inline vm_fault_t vmf_fs_error(int err)
+{
+       if (err == 0)
+               return VM_FAULT_LOCKED;
+       if (err == -EFAULT || err == -EAGAIN)
+               return VM_FAULT_NOPAGE;
+       if (err == -ENOMEM)
+               return VM_FAULT_OOM;
+       /* -ENOSPC, -EDQUOT, -EIO ... */
+       return VM_FAULT_SIGBUS;
+}
+
 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                         unsigned int foll_flags);