/* Remove entry from mbcache when EA inode is getting evicted */
 void ext4_evict_ea_inode(struct inode *inode)
 {
-       if (EA_INODE_CACHE(inode))
-               mb_cache_entry_delete(EA_INODE_CACHE(inode),
-                       ext4_xattr_inode_get_hash(inode), inode->i_ino);
+       struct mb_cache_entry *oe;
+
+       if (!EA_INODE_CACHE(inode))
+               return;
+       /* Wait for the entry to become unused so that we can remove it */
+       while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode),
+                       ext4_xattr_inode_get_hash(inode), inode->i_ino))) {
+               mb_cache_entry_wait_unused(oe);
+               mb_cache_entry_put(EA_INODE_CACHE(inode), oe);
+       }
 }
 
 static int
        if (error)
                goto out;
 
+retry_ref:
        lock_buffer(bh);
        hash = le32_to_cpu(BHDR(bh)->h_hash);
        ref = le32_to_cpu(BHDR(bh)->h_refcount);
                 * This must happen under buffer lock for
                 * ext4_xattr_block_set() to reliably detect freed block
                 */
-               if (ea_block_cache)
-                       mb_cache_entry_delete(ea_block_cache, hash,
-                                             bh->b_blocknr);
+               if (ea_block_cache) {
+                       struct mb_cache_entry *oe;
+
+                       oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
+                                                         bh->b_blocknr);
+                       if (oe) {
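+                               /*
+                                * Entry is still in use; drop the buffer
+                                * lock, wait for the entry to get unused
+                                * and retry from retry_ref.
+                                */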
+                               unlock_buffer(bh);
+                               mb_cache_entry_wait_unused(oe);
+                               mb_cache_entry_put(ea_block_cache, oe);
+                               goto retry_ref;
+                       }
+               }
                get_bh(bh);
                unlock_buffer(bh);
 
                         * ext4_xattr_block_set() to reliably detect modified
                         * block
                         */
-                       if (ea_block_cache)
-                               mb_cache_entry_delete(ea_block_cache, hash,
-                                                     bs->bh->b_blocknr);
+                       if (ea_block_cache) {
+                               struct mb_cache_entry *oe;
+
+                               oe = mb_cache_entry_delete_or_get(ea_block_cache,
+                                       hash, bs->bh->b_blocknr);
+                               if (oe) {
+                                       /*
+                                        * Xattr block is getting reused. Leave
+                                        * it alone.
+                                        */
+                                       mb_cache_entry_put(ea_block_cache, oe);
+                                       goto clone_block;
+                               }
+                       }
                        ea_bdebug(bs->bh, "modifying in-place");
                        error = ext4_xattr_set_entry(i, s, handle, inode,
                                                     true /* is_block */);
                                goto cleanup;
                        goto inserted;
                }
+clone_block:
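+               /*
+                * The block is shared or being reused; work on a private
+                * in-memory copy instead of modifying it in place.
+                */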
                unlock_buffer(bs->bh);
                ea_bdebug(bs->bh, "cloning");
                s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS);
                                lock_buffer(new_bh);
                                /*
                                 * We have to be careful about races with
-                                * freeing, rehashing or adding references to
-                                * xattr block. Once we hold buffer lock xattr
-                                * block's state is stable so we can check
-                                * whether the block got freed / rehashed or
-                                * not.  Since we unhash mbcache entry under
-                                * buffer lock when freeing / rehashing xattr
-                                * block, checking whether entry is still
-                                * hashed is reliable. Same rules hold for
-                                * e_reusable handling.
+                                * adding references to the xattr block. Once we
+                                * hold the buffer lock, the xattr block's state
+                                * is stable, so we can check whether the
+                                * additional reference fits.
                                 */
-                               if (hlist_bl_unhashed(&ce->e_hash_list) ||
-                                   !ce->e_reusable) {
+                               ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
+                               if (ref > EXT4_XATTR_REFCOUNT_MAX) {
                                        /*
                                         * Undo everything and check mbcache
                                         * again.
                                        new_bh = NULL;
                                        goto inserted;
                                }
-                               ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
                                BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
-                               if (ref >= EXT4_XATTR_REFCOUNT_MAX)
+                               if (ref == EXT4_XATTR_REFCOUNT_MAX)
                                        ce->e_reusable = 0;
                                ea_bdebug(new_bh, "reusing; refcount now=%d",
                                          ref);