*/
 void
 xfs_trans_log_inode(
-       xfs_trans_t     *tp,
-       xfs_inode_t     *ip,
-       uint            flags)
+       struct xfs_trans        *tp,
+       struct xfs_inode        *ip,
+       uint                    flags)
 {
-       struct inode    *inode = VFS_I(ip);
+       struct xfs_inode_log_item *iip = ip->i_itemp;
+       struct inode            *inode = VFS_I(ip);
+       uint                    iversion_flags = 0;
 
-       ASSERT(ip->i_itemp != NULL);
+       ASSERT(iip);
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
 
+       tp->t_flags |= XFS_TRANS_DIRTY;
+
        /*
         * Don't bother with i_lock for the I_DIRTY_TIME check here, as races
         * don't matter - we either will need an extra transaction in 24 hours
         * to log the timestamps, or will clear already cleared fields in the
         * worst case.
         */
        if (inode->i_state & I_DIRTY_TIME) {
                spin_lock(&inode->i_lock);
                inode->i_state &= ~I_DIRTY_TIME;
                spin_unlock(&inode->i_lock);
        }
 
-       /*
-        * Record the specific change for fdatasync optimisation. This
-        * allows fdatasync to skip log forces for inodes that are only
-        * timestamp dirty. We do this before the change count so that
-        * the core being logged in this case does not impact on fdatasync
-        * behaviour.
-        */
-       ip->i_itemp->ili_fsync_fields |= flags;
-
        /*
         * First time we log the inode in a transaction, bump the inode change
         * counter if it is configured for this to occur. While we have the
         * inode locked exclusively for metadata modification, we can usually
         * avoid setting XFS_ILOG_CORE if no one has queried the value since
         * the last time it was incremented. If we have XFS_ILOG_CORE already
         * set however, then go ahead and bump the i_version counter
         * unconditionally.
         */
-       if (!test_and_set_bit(XFS_LI_DIRTY, &ip->i_itemp->ili_item.li_flags) &&
-           IS_I_VERSION(VFS_I(ip))) {
-               if (inode_maybe_inc_iversion(VFS_I(ip), flags & XFS_ILOG_CORE))
-                       flags |= XFS_ILOG_CORE;
+       if (!test_and_set_bit(XFS_LI_DIRTY, &iip->ili_item.li_flags)) {
+               if (IS_I_VERSION(inode) &&
+                   inode_maybe_inc_iversion(inode, flags & XFS_ILOG_CORE))
+                       iversion_flags = XFS_ILOG_CORE;
        }
 
-       tp->t_flags |= XFS_TRANS_DIRTY;
+       /*
+        * Record the specific change for fdatasync optimisation. This allows
+        * fdatasync to skip log forces for inodes that are only timestamp
+        * dirty.
+        */
+       spin_lock(&iip->ili_lock);
+       iip->ili_fsync_fields |= flags;
 
        /*
-        * Always OR in the bits from the ili_last_fields field.
-        * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
-        * routines in the eventual clearing of the ili_fields bits.
-        * See the big comment in xfs_iflush() for an explanation of
-        * this coordination mechanism.
+        * Always OR in the bits from the ili_last_fields field.  This is to
+        * coordinate with the xfs_iflush() and xfs_iflush_done() routines in
+        * the eventual clearing of the ili_fields bits.  See the big comment in
+        * xfs_iflush() for an explanation of this coordination mechanism.
         */
-       flags |= ip->i_itemp->ili_last_fields;
-       ip->i_itemp->ili_fields |= flags;
+       iip->ili_fields |= (flags | iip->ili_last_fields | iversion_flags);
+       spin_unlock(&iip->ili_lock);
 }
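
The net effect of the rewritten xfs_trans_log_inode() is that every update to
the dirty-state fields now happens in one short critical section under the new
ili_lock. A minimal userspace sketch of that dirtying pattern, modelling
ili_lock with a pthread spinlock (struct and function names here are
hypothetical, not the kernel's):

    /* Dirtying side: the three field words are updated in a single
     * critical section, so a concurrent flush sees either the old or
     * the new values, never a mix.  Initialise the lock with
     * pthread_spin_init() before first use. */
    #include <pthread.h>

    struct ili_model {
            pthread_spinlock_t lock;        /* stands in for ili_lock */
            unsigned int fields;            /* dirty since last flush */
            unsigned int last_fields;       /* dirty set handed to a flush */
            unsigned int fsync_fields;      /* dirty since last fsync */
    };

    static void ili_model_log(struct ili_model *s, unsigned int flags,
                              unsigned int iversion_flags)
    {
            pthread_spin_lock(&s->lock);
            s->fsync_fields |= flags;
            /* fold in fields still in flight plus the change counter bit */
            s->fields |= flags | s->last_fields | iversion_flags;
            pthread_spin_unlock(&s->lock);
    }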
 
 int
 xfs_file_fsync(
        struct file             *file,
        loff_t                  start,
        loff_t                  end,
        int                     datasync)
 {
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_inode_log_item *iip = ip->i_itemp;
        struct xfs_mount        *mp = ip->i_mount;
        int                     error = 0;
        int                     log_flushed = 0;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        if (xfs_ipincount(ip)) {
                if (!datasync ||
-                   (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
-                       lsn = ip->i_itemp->ili_last_lsn;
+                   (iip->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
+                       lsn = iip->ili_last_lsn;
        }
 
        if (lsn) {
                error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
-               ip->i_itemp->ili_fsync_fields = 0;
+               spin_lock(&iip->ili_lock);
+               iip->ili_fsync_fields = 0;
+               spin_unlock(&iip->ili_lock);
        }
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
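
The fsync hunk above reduces to a small predicate: fdatasync may skip the log
force only when nothing but timestamps has been logged since the last sync. A
sketch of that decision (the flag value is illustrative; the real
XFS_ILOG_TIMESTAMP is defined in xfs_log_format.h):

    #define MODEL_ILOG_TIMESTAMP    0x4000  /* illustrative value only */

    static int needs_log_force(unsigned int fsync_fields, int datasync)
    {
            if (!datasync)
                    return 1;       /* fsync(): always wait for the log */
            /* fdatasync(): timestamp-only dirtiness can be skipped */
            return (fsync_fields & ~MODEL_ILOG_TIMESTAMP) != 0;
    }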
 
 
                                continue;
 
                        iip = ip->i_itemp;
+                       spin_lock(&iip->ili_lock);
                        iip->ili_last_fields = iip->ili_fields;
                        iip->ili_fields = 0;
                        iip->ili_fsync_fields = 0;
+                       spin_unlock(&iip->ili_lock);
                        xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
                                                &iip->ili_item.li_lsn);
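
This hunk is the flush-side half of the handoff: the live dirty set moves to
ili_last_fields atomically, marking those fields as in flight to disk.
Continuing the hypothetical userspace model sketched after
xfs_trans_log_inode() above:

    static void ili_model_flush_start(struct ili_model *s)
    {
            pthread_spin_lock(&s->lock);
            s->last_fields = s->fields;     /* hand dirty set to the flush */
            s->fields = 0;
            s->fsync_fields = 0;
            pthread_spin_unlock(&s->lock);
    }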
 
 {
        int                     error;
        struct xfs_icluster     xic = { 0 };
+       struct xfs_inode_log_item *iip = ip->i_itemp;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(VFS_I(ip)->i_nlink == 0);
        ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
 
        /* Don't attempt to replay owner changes for a deleted inode */
-       ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER);
+       spin_lock(&iip->ili_lock);
+       iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
+       spin_unlock(&iip->ili_lock);
 
        /*
         * Bump the generation count so no one will be confused
         * know that the information those bits represent is permanently on
         * disk.  As long as the flush completes before the inode is logged
         * again, then both ili_fields and ili_last_fields will be cleared.
-        *
-        * We can play with the ili_fields bits here, because the inode lock
-        * must be held exclusively in order to set bits there and the flush
-        * lock protects the ili_last_fields bits.  Store the current LSN of the
-        * inode so that we can tell whether the item has moved in the AIL from
-        * xfs_iflush_done().  In order to read the lsn we need the AIL lock,
-        * because it is a 64 bit value that cannot be read atomically.
         */
        error = 0;
 flush_out:
+       spin_lock(&iip->ili_lock);
        iip->ili_last_fields = iip->ili_fields;
        iip->ili_fields = 0;
        iip->ili_fsync_fields = 0;
+       spin_unlock(&iip->ili_lock);
 
+       /*
+        * Store the current LSN of the inode so that we can tell whether the
+        * item has moved in the AIL from xfs_iflush_done().
+        */
        xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
                                &iip->ili_item.li_lsn);
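
The LSN sampled here is what xfs_iflush_done() later compares against the
item's current LSN (the kernel uses XFS_LSN_CMP for the comparison): a
mismatch means the inode was relogged while the flush was in progress, so the
item must stay in the AIL at its new position. A sketch of the check, with
illustrative names and types:

    typedef long long model_lsn_t;

    static int relogged_during_flush(model_lsn_t flush_lsn,
                                     model_lsn_t current_lsn)
    {
            /* equal: the item did not move and completion may remove it
             * from the AIL; unequal: it was relogged at a new LSN */
            return current_lsn != flush_lsn;
    }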
 
 
        iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, 0);
 
        iip->ili_inode = ip;
+       spin_lock_init(&iip->ili_lock);
        xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
                                                &xfs_inode_item_ops);
 }
        list_for_each_entry_safe(blip, n, &tmp, li_bio_list) {
                list_del_init(&blip->li_bio_list);
                iip = INODE_ITEM(blip);
+
+               spin_lock(&iip->ili_lock);
                iip->ili_last_fields = 0;
+               spin_unlock(&iip->ili_lock);
+
                xfs_ifunlock(iip->ili_inode);
        }
        list_del(&tmp);
                 * Clear the inode logging fields so no more flushes are
                 * attempted.
                 */
+               spin_lock(&iip->ili_lock);
                iip->ili_last_fields = 0;
                iip->ili_fields = 0;
                iip->ili_fsync_fields = 0;
+               spin_unlock(&iip->ili_lock);
        }
        /*
         * Release the inode's flush lock since we're done with it.
         */
        xfs_ifunlock(ip);

 struct xfs_inode_log_item {
        struct xfs_log_item     ili_item;          /* common portion */
        struct xfs_inode        *ili_inode;        /* inode ptr */
-       xfs_lsn_t               ili_flush_lsn;     /* lsn at last flush */
-       xfs_lsn_t               ili_last_lsn;      /* lsn at last transaction */
-       unsigned short          ili_lock_flags;    /* lock flags */
+       unsigned short          ili_lock_flags;    /* inode lock flags */
+       /*
+        * The ili_lock protects the interactions between the dirty state and
+        * the flush state of the inode log item. This allows us to do atomic
+        * modifications of multiple state fields without having to hold a
+        * specific inode lock to serialise them.
+        *
+        * We need atomic changes between inode dirtying, inode flushing and
+        * inode completion, but these all hold different combinations of
+        * ILOCK and iflock and hence we need some other method of serialising
+        * updates to the flush state.
+        */
+       spinlock_t              ili_lock;          /* flush state lock */
        unsigned int            ili_last_fields;   /* fields when flushed */
        unsigned int            ili_fields;        /* fields to be logged */
        unsigned int            ili_fsync_fields;  /* logged since last fsync */
+       xfs_lsn_t               ili_flush_lsn;     /* lsn at last flush */
+       xfs_lsn_t               ili_last_lsn;      /* lsn at last transaction */
 };
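
A compact way to read the comment above: ILOCK_EXCL gates who may log the
inode, the flush lock gates who may flush it, and ili_lock alone is what makes
the three field words move together. A hypothetical helper (not part of the
patch) showing the intended access rule for a reader that needs a consistent
snapshot:

    static inline unsigned int
    ili_all_dirty_fields(struct xfs_inode_log_item *iip)
    {
            unsigned int fields;

            /* take ili_lock so ili_fields and ili_last_fields are read
             * as one consistent pair; single-word peeks may stay lockless */
            spin_lock(&iip->ili_lock);
            fields = iip->ili_fields | iip->ili_last_fields;
            spin_unlock(&iip->ili_lock);
            return fields;
    }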
 
 static inline int xfs_inode_clean(xfs_inode_t *ip)