xfs: clear log incompat feature bits when the log is idle
authorDarrick J. Wong <djwong@kernel.org>
Sun, 8 Aug 2021 15:27:12 +0000 (08:27 -0700)
committerDarrick J. Wong <djwong@kernel.org>
Mon, 9 Aug 2021 22:57:59 +0000 (15:57 -0700)
When there are no ongoing transactions and the log contents have been
checkpointed back into the filesystem, the log performs 'covering',
which is to say that it logs a dummy transaction to record the fact that
the tail has caught up with the head.  This is a good time to clear log
incompat feature flags, because they are flags that are temporarily set
to limit the range of kernels that can replay a dirty log.

Since it's possible that some other higher level thread is about to
start logging items protected by a log incompat flag, we create a rwsem
so that upper level threads can coordinate this with the log.  It would
probably be more performant to use a percpu rwsem, but the ability to
/try/ taking the write lock during covering is critical, and percpu
rwsems do not provide that.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
fs/xfs/xfs_log.c
fs/xfs/xfs_log.h
fs/xfs/xfs_log_priv.h

index e0d4ffce7a8a377503b422d6a601a07aeb48d190..cc2a0ccfcc30e4b03b201d9ed72219847402163e 100644 (file)
@@ -1362,6 +1362,32 @@ xfs_log_work_queue(
                                msecs_to_jiffies(xfs_syncd_centisecs * 10));
 }
 
+/*
+ * Clear the log incompat flags if we have the opportunity.
+ *
+ * This only happens if we're about to log the second dummy transaction as part
+ * of covering the log and we can get the log incompat feature usage lock.
+ */
+static inline void
+xlog_clear_incompat(
+       struct xlog             *log)
+{
+       struct xfs_mount        *mp = log->l_mp;
+
+       /* Nothing to do if no log incompat feature bits are currently set. */
+       if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
+                               XFS_SB_FEAT_INCOMPAT_LOG_ALL))
+               return;
+
+       /* Only clear once covering has logged the second dummy record. */
+       if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
+               return;
+
+       /*
+        * Trylock, not a blocking down_write: if some thread currently holds
+        * the read lock (i.e. is actively using a log incompat feature), skip
+        * clearing rather than stall log covering behind it.
+        */
+       if (!down_write_trylock(&log->l_incompat_users))
+               return;
+
+       xfs_clear_incompat_log_features(mp);
+       up_write(&log->l_incompat_users);
+}
+
 /*
  * Every sync period we need to unpin all items in the AIL and push them to
  * disk. If there is nothing dirty, then we might need to cover the log to
@@ -1388,6 +1414,7 @@ xfs_log_worker(
                 * synchronously log the superblock instead to ensure the
                 * superblock is immediately unpinned and can be written back.
                 */
+               xlog_clear_incompat(log);
                xfs_sync_sb(mp, true);
        } else
                xfs_log_force(mp, 0);
@@ -1475,6 +1502,8 @@ xlog_alloc_log(
        }
        log->l_sectBBsize = 1 << log2_size;
 
+       init_rwsem(&log->l_incompat_users);
+
        xlog_get_iclog_buffer_size(mp, log);
 
        spin_lock_init(&log->l_icloglock);
@@ -3973,3 +4002,23 @@ xfs_log_in_recovery(
 
        return log->l_flags & XLOG_ACTIVE_RECOVERY;
 }
+
+/*
+ * Notify the log that we're about to start using a feature that is protected
+ * by a log incompat feature flag.  This will prevent log covering from
+ * clearing those flags.
+ *
+ * Takes l_incompat_users as a read lock, so multiple feature users can hold
+ * it concurrently; each call must be paired with xlog_drop_incompat_feat().
+ */
+void
+xlog_use_incompat_feat(
+       struct xlog             *log)
+{
+       down_read(&log->l_incompat_users);
+}
+
+/* Notify the log that we've finished using log incompat features. */
+void
+xlog_drop_incompat_feat(
+       struct xlog             *log)
+{
+       /* Releases the read lock taken by xlog_use_incompat_feat(). */
+       up_read(&log->l_incompat_users);
+}
index 813b972e97882914722d52f0671513c0d70e5deb..b274fb9dcd8d6b95ad7e4f8439b094efed815740 100644 (file)
@@ -142,4 +142,7 @@ bool        xfs_log_in_recovery(struct xfs_mount *);
 
 xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
 
+void xlog_use_incompat_feat(struct xlog *log);
+void xlog_drop_incompat_feat(struct xlog *log);
+
 #endif /* __XFS_LOG_H__ */
index f3e79a45d60a6a9423d15abb83d9078ac45822ef..6953f86f866cdd1d8e1a38ef9a8e7d80047c9617 100644 (file)
@@ -456,6 +456,9 @@ struct xlog {
        xfs_lsn_t               l_recovery_lsn;
 
        uint32_t                l_iclog_roundoff;/* padding roundoff */
+
+       /* Users of log incompat features should take a read lock. */
+       struct rw_semaphore     l_incompat_users;
 };
 
 #define XLOG_BUF_CANCEL_BUCKET(log, blkno) \