xfs: introduce all-mounts list for cpu hotplug notifications
author		Dave Chinner <dchinner@redhat.com>
		Fri, 6 Aug 2021 18:05:38 +0000 (11:05 -0700)
committer	Darrick J. Wong <djwong@kernel.org>
		Fri, 6 Aug 2021 18:05:38 +0000 (11:05 -0700)
The inode inactivation and CIL tracking percpu structures are
per-xfs_mount structures. That means when we get a CPU dead
notification, we then need to iterate all the per-cpu structure
instances to process them. Rather than keeping linked lists of
per-cpu structures in each subsystem, add a list of all xfs_mounts
that the generic xfs_cpu_dead() function will iterate and call into
each subsystem appropriately.

This allows us to handle both per-mount and global XFS percpu state
from xfs_cpu_dead(), and avoids the need to link subsystem
structures that can be easily found from the xfs_mount into their
own global lists.
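
[Annotation: xfs_cpu_dead() itself is wired into the hotplug core by an
earlier patch in this series; this patch only fills in what the handler
iterates. A minimal sketch of that registration, where the
CPUHP_XFS_DEAD state and the "xfs:dead" name are taken from that
earlier patch, not from this one:

        #include <linux/cpuhotplug.h>

        static int __init
        xfs_cpu_hotplug_init(void)
        {
                /*
                 * NULL startup callback: nothing to do when a CPU comes
                 * online. xfs_cpu_dead() runs after a CPU has gone
                 * offline.
                 */
                return cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD,
                                "xfs:dead", NULL, xfs_cpu_dead);
        }
]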

Signed-off-by: Dave Chinner <dchinner@redhat.com>
[djwong: expand some comments about mount list setup ordering rules]
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
fs/xfs/xfs_mount.h
fs/xfs/xfs_super.c

diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index c78b63fe779aaeb417c6c4949a2694f1fe4034d9..ed7064596f94a2a6b1cafc3bbb5c969b8a6f7ddb 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -82,6 +82,7 @@ typedef struct xfs_mount {
        xfs_buftarg_t           *m_ddev_targp;  /* saves taking the address */
        xfs_buftarg_t           *m_logdev_targp;/* ptr to log device */
        xfs_buftarg_t           *m_rtdev_targp; /* ptr to rt device */
+       struct list_head        m_mount_list;   /* global mount list */
        /*
         * Optional cache of rt summary level per bitmap block with the
         * invariant that m_rsum_cache[bbno] <= the minimum i for which
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index d47fac7c8afd749da2279b58846585d710527528..c2c9c02b9d6242313a0f27103a6ec502d783c67b 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -49,6 +49,28 @@ static struct kset *xfs_kset;                /* top-level xfs sysfs dir */
 static struct xfs_kobj xfs_dbg_kobj;   /* global debug sysfs attrs */
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+static LIST_HEAD(xfs_mount_list);
+static DEFINE_SPINLOCK(xfs_mount_list_lock);
+
+static inline void xfs_mount_list_add(struct xfs_mount *mp)
+{
+       spin_lock(&xfs_mount_list_lock);
+       list_add(&mp->m_mount_list, &xfs_mount_list);
+       spin_unlock(&xfs_mount_list_lock);
+}
+
+static inline void xfs_mount_list_del(struct xfs_mount *mp)
+{
+       spin_lock(&xfs_mount_list_lock);
+       list_del(&mp->m_mount_list);
+       spin_unlock(&xfs_mount_list_lock);
+}
+#else /* !CONFIG_HOTPLUG_CPU */
+static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
+static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
+#endif
+
 enum xfs_dax_mode {
        XFS_DAX_INODE = 0,
        XFS_DAX_ALWAYS = 1,
@@ -1038,6 +1060,7 @@ xfs_fs_put_super(
 
        xfs_freesb(mp);
        free_percpu(mp->m_stats.xs_stats);
+       xfs_mount_list_del(mp);
        xfs_destroy_percpu_counters(mp);
        xfs_destroy_mount_workqueues(mp);
        xfs_close_devices(mp);
@@ -1409,6 +1432,13 @@ xfs_fs_fill_super(
        if (error)
                goto out_destroy_workqueues;
 
+       /*
+        * All percpu data structures requiring cleanup when a cpu goes offline
+        * must be allocated before adding this @mp to the cpu-dead handler's
+        * mount list.
+        */
+       xfs_mount_list_add(mp);
+
        /* Allocate stats memory before we do operations that might use it */
        mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
        if (!mp->m_stats.xs_stats) {
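
[Annotation: the ordering rule in the new comment is load-bearing. Once
@mp is on xfs_mount_list, a CPU can die at any moment and xfs_cpu_dead()
will walk this mount, so any per-cpu state the dead handler must clean
up has to be allocated first. The percpu stats allocation can safely
follow the list_add because the dead handler never touches it. The
teardown paths observe the mirror-image ordering: xfs_mount_list_del()
runs before xfs_destroy_percpu_counters() in both xfs_fs_put_super()
and the fill_super error path below.]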
@@ -1617,6 +1647,7 @@ xfs_fs_fill_super(
  out_free_stats:
        free_percpu(mp->m_stats.xs_stats);
  out_destroy_counters:
+       xfs_mount_list_del(mp);
        xfs_destroy_percpu_counters(mp);
  out_destroy_workqueues:
        xfs_destroy_mount_workqueues(mp);
@@ -2116,6 +2147,15 @@ static int
 xfs_cpu_dead(
        unsigned int            cpu)
 {
+       struct xfs_mount        *mp, *n;
+
+       spin_lock(&xfs_mount_list_lock);
+       list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
+               spin_unlock(&xfs_mount_list_lock);
+               /* xfs_subsys_dead(mp, cpu); */
+               spin_lock(&xfs_mount_list_lock);
+       }
+       spin_unlock(&xfs_mount_list_lock);
        return 0;
 }
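
[Annotation: the loop body is deliberately a stub at this point in the
series; the commented-out xfs_subsys_dead() marks where per-mount
callbacks land. The list lock is dropped around the per-mount work so
those callbacks are free to block. A sketch of the filled-in loop, with
callback names suggested by the two subsystems the commit message cites
(illustrative; neither is added by this patch):

        spin_lock(&xfs_mount_list_lock);
        list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
                spin_unlock(&xfs_mount_list_lock);
                /* drain @cpu's deferred inode inactivation queue */
                xfs_inodegc_cpu_dead(mp, cpu);
                /* fold @cpu's percpu CIL tracking back into the CIL */
                xlog_cil_pcp_dead(mp->m_log, cpu);
                spin_lock(&xfs_mount_list_lock);
        }
        spin_unlock(&xfs_mount_list_lock);
]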