md: factor out a helper from mddev_put()
author Yu Kuai <yukuai3@huawei.com>
Wed, 27 Sep 2023 06:12:40 +0000 (14:12 +0800)
committer Song Liu <song@kernel.org>
Wed, 27 Sep 2023 20:54:26 +0000 (13:54 -0700)
There are no functional changes; this prepares for simplifying
md_seq_ops in the next patch.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20230927061241.1552837-2-yukuai1@huaweicloud.com
drivers/md/md.c

index 76e2cf609883c96f5d7168ca74724285b4caede7..aad91cd72a0600572338ad32e42013974f1abca4 100644
@@ -616,23 +616,28 @@ static inline struct mddev *mddev_get(struct mddev *mddev)
 
 static void mddev_delayed_delete(struct work_struct *ws);
 
+static void __mddev_put(struct mddev *mddev)
+{
+       if (mddev->raid_disks || !list_empty(&mddev->disks) ||
+           mddev->ctime || mddev->hold_active)
+               return;
+
+       /* Array is not configured at all, and not held active, so destroy it */
+       set_bit(MD_DELETED, &mddev->flags);
+
+       /*
+        * Call queue_work inside the spinlock so that flush_workqueue() after
+        * mddev_find will succeed in waiting for the work to be done.
+        */
+       queue_work(md_misc_wq, &mddev->del_work);
+}
+
 void mddev_put(struct mddev *mddev)
 {
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
-       if (!mddev->raid_disks && list_empty(&mddev->disks) &&
-           mddev->ctime == 0 && !mddev->hold_active) {
-               /* Array is not configured at all, and not held active,
-                * so destroy it */
-               set_bit(MD_DELETED, &mddev->flags);
 
-               /*
-                * Call queue_work inside the spinlock so that
-                * flush_workqueue() after mddev_find will succeed in waiting
-                * for the work to be done.
-                */
-               queue_work(md_misc_wq, &mddev->del_work);
-       }
+       __mddev_put(mddev);
        spin_unlock(&all_mddevs_lock);
 }
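
For context, a minimal sketch (not part of this patch) of how a caller that already
holds all_mddevs_lock could drop its reference via the new helper, which is the kind
of simplification the commit message anticipates for md_seq_ops. The function name
example_put_locked is hypothetical and assumes the md.c file scope (struct mddev,
all_mddevs_lock, __mddev_put):

/*
 * Hypothetical illustration: with all_mddevs_lock already held (e.g. by an
 * iterator walking the all_mddevs list), the reference can be dropped and
 * __mddev_put() called directly, skipping mddev_put()'s
 * atomic_dec_and_lock() step.
 */
static void example_put_locked(struct mddev *mddev)
{
	lockdep_assert_held(&all_mddevs_lock);

	if (atomic_dec_and_test(&mddev->active))
		__mddev_put(mddev);
}

Factoring the destroy-or-defer logic into __mddev_put() keeps the locking policy in
the callers while the helper only decides whether the mddev should be torn down.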