"sb_internal",
 };
 
-static inline void super_lock(struct super_block *sb, bool excl)
+static inline void __super_lock(struct super_block *sb, bool excl)
 {
        if (excl)
                down_write(&sb->s_umount);
       else
               down_read(&sb->s_umount);
 }
 
-static inline void super_lock_excl(struct super_block *sb)
+static inline void __super_lock_excl(struct super_block *sb)
 {
-       super_lock(sb, true);
-}
-
-static inline void super_lock_shared(struct super_block *sb)
-{
-       super_lock(sb, false);
+       __super_lock(sb, true);
 }
 
 static inline void super_unlock_excl(struct super_block *sb)
 {
        super_unlock(sb, true);
 }
 
+static inline bool wait_born(struct super_block *sb)
+{
+       unsigned int flags;
+
+       /*
+        * Pairs with smp_store_release() in super_wake() and ensures
+        * that we see SB_BORN or SB_DYING after we're woken.
+        */
+       flags = smp_load_acquire(&sb->s_flags);
+       return flags & (SB_BORN | SB_DYING);
+}
+
+/**
+ * super_lock - wait for superblock to become ready and lock it
+ * @sb: superblock to wait for
+ * @excl: whether exclusive access is required
+ *
+ * If the superblock has neither passed through vfs_get_tree() nor
+ * generic_shutdown_super() yet, wait for it to happen. Either superblock
+ * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
+ * woken and we'll see SB_DYING.
+ *
+ * The caller must have acquired a temporary reference on @sb->s_count.
+ *
+ * Return: This returns true if SB_BORN was set, false if SB_DYING was
+ *         set. The function acquires s_umount and returns with it held.
+ */
+static __must_check bool super_lock(struct super_block *sb, bool excl)
+{
+       lockdep_assert_not_held(&sb->s_umount);
+
+relock:
+       __super_lock(sb, excl);
+
+       /*
+        * Has gone through generic_shutdown_super() in the meantime.
+        * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
+        * grab a reference to this. Tell them so.
+        */
+       if (sb->s_flags & SB_DYING)
+               return false;
+
+       /* Has called ->get_tree() successfully. */
+       if (sb->s_flags & SB_BORN)
+               return true;
+
+       super_unlock(sb, excl);
+
+       /* wait until the superblock is ready or dying */
+       wait_var_event(&sb->s_flags, wait_born(sb));
+
+       /*
+        * Neither SB_BORN nor SB_DYING is ever unset, so we never loop.
+        * Just reacquire @sb->s_umount for the caller.
+        */
+       goto relock;
+}
+
+/* wait and acquire read-side of @sb->s_umount */
+static inline bool super_lock_shared(struct super_block *sb)
+{
+       return super_lock(sb, false);
+}
+
+/* wait and acquire write-side of @sb->s_umount */
+static inline bool super_lock_excl(struct super_block *sb)
+{
+       return super_lock(sb, true);
+}
+
+/* wake waiters */
+#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING)
+static void super_wake(struct super_block *sb, unsigned int flag)
+{
+       WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
+       WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);
+
+       /*
+        * Pairs with smp_load_acquire() in super_lock() to make sure
+        * all initializations in the superblock are seen by the user
+        * seeing SB_BORN set.
+        */
+       smp_store_release(&sb->s_flags, sb->s_flags | flag);
+       /*
+        * Pairs with the barrier in prepare_to_wait_event() to make sure
+        * ___wait_var_event() either sees SB_BORN set or
+        * waitqueue_active() check in wake_up_var() sees the waiter.
+        */
+       smp_mb();
+       wake_up_var(&sb->s_flags);
+}
+
 /*
  * One thing we have to be careful of with a per-sb shrinker is that we don't
  * drop the last active reference to the superblock from within the shrinker.
 void deactivate_super(struct super_block *s)
 {
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
-               super_lock_excl(s);
+               __super_lock_excl(s);
                deactivate_locked_super(s);
        }
 }
  */
 static int grab_super(struct super_block *s) __releases(sb_lock)
 {
+       bool born;
+
        s->s_count++;
        spin_unlock(&sb_lock);
-       super_lock_excl(s);
-       if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
+       born = super_lock_excl(s);
+       if (born && atomic_inc_not_zero(&s->s_active)) {
                put_super(s);
                return 1;
        }
 bool super_trylock_shared(struct super_block *sb)
 {
        if (down_read_trylock(&sb->s_umount)) {
-               if (!hlist_unhashed(&sb->s_instances) &&
-                   sb->s_root && (sb->s_flags & SB_BORN))
+               if (!(sb->s_flags & SB_DYING) && sb->s_root &&
+                   (sb->s_flags & SB_BORN))
                        return true;
                super_unlock_shared(sb);
        }
 void retire_super(struct super_block *sb)
 {
        WARN_ON(!sb->s_bdev);
-       super_lock_excl(sb);
+       __super_lock_excl(sb);
        if (sb->s_iflags & SB_I_PERSB_BDI) {
                bdi_unregister(sb->s_bdi);
                sb->s_iflags &= ~SB_I_PERSB_BDI;
        /* should be initialized for __put_super_and_need_restart() */
        hlist_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);
+       /*
+        * Broadcast to everyone that grabbed a temporary reference to this
+        * superblock before we removed it from @fs_supers that the superblock
+        * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
+        * discard this superblock and treat it as dead.
+        */
+       super_wake(sb, SB_DYING);
        super_unlock_excl(sb);
        if (sb->s_bdi != &noop_backing_dev_info) {
                if (sb->s_iflags & SB_I_PERSB_BDI)
        s->s_type = fc->fs_type;
        s->s_iflags |= fc->s_iflags;
        strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
+       /*
+        * Make the superblock visible on @super_blocks and @fs_supers.
+        * It's in a nascent state and users should wait on SB_BORN or
+        * SB_DYING to be set.
+        */
        list_add_tail(&s->s_list, &super_blocks);
        hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
        spin_unlock(&sb_lock);
 
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
-               if (hlist_unhashed(&sb->s_instances))
+               /* Pairs with memory barrier in super_wake(). */
+               if (smp_load_acquire(&sb->s_flags) & SB_DYING)
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);
 
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
-               if (hlist_unhashed(&sb->s_instances))
-                       continue;
+               bool born;
+
                sb->s_count++;
                spin_unlock(&sb_lock);
 
-               super_lock_shared(sb);
-               if (sb->s_root && (sb->s_flags & SB_BORN))
+               born = super_lock_shared(sb);
+               if (born && sb->s_root)
                        f(sb, arg);
                super_unlock_shared(sb);
 
 
        spin_lock(&sb_lock);
        hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
+               bool born;
+
                sb->s_count++;
                spin_unlock(&sb_lock);
 
-               super_lock_shared(sb);
-               if (sb->s_root && (sb->s_flags & SB_BORN))
+               born = super_lock_shared(sb);
+               if (born && sb->s_root)
                        f(sb, arg);
                super_unlock_shared(sb);
 
        if (!bdev)
                return NULL;
 
-restart:
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
-               if (hlist_unhashed(&sb->s_instances))
-                       continue;
                if (sb->s_bdev == bdev) {
                        if (!grab_super(sb))
-                               goto restart;
+                               return NULL;
                        super_unlock_excl(sb);
                        return sb;
                }
        struct super_block *sb;
 
        spin_lock(&sb_lock);
-rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
-               if (hlist_unhashed(&sb->s_instances))
-                       continue;
                if (sb->s_dev ==  dev) {
+                       bool born;
+
                        sb->s_count++;
                        spin_unlock(&sb_lock);
-                       super_lock(sb, excl);
                        /* still alive? */
-                       if (sb->s_root && (sb->s_flags & SB_BORN))
+                       born = super_lock(sb, excl);
+                       if (born && sb->s_root)
                                return sb;
                        super_unlock(sb, excl);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
-                       goto rescan;
+                       break;
                }
        }
        spin_unlock(&sb_lock);
                if (!hlist_empty(&sb->s_pins)) {
                        super_unlock_excl(sb);
                        group_pin_kill(&sb->s_pins);
-                       super_lock_excl(sb);
+                       __super_lock_excl(sb);
                        if (!sb->s_root)
                                return 0;
                        if (sb->s_writers.frozen != SB_UNFROZEN)
 
 static void do_emergency_remount_callback(struct super_block *sb)
 {
-       super_lock_excl(sb);
-       if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
-           !sb_rdonly(sb)) {
+       bool born = super_lock_excl(sb);
+
+       if (born && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
                struct fs_context *fc;
 
                fc = fs_context_for_reconfigure(sb->s_root,
 
 static void do_thaw_all_callback(struct super_block *sb)
 {
-       super_lock_excl(sb);
-       if (sb->s_root && sb->s_flags & SB_BORN) {
+       bool born = super_lock_excl(sb);
+
+       if (born && sb->s_root) {
                emergency_thaw_bdev(sb);
                thaw_super_locked(sb);
        } else {
  */
 static bool super_lock_shared_active(struct super_block *sb)
 {
-       super_lock_shared(sb);
-       if (!sb->s_root ||
-           (sb->s_flags & (SB_ACTIVE | SB_BORN)) != (SB_ACTIVE | SB_BORN)) {
+       bool born = super_lock_shared(sb);
+
+       if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
                super_unlock_shared(sb);
                return false;
        }
                 */
                super_unlock_excl(s);
                error = setup_bdev_super(s, fc->sb_flags, fc);
-               super_lock_excl(s);
+               __super_lock_excl(s);
                if (!error)
                        error = fill_super(s, fc);
                if (error) {
                 */
                super_unlock_excl(s);
                error = setup_bdev_super(s, flags, NULL);
-               super_lock_excl(s);
+               __super_lock_excl(s);
                if (!error)
                        error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
                if (error) {
        WARN_ON(!sb->s_bdi);
 
        /*
-        * Write barrier is for super_cache_count(). We place it before setting
-        * SB_BORN as the data dependency between the two functions is the
-        * superblock structure contents that we just set up, not the SB_BORN
-        * flag.
+        * super_wake() contains a memory barrier which also takes care of
+        * ordering for super_cache_count(). We place it before setting
+        * SB_BORN as the data dependency between the two functions is
+        * the superblock structure contents that we just set up, not
+        * the SB_BORN flag.
         */
-       smp_wmb();
-       sb->s_flags |= SB_BORN;
+       super_wake(sb, SB_BORN);
 
        error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
        if (unlikely(error)) {
        int ret;
 
        atomic_inc(&sb->s_active);
-       super_lock_excl(sb);
+       __super_lock_excl(sb);
        if (sb->s_writers.frozen != SB_UNFROZEN) {
                deactivate_locked_super(sb);
                return -EBUSY;
        /* Release s_umount to preserve sb_start_write -> s_umount ordering */
        super_unlock_excl(sb);
        sb_wait_write(sb, SB_FREEZE_WRITE);
-       super_lock_excl(sb);
+       __super_lock_excl(sb);
 
        /* Now we go and block page faults... */
        sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
  */
 int thaw_super(struct super_block *sb)
 {
-       super_lock_excl(sb);
+       __super_lock_excl(sb);
        return thaw_super_locked(sb);
 }
 EXPORT_SYMBOL(thaw_super);
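
For illustration only, not part of the patch: a minimal sketch of the caller
pattern the reworked helpers establish. example_walk() is a hypothetical
function; super_lock_shared() blocks until SB_BORN or SB_DYING is set, always
returns with @sb->s_umount held, and its boolean return value replaces the old
open-coded SB_BORN check.

static void example_walk(struct super_block *sb)
{
        /* Blocks until SB_BORN or SB_DYING; returns with s_umount held. */
        bool born = super_lock_shared(sb);

        /* Only operate on a superblock that was fully set up. */
        if (born && sb->s_root) {
                /* ... safe to use @sb here ... */
        }

        /* s_umount is held on both return paths and must be dropped. */
        super_unlock_shared(sb);
}

As the super_lock() kerneldoc above notes, the caller is assumed to already
hold a temporary reference on @sb->s_count before calling this.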