From c62256dda37133a48d56cecc15e4a4d527d4cc46 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Wed, 30 Nov 2022 08:25:46 -0700
Subject: [PATCH] Revert "blk-cgroup: Flush stats at blkgs destruction path"

This reverts commit dae590a6c96c799434e0ff8156ef29b88c257e60.

We've had a few reports of this causing a crash at boot time due to a
reference issue. While this problem seemingly did exist before the
patch and needs solving separately, this patch makes it a lot easier
to trigger.

Link: https://lore.kernel.org/linux-block/CA+QYu4oxiRKC6hJ7F27whXy-PRBx=Tvb+-7TQTONN8qTtV3aDA@mail.gmail.com/
Link: https://lore.kernel.org/linux-block/69af7ccb-6901-c84c-0e95-5682ccfb750c@acm.org/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-cgroup.c     | 15 +--------------
 include/linux/cgroup.h |  1 -
 kernel/cgroup/rstat.c  | 20 --------------------
 3 files changed, 1 insertion(+), 35 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 57941d2a8ba33..3e03c0d132537 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1084,12 +1084,10 @@ struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
  */
 static void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
-	int cpu;
-
 	might_sleep();
 
-	css_get(&blkcg->css);
 	spin_lock_irq(&blkcg->lock);
+
 	while (!hlist_empty(&blkcg->blkg_list)) {
 		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
 						struct blkcg_gq, blkcg_node);
@@ -1112,17 +1110,6 @@ static void blkcg_destroy_blkgs(struct blkcg *blkcg)
 	}
 
 	spin_unlock_irq(&blkcg->lock);
-
-	/*
-	 * Flush all the non-empty percpu lockless lists.
-	 */
-	for_each_possible_cpu(cpu) {
-		struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
-
-		if (!llist_empty(lhead))
-			cgroup_rstat_css_cpu_flush(&blkcg->css, cpu);
-	}
-	css_put(&blkcg->css);
 }
 
 /**
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 6c4e66b3fa848..528bd44b59e28 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -766,7 +766,6 @@ void cgroup_rstat_flush(struct cgroup *cgrp);
 void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
 void cgroup_rstat_flush_hold(struct cgroup *cgrp);
 void cgroup_rstat_flush_release(void);
-void cgroup_rstat_css_cpu_flush(struct cgroup_subsys_state *css, int cpu);
 
 /*
  * Basic resource stats.
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 910e633869b0f..793ecff290385 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -281,26 +281,6 @@ void cgroup_rstat_flush_release(void)
 	spin_unlock_irq(&cgroup_rstat_lock);
 }
 
-/**
- * cgroup_rstat_css_cpu_flush - flush stats for the given css and cpu
- * @css: target css to be flush
- * @cpu: the cpu that holds the stats to be flush
- *
- * A lightweight rstat flush operation for a given css and cpu.
- * Only the cpu_lock is being held for mutual exclusion, the cgroup_rstat_lock
- * isn't used.
- */
-void cgroup_rstat_css_cpu_flush(struct cgroup_subsys_state *css, int cpu)
-{
-	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
-
-	raw_spin_lock_irq(cpu_lock);
-	rcu_read_lock();
-	css->ss->css_rstat_flush(css, cpu);
-	rcu_read_unlock();
-	raw_spin_unlock_irq(cpu_lock);
-}
-
 int cgroup_rstat_init(struct cgroup *cgrp)
 {
 	int cpu;
-- 
2.30.2