blk-cgroup: show global disk stats in root cgroup io.stat
author: Boris Burkov <boris@bur.io>
Mon, 1 Jun 2020 20:12:05 +0000 (13:12 -0700)
committer: Jens Axboe <axboe@kernel.dk>
Sat, 18 Jul 2020 02:18:00 +0000 (20:18 -0600)
In order to improve consistency and usability in cgroup stat accounting,
we would like to support the root cgroup's io.stat.

Since the root cgroup has processes doing io even if the system has no
explicitly created cgroups, we need to be careful to avoid overhead in
that case.  For that reason, the rstat algorithms don't handle the root
cgroup, so just turning the file on wouldn't give correct statistics.

To get around this, we simulate flushing the iostat struct by filling it
out directly from global disk stats. The result is a root cgroup io.stat
file consistent with both /proc/diskstats and io.stat.

Note that in order to collect the disk stats, we needed to iterate over
devices. To facilitate that, we had to change the linkage of a disk_type
to external so that it can be used from blk-cgroup.c to iterate over
disks.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Boris Burkov <boris@bur.io>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Documentation/admin-guide/cgroup-v2.rst
block/blk-cgroup.c
block/genhd.c
include/linux/genhd.h

index ce3e05e41724ae18fb0a3b8d3ae4be79538840a2..2d7c5907e0ced1a907fc7e605900927a8d2010e4 100644 (file)
@@ -1483,8 +1483,7 @@ IO Interface Files
 ~~~~~~~~~~~~~~~~~~
 
   io.stat
-       A read-only nested-keyed file which exists on non-root
-       cgroups.
+       A read-only nested-keyed file.
 
        Lines are keyed by $MAJ:$MIN device numbers and not ordered.
        The following nested keys are defined.
index 696d28151c9ad557db4c6f57ae1198c733dd71bb..619a79b51068cd8aa8fa2a5ba4142c9123de4c18 100644 (file)
@@ -782,12 +782,66 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
        rcu_read_unlock();
 }
 
+/*
+ * The rstat algorithms intentionally don't handle the root cgroup to avoid
+ * incurring overhead when no cgroups are defined. For that reason,
+ * cgroup_rstat_flush in blkcg_print_stat does not actually fill out the
+ * iostat in the root cgroup's blkcg_gq.
+ *
+ * However, we would like to re-use the printing code between the root and
+ * non-root cgroups to the extent possible. For that reason, we simulate
+ * flushing the root cgroup's stats by explicitly filling in the iostat
+ * with disk level statistics.
+ */
+static void blkcg_fill_root_iostats(void)
+{
+       struct class_dev_iter iter;
+       struct device *dev;
+
+       class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+       while ((dev = class_dev_iter_next(&iter))) {
+               struct gendisk *disk = dev_to_disk(dev);
+               struct hd_struct *part = disk_get_part(disk, 0);
+               struct blkcg_gq *blkg = blk_queue_root_blkg(disk->queue);
+               struct blkg_iostat tmp;
+               int cpu;
+
+               memset(&tmp, 0, sizeof(tmp));
+               for_each_possible_cpu(cpu) {
+                       struct disk_stats *cpu_dkstats;
+
+                       cpu_dkstats = per_cpu_ptr(part->dkstats, cpu);
+                       tmp.ios[BLKG_IOSTAT_READ] +=
+                               cpu_dkstats->ios[STAT_READ];
+                       tmp.ios[BLKG_IOSTAT_WRITE] +=
+                               cpu_dkstats->ios[STAT_WRITE];
+                       tmp.ios[BLKG_IOSTAT_DISCARD] +=
+                               cpu_dkstats->ios[STAT_DISCARD];
+                       /* convert sectors to bytes */
+                       tmp.bytes[BLKG_IOSTAT_READ] +=
+                               cpu_dkstats->sectors[STAT_READ] << 9;
+                       tmp.bytes[BLKG_IOSTAT_WRITE] +=
+                               cpu_dkstats->sectors[STAT_WRITE] << 9;
+                       tmp.bytes[BLKG_IOSTAT_DISCARD] +=
+                               cpu_dkstats->sectors[STAT_DISCARD] << 9;
+               }
+               /* publish final totals once so readers never see partial sums */
+               u64_stats_update_begin(&blkg->iostat.sync);
+               blkg_iostat_set(&blkg->iostat.cur, &tmp);
+               u64_stats_update_end(&blkg->iostat.sync);
+       }
+}
+
 static int blkcg_print_stat(struct seq_file *sf, void *v)
 {
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct blkcg_gq *blkg;
 
-       cgroup_rstat_flush(blkcg->css.cgroup);
+       if (!seq_css(sf)->parent)
+               blkcg_fill_root_iostats();
+       else
+               cgroup_rstat_flush(blkcg->css.cgroup);
+
        rcu_read_lock();
 
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
@@ -876,7 +930,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 static struct cftype blkcg_files[] = {
        {
                .name = "stat",
-               .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = blkcg_print_stat,
        },
        { }     /* terminate */
index c42a49f2f53704567022e50a92eec83920e4fcb1..8b1e9f48957cb5c6d296fabba14dbc9f05cad22a 100644 (file)
@@ -38,8 +38,6 @@ static struct kobject *block_depr;
 static DEFINE_SPINLOCK(ext_devt_lock);
 static DEFINE_IDR(ext_devt_idr);
 
-static const struct device_type disk_type;
-
 static void disk_check_events(struct disk_events *ev,
                              unsigned int *clearing_ptr);
 static void disk_alloc_events(struct gendisk *disk);
@@ -1587,7 +1585,7 @@ static char *block_devnode(struct device *dev, umode_t *mode,
        return NULL;
 }
 
-static const struct device_type disk_type = {
+const struct device_type disk_type = {
        .name           = "disk",
        .groups         = disk_attr_groups,
        .release        = disk_release,
index 31a54072ffd653d722ca98bc521ab7516821c293..4ab853461dff25ea28ef760b7efdaf2bd680c0a3 100644 (file)
@@ -24,6 +24,7 @@
 #define disk_to_dev(disk)      (&(disk)->part0.__dev)
 #define part_to_dev(part)      (&((part)->__dev))
 
+extern const struct device_type disk_type;
 extern struct device_type part_type;
 extern struct class block_class;