srcu: Dynamically allocate srcu_node array
author	Paul E. McKenney <paulmck@kernel.org>
Sat, 22 Jan 2022 00:13:52 +0000 (16:13 -0800)
committer	Paul E. McKenney <paulmck@kernel.org>
Mon, 11 Apr 2022 22:31:02 +0000 (15:31 -0700)
This commit shrinks the srcu_struct structure by converting its ->node
field from a fixed-size compile-time array to a pointer to a dynamically
allocated array.  In kernels built with large values of NR_CPUS that boot
on systems with smaller numbers of CPUs, this can save significant memory.
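As a rough, out-of-tree illustration of the pattern (the structure names, the
MAX_NODES bound, and the helpers below are hypothetical, not taken from the
kernel sources), the change amounts to replacing a worst-case compile-time
array with a pointer that is sized for the booted system and freed at cleanup:

#include <stdlib.h>

#define MAX_NODES 521			/* hypothetical worst-case bound derived from NR_CPUS */

struct node { int grplo, grphi; };

struct state_fixed {
	struct node node[MAX_NODES];	/* always pays for the worst case */
};

struct state_dynamic {
	struct node *node;		/* sized at runtime for the CPUs actually present */
};

static int state_init(struct state_dynamic *sp, int nr_nodes)
{
	sp->node = calloc(nr_nodes, sizeof(*sp->node));
	return sp->node ? 0 : -1;	/* caller can fall back or fail gracefully */
}

static void state_cleanup(struct state_dynamic *sp)
{
	free(sp->node);
	sp->node = NULL;
}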

[ paulmck: Apply kernel test robot feedback. ]

Reported-by: A cast of thousands
Co-developed-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Signed-off-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
include/linux/srcutree.h
kernel/rcu/srcutree.c

index 8d1da136a93a354df2ab64e18ef5785e809e7596..8501b6b4594117566e0d1ea1e37a7af2a221fd67 100644
@@ -60,7 +60,7 @@ struct srcu_node {
  * Per-SRCU-domain structure, similar in function to rcu_state.
  */
 struct srcu_struct {
-       struct srcu_node node[NUM_RCU_NODES];   /* Combining tree. */
+       struct srcu_node *node;                 /* Combining tree. */
        struct srcu_node *level[RCU_NUM_LVLS + 1];
                                                /* First node at each level. */
        int srcu_size_state;                    /* Small-to-big transition state. */
index e23696edd43b0cb022f3866e8b6d10c6f2bc4281..e98cc218e42be4fe81ceec091c718763cf08e593 100644
@@ -24,6 +24,7 @@
 #include <linux/smp.h>
 #include <linux/delay.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/srcu.h>
 
 #include "rcu.h"
@@ -75,12 +76,42 @@ do {                                                                        \
        spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) \
 
 /*
- * Initialize SRCU combining tree.  Note that statically allocated
+ * Initialize SRCU per-CPU data.  Note that statically allocated
  * srcu_struct structures might already have srcu_read_lock() and
  * srcu_read_unlock() running against them.  So if the is_static parameter
  * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
  */
-static void init_srcu_struct_nodes(struct srcu_struct *ssp)
+static void init_srcu_struct_data(struct srcu_struct *ssp)
+{
+       int cpu;
+       struct srcu_data *sdp;
+
+       /*
+        * Initialize the per-CPU srcu_data array, which feeds into the
+        * leaves of the srcu_node tree.
+        */
+       WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
+                    ARRAY_SIZE(sdp->srcu_unlock_count));
+       for_each_possible_cpu(cpu) {
+               sdp = per_cpu_ptr(ssp->sda, cpu);
+               spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
+               rcu_segcblist_init(&sdp->srcu_cblist);
+               sdp->srcu_cblist_invoking = false;
+               sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
+               sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
+               sdp->mynode = NULL;
+               sdp->cpu = cpu;
+               INIT_WORK(&sdp->work, srcu_invoke_callbacks);
+               timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
+               sdp->ssp = ssp;
+       }
+}
+
+/*
+ * Allocate and initialize the SRCU combining tree.  Returns @true if
+ * allocation succeeded and @false otherwise.
+ */
+static bool init_srcu_struct_nodes(struct srcu_struct *ssp)
 {
        int cpu;
        int i;
@@ -92,6 +123,9 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp)
 
        /* Initialize geometry if it has not already been initialized. */
        rcu_init_geometry();
+       ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), GFP_ATOMIC);
+       if (!ssp->node)
+               return false;
 
        /* Work out the overall tree geometry. */
        ssp->level[0] = &ssp->node[0];
@@ -129,30 +163,20 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp)
         * Initialize the per-CPU srcu_data array, which feeds into the
         * leaves of the srcu_node tree.
         */
-       WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
-                    ARRAY_SIZE(sdp->srcu_unlock_count));
        level = rcu_num_lvls - 1;
        snp_first = ssp->level[level];
        for_each_possible_cpu(cpu) {
                sdp = per_cpu_ptr(ssp->sda, cpu);
-               spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
-               rcu_segcblist_init(&sdp->srcu_cblist);
-               sdp->srcu_cblist_invoking = false;
-               sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
-               sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
                sdp->mynode = &snp_first[cpu / levelspread[level]];
                for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
                        if (snp->grplo < 0)
                                snp->grplo = cpu;
                        snp->grphi = cpu;
                }
-               sdp->cpu = cpu;
-               INIT_WORK(&sdp->work, srcu_invoke_callbacks);
-               timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
-               sdp->ssp = ssp;
                sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
        }
        smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
+       return true;
 }
 
 /*
@@ -163,6 +187,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp)
 static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
 {
        ssp->srcu_size_state = SRCU_SIZE_SMALL;
+       ssp->node = NULL;
        mutex_init(&ssp->srcu_cb_mutex);
        mutex_init(&ssp->srcu_gp_mutex);
        ssp->srcu_idx = 0;
@@ -175,8 +200,16 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
                ssp->sda = alloc_percpu(struct srcu_data);
        if (!ssp->sda)
                return -ENOMEM;
-       init_srcu_struct_nodes(ssp);
-       ssp->srcu_size_state = SRCU_SIZE_BIG;
+       init_srcu_struct_data(ssp);
+       if (!init_srcu_struct_nodes(ssp)) {
+               if (!is_static) {
+                       free_percpu(ssp->sda);
+                       ssp->sda = NULL;
+                       return -ENOMEM;
+               }
+       } else {
+               ssp->srcu_size_state = SRCU_SIZE_BIG;
+       }
        ssp->srcu_gp_seq_needed_exp = 0;
        ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
        smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
@@ -393,6 +426,8 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
        }
        free_percpu(ssp->sda);
        ssp->sda = NULL;
+       kfree(ssp->node);
+       ssp->node = NULL;
        ssp->srcu_size_state = SRCU_SIZE_SMALL;
 }
 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
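
Note the fallback in init_srcu_struct_fields() above: if the srcu_node array
cannot be allocated, a dynamically allocated srcu_struct fails cleanly with
-ENOMEM (after freeing ->sda), while a statically allocated one simply stays
in SRCU_SIZE_SMALL state and keeps working without the combining tree.

A caller-side sketch (my_srcu and the module functions are hypothetical;
init_srcu_struct() and cleanup_srcu_struct() are the existing SRCU API):

#include <linux/srcu.h>

static struct srcu_struct my_srcu;	/* hypothetical dynamically initialized SRCU domain */

static int my_module_init(void)
{
	/*
	 * -ENOMEM now covers both the per-CPU srcu_data allocation and
	 * the dynamically allocated srcu_node combining tree.
	 */
	return init_srcu_struct(&my_srcu);
}

static void my_module_exit(void)
{
	/* Frees ->sda and, with this commit, the ->node array as well. */
	cleanup_srcu_struct(&my_srcu);
}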