net: page_pool: fix recycle stats for system page_pool allocator
author Lorenzo Bianconi <lorenzo@kernel.org>
Fri, 16 Feb 2024 09:25:43 +0000 (10:25 +0100)
committer Jakub Kicinski <kuba@kernel.org>
Mon, 19 Feb 2024 20:30:27 +0000 (12:30 -0800)
Use global percpu page_pool_recycle_stats counter for system page_pool
allocator instead of allocating a separate percpu variable for each
(also percpu) page pool instance.

Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/87f572425e98faea3da45f76c3c68815c01a20ee.1708075412.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/page_pool/types.h
net/core/dev.c
net/core/page_pool.c
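
For context before the diff: a minimal sketch of how one of the per-CPU system pools gets created with the new flag. The wrapper name below and the use of SYSTEM_PERCPU_PAGE_POOL_SIZE outside net/core/dev.c are illustrative only; page_pool_create_percpu() and the params fields mirror what net_page_pool_create() does in the net/core/dev.c hunk below.

#include <net/page_pool/types.h>

/* Illustrative wrapper: create one per-CPU system page pool.  With
 * PP_FLAG_SYSTEM_POOL set, page_pool_init() points pool->recycle_stats
 * at the shared static pp_system_recycle_stats percpu variable instead
 * of calling alloc_percpu() for this instance.
 */
static struct page_pool *create_system_pool(int cpuid)
{
	struct page_pool_params params = {
		.pool_size	= SYSTEM_PERCPU_PAGE_POOL_SIZE,
		.flags		= PP_FLAG_SYSTEM_POOL,
		.nid		= NUMA_NO_NODE,
	};

	return page_pool_create_percpu(&params, cpuid);
}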

index 3590fbe6e3f15477b63bc2362ec360bdc93dd8db..5e43a08d3231c177ef9a35763db57efa85ffda02 100644 (file)
@@ -18,8 +18,9 @@
                                        * Please note DMA-sync-for-CPU is still
                                        * device driver responsibility
                                        */
-#define PP_FLAG_ALL            (PP_FLAG_DMA_MAP |\
-                                PP_FLAG_DMA_SYNC_DEV)
+#define PP_FLAG_SYSTEM_POOL    BIT(2) /* Global system page_pool */
+#define PP_FLAG_ALL            (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
+                                PP_FLAG_SYSTEM_POOL)
 
 /*
  * Fast allocation side cache array/stack
index cc9c2eda65aca62bbb1c08b936520936e51f596e..c588808be77f563c429eb4a2eaee5c8062d99582 100644 (file)
@@ -11738,6 +11738,7 @@ static int net_page_pool_create(int cpuid)
 #if IS_ENABLED(CONFIG_PAGE_POOL)
        struct page_pool_params page_pool_params = {
                .pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
+               .flags = PP_FLAG_SYSTEM_POOL,
                .nid = NUMA_NO_NODE,
        };
        struct page_pool *pp_ptr;
index e8b9399d8e32494c9d093b80c421646e666af832..d706fe5548dfe025ee2cff52c3d08870b4cd90f2 100644 (file)
@@ -31,6 +31,8 @@
 #define BIAS_MAX       (LONG_MAX >> 1)
 
 #ifdef CONFIG_PAGE_POOL_STATS
+static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);
+
 /* alloc_stat_inc is intended to be used in softirq context */
 #define alloc_stat_inc(pool, __stat)   (pool->alloc_stats.__stat++)
 /* recycle_stat_inc is safe to use when preemption is possible. */
@@ -220,14 +222,23 @@ static int page_pool_init(struct page_pool *pool,
        pool->has_init_callback = !!pool->slow.init_callback;
 
 #ifdef CONFIG_PAGE_POOL_STATS
-       pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
-       if (!pool->recycle_stats)
-               return -ENOMEM;
+       if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL)) {
+               pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+               if (!pool->recycle_stats)
+                       return -ENOMEM;
+       } else {
+               /* For system page pool instance we use a singular stats object
+                * instead of allocating a separate percpu variable for each
+                * (also percpu) page pool instance.
+                */
+               pool->recycle_stats = &pp_system_recycle_stats;
+       }
 #endif
 
        if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
 #ifdef CONFIG_PAGE_POOL_STATS
-               free_percpu(pool->recycle_stats);
+               if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
+                       free_percpu(pool->recycle_stats);
 #endif
                return -ENOMEM;
        }
@@ -251,7 +262,8 @@ static void page_pool_uninit(struct page_pool *pool)
                put_device(pool->p.dev);
 
 #ifdef CONFIG_PAGE_POOL_STATS
-       free_percpu(pool->recycle_stats);
+       if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
+               free_percpu(pool->recycle_stats);
 #endif
 }
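
Why a single static per-cpu object is enough: the recycle_stat_inc()/recycle_stat_add() macros bump pool->recycle_stats with this_cpu_*() operations, so every system pool now increments the same per-CPU slots and the counters are effectively aggregated across all system pools. A rough sketch of reading them back, assuming kernel context inside net/core/page_pool.c (the helper below is hypothetical and not part of this patch):

/* Hypothetical helper: sum the shared system recycle stats over all CPUs. */
static void dump_system_recycle_stats(void)
{
	u64 cached = 0, cache_full = 0, ring = 0, ring_full = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *s =
			per_cpu_ptr(&pp_system_recycle_stats, cpu);

		cached     += s->cached;
		cache_full += s->cache_full;
		ring       += s->ring;
		ring_full  += s->ring_full;
	}

	pr_info("system page_pool recycle: cached=%llu cache_full=%llu ring=%llu ring_full=%llu\n",
		cached, cache_full, ring, ring_full);
}

The same per-CPU walk already happens in page_pool_get_stats(); with this patch, calling it on any pool created with PP_FLAG_SYSTEM_POOL simply reports the combined recycle counters of all system pools.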