From 5a8a3c1f73c6488d1a2c18ac1f5308b1fd2aa5f0 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Tue, 15 Nov 2022 10:50:28 +0100
Subject: [PATCH] mm, slub: retain no free slabs on partial list with
 CONFIG_SLUB_TINY

SLUB will leave a number of slabs on the partial list even if they are
empty, to avoid some slab freeing and reallocation. The goal of
CONFIG_SLUB_TINY is to minimize memory overhead, so set the limits to 0
for immediate slab page freeing.

Signed-off-by: Vlastimil Babka
Acked-by: Roman Gushchin
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: Mike Rapoport
Reviewed-by: Christoph Lameter
---
 mm/slub.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/mm/slub.c b/mm/slub.c
index b81ceeb6e6de2..19b6cf74bdfc3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -241,6 +241,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 /* Enable to log cmpxchg failures */
 #undef SLUB_DEBUG_CMPXCHG
 
+#ifndef CONFIG_SLUB_TINY
 /*
  * Minimum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
@@ -253,6 +254,10 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
  * sort the partial list by the number of objects in use.
  */
 #define MAX_PARTIAL 10
+#else
+#define MIN_PARTIAL 0
+#define MAX_PARTIAL 0
+#endif
 
 #define DEBUG_DEFAULT_FLAGS	(SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
 				SLAB_POISON | SLAB_STORE_USER)
-- 
2.30.2
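
[Editorial note, not part of the patch] The defines above only set clamp
bounds; the per-cache value s->min_partial is derived elsewhere in
mm/slub.c (set_min_partial(), fed roughly by ilog2(s->size) / 2 at cache
creation, as of this patch's baseline), and empty slabs are only kept
while the node's nr_partial stays at or above it. The standalone
user-space sketch below paraphrases that clamping to show why the
CONFIG_SLUB_TINY limits of 0 mean empty slabs are discarded immediately.
The requested value, the cache size, and MIN_PARTIAL = 5 for the default
build are assumptions drawn from the surrounding kernel source, not from
this patch.

/*
 * Standalone illustration (user-space C, not kernel code): models the
 * clamping that set_min_partial() applies between MIN_PARTIAL and
 * MAX_PARTIAL when a SLUB cache is created. Treat it as a sketch of
 * the effect of this patch, not as the kernel implementation.
 */
#include <stdio.h>

static unsigned long clamp_min_partial(unsigned long requested,
					unsigned long min_partial,
					unsigned long max_partial)
{
	/* Mirrors the clamp into [MIN_PARTIAL, MAX_PARTIAL] */
	if (requested < min_partial)
		requested = min_partial;
	else if (requested > max_partial)
		requested = max_partial;
	return requested;
}

int main(void)
{
	/* Stand-in for ilog2(s->size) / 2, e.g. a 256-byte cache -> 8 / 2 */
	unsigned long requested = 4;

	/* Default build (MIN_PARTIAL 5, MAX_PARTIAL 10): empty slabs may linger */
	printf("default SLUB: s->min_partial = %lu\n",
	       clamp_min_partial(requested, 5, 10));

	/* CONFIG_SLUB_TINY (both 0): the clamp collapses to 0 */
	printf("SLUB_TINY:    s->min_partial = %lu\n",
	       clamp_min_partial(requested, 0, 0));

	return 0;
}

With s->min_partial forced to 0, the "keep this empty slab on the partial
list?" comparison in the freeing path can never favour keeping it, so the
slab page is returned to the page allocator right away, which is the
memory-overhead reduction the commit message describes.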