}

#ifdef CONFIG_SLUB_CPU_PARTIAL
-static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
+static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
{
        struct kmem_cache_node *n = NULL, *n2 = NULL;
        struct slab *slab, *slab_to_discard = NULL;
@@ ... @@
}

/*
- * Unfreeze all the cpu partial slabs.
+ * Put all the cpu partial slabs to the node partial list.
 */
-static void unfreeze_partials(struct kmem_cache *s)
+static void put_partials(struct kmem_cache *s)
{
        struct slab *partial_slab;
        unsigned long flags;
@@ ... @@
        local_unlock_irqrestore(&s->cpu_slab->lock, flags);

        if (partial_slab)
-               __unfreeze_partials(s, partial_slab);
+               __put_partials(s, partial_slab);
}

-static void unfreeze_partials_cpu(struct kmem_cache *s,
-                                  struct kmem_cache_cpu *c)
+static void put_partials_cpu(struct kmem_cache *s,
+                             struct kmem_cache_cpu *c)
{
        struct slab *partial_slab;
@@ ... @@
        c->partial = NULL;

        if (partial_slab)
-               __unfreeze_partials(s, partial_slab);
+               __put_partials(s, partial_slab);
}

/*
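
Both wrappers above share one shape: detach the per-cpu partial list while the local lock is held, then hand the detached chain to __put_partials() only after the lock has been dropped, keeping the per-cpu critical section short. A minimal user-space sketch of that pattern (toy types, and pthread locking standing in for the kernel's local_lock; put_chain_to_node_list() and drain_cpu_partials() are invented names for illustration, not kernel functions):

#include <pthread.h>
#include <stddef.h>

struct slab { struct slab *next; };

struct cpu_cache {
        pthread_mutex_t lock;   /* stands in for local_lock(&s->cpu_slab->lock) */
        struct slab *partial;   /* per-cpu chain of partial slabs */
        int nr_partial;         /* length of that chain */
};

/* Stand-in for __put_partials(): walks the detached chain with no lock held. */
static void put_chain_to_node_list(struct slab *chain)
{
        while (chain) {
                struct slab *next = chain->next;
                /* ...take the node list lock, splice the slab in or discard it... */
                chain = next;
        }
}

/* Same shape as put_partials(): detach under the lock, put outside it. */
static void drain_cpu_partials(struct cpu_cache *c)
{
        struct slab *chain;

        pthread_mutex_lock(&c->lock);
        chain = c->partial;
        c->partial = NULL;
        c->nr_partial = 0;
        pthread_mutex_unlock(&c->lock);

        if (chain)
                put_chain_to_node_list(chain);
}
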
@@ ... @@
static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
{
        struct slab *oldslab;
-       struct slab *slab_to_unfreeze = NULL;
+       struct slab *slab_to_put = NULL;
        unsigned long flags;
        int slabs = 0;
@@ ... @@
                         * per node partial list. Postpone the actual unfreezing
                         * outside of the critical section.
                         */
-                       slab_to_unfreeze = oldslab;
+                       slab_to_put = oldslab;
                        oldslab = NULL;
                } else {
                        slabs = oldslab->slabs;
@@ ... @@
        local_unlock_irqrestore(&s->cpu_slab->lock, flags);

-       if (slab_to_unfreeze) {
-               __unfreeze_partials(s, slab_to_unfreeze);
+       if (slab_to_put) {
+               __put_partials(s, slab_to_put);
                stat(s, CPU_PARTIAL_DRAIN);
        }
}
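
put_cpu_partial() applies the same discipline: while the local lock is held it only swaps pointers, and when the partial array is full it merely remembers the old chain in slab_to_put, handing it to __put_partials() after local_unlock_irqrestore(). A sketch of that deferral, reusing the toy types from the previous example (add_cpu_partial() and the limit parameter are likewise invented for illustration):

/* Same shape as put_cpu_partial(): only pointer swaps happen under the
 * lock; the expensive list work is deferred until after the unlock. */
static void add_cpu_partial(struct cpu_cache *c, struct slab *slab, int limit)
{
        struct slab *to_put = NULL;

        pthread_mutex_lock(&c->lock);
        if (c->nr_partial >= limit) {
                /* Array is full: defer putting the old chain until unlock. */
                to_put = c->partial;
                c->partial = NULL;
                c->nr_partial = 0;
        }
        slab->next = c->partial;
        c->partial = slab;
        c->nr_partial++;
        pthread_mutex_unlock(&c->lock);

        if (to_put)
                put_chain_to_node_list(to_put);
}
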
#else /* CONFIG_SLUB_CPU_PARTIAL */

-static inline void unfreeze_partials(struct kmem_cache *s) { }
-static inline void unfreeze_partials_cpu(struct kmem_cache *s,
-                                  struct kmem_cache_cpu *c) { }
+static inline void put_partials(struct kmem_cache *s) { }
+static inline void put_partials_cpu(struct kmem_cache *s,
+                                    struct kmem_cache_cpu *c) { }

#endif /* CONFIG_SLUB_CPU_PARTIAL */
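
The !CONFIG_SLUB_CPU_PARTIAL stubs keep every call site free of #ifdefs: __flush_cpu_slab() and the flush_cpu_slab() worker in the hunks below call put_partials_cpu() and put_partials() unconditionally, and the empty inlines compile away. The general idiom, in a self-contained toy form (CONFIG_FEATURE, struct ctx, and do_feature_work() are invented names):

struct ctx { int dummy; };

#ifdef CONFIG_FEATURE
void do_feature_work(struct ctx *c);                    /* real version elsewhere */
#else
static inline void do_feature_work(struct ctx *c) { }  /* compiles to nothing */
#endif
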
@@ ... @@
                stat(s, CPUSLAB_FLUSH);
        }

-       unfreeze_partials_cpu(s, c);
+       put_partials_cpu(s, c);
}

struct slub_flush_work {
@@ ... @@
        if (c->slab)
                flush_slab(s, c);

-       unfreeze_partials(s);
+       put_partials(s);
}

static bool has_cpu_slab(int cpu, struct kmem_cache *s)
@@ ... @@
                if (unlikely(!node_match(slab, node) ||
                             !pfmemalloc_match(slab, gfpflags))) {
                        slab->next = NULL;
-                       __unfreeze_partials(s, slab);
+                       __put_partials(s, slab);
                        continue;
                }