        __memcg_slab_free_hook(s, slab, p, objects, objcgs);
}
+
+static inline
+void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
+                                struct obj_cgroup *objcg)
+{
+       if (objcg)
+               obj_cgroup_uncharge(objcg, objects * obj_full_size(s));
+}
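The new hook undoes the charge taken in slab_pre_alloc_hook(): the whole batch was charged up front as objects * obj_full_size(s), where obj_full_size() is the object size plus the extra space for the obj_cgroup pointer used for accounting, so a bulk allocation that fails outright has to hand the entire amount back. A minimal runnable sketch of that bookkeeping invariant; struct counter, charge(), uncharge() and full_size() are userspace stand-ins invented here, not kernel API:

#include <stdio.h>
#include <stddef.h>

struct counter { size_t charged; };     /* stand-in for struct obj_cgroup */

/* stand-in for obj_full_size(): object plus per-object accounting pointer */
static size_t full_size(size_t object_size)
{
        return object_size + sizeof(void *);
}

static void charge(struct counter *c, size_t bytes)   { c->charged += bytes; }
static void uncharge(struct counter *c, size_t bytes) { c->charged -= bytes; }

int main(void)
{
        struct counter memcg = { 0 };
        size_t object_size = 64, objects = 8;

        /* pre-alloc hook: charge the whole batch before allocating */
        charge(&memcg, objects * full_size(object_size));

        /* ...the bulk allocation fails, producing zero objects... */

        /* error hook: give the whole batch charge back */
        uncharge(&memcg, objects * full_size(object_size));

        printf("outstanding charge: %zu bytes\n", memcg.charged); /* 0 */
        return 0;
}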
#else /* CONFIG_MEMCG_KMEM */
static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
        return NULL;
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                                        void **p, int objects)
{
}
+
+static inline
+void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
+                                struct obj_cgroup *objcg)
+{
+}
#endif /* CONFIG_MEMCG_KMEM */
        return same;
}

+/*
+ * Internal bulk free of objects that were not initialised by the post alloc
+ * hooks and thus should not be processed by the free hooks
+ */
+static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+{
+       if (!size)
+               return;
+
+       do {
+               struct detached_freelist df;
+
+               size = build_detached_freelist(s, size, p, &df);
+               if (!df.slab)
+                       continue;
+
+               do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
+                            _RET_IP_);
+       } while (likely(size));
+}
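The loop's shape comes from build_detached_freelist(): each pass peels off the group of pointers that share one slab (threading them into a freelist and compacting the survivors to the front of p), and df.slab is left NULL when the helper already disposed of the pointer itself (e.g. a KFENCE object), hence the continue. A runnable userspace sketch of just the batching shape, partitioning by page instead of building a real in-object freelist; SLAB_OF, build_batch and the fake pointer values are all invented for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SLAB_OF(p) ((uintptr_t)(p) >> 12)   /* pretend one 4 KiB page == one slab */

/* Partition p[0..size): batch out everything on the last element's slab. */
static size_t build_batch(size_t size, void **p, uintptr_t *slab, size_t *cnt)
{
        size_t kept = 0;

        *slab = SLAB_OF(p[size - 1]);
        *cnt = 0;
        for (size_t i = 0; i < size; i++) {
                if (SLAB_OF(p[i]) == *slab)
                        (*cnt)++;               /* joins this batch */
                else
                        p[kept++] = p[i];       /* left for a later pass */
        }
        return kept;                            /* new size, shrinks every pass */
}

int main(void)
{
        void *p[] = { (void *)0x1008, (void *)0x2010, (void *)0x1020,
                      (void *)0x2030, (void *)0x1040 };
        size_t size = sizeof(p) / sizeof(p[0]);

        while (size) {          /* mirrors the do { } while (likely(size)) loop */
                uintptr_t slab;
                size_t cnt;

                size = build_batch(size, p, &slab, &cnt);
                printf("free %zu object(s) from slab %#lx\n",
                       cnt, (unsigned long)slab);
        }
        return 0;
}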
+
/* Note that interrupts must be enabled when calling this function. */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
        if (!size)
                return;

        do {
                struct detached_freelist df;

                size = build_detached_freelist(s, size, p, &df);
                if (!df.slab)
                        continue;

                slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
                          _RET_IP_);
        } while (likely(size));
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
#ifndef CONFIG_SLUB_TINY
-static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
-                       size_t size, void **p, struct obj_cgroup *objcg)
+static inline
+int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+                           void **p)
{
        struct kmem_cache_cpu *c;
        unsigned long irqflags;
error:
        slub_put_cpu_ptr(s->cpu_slab);
-       slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
-       kmem_cache_free_bulk(s, i, p);
+       __kmem_cache_free_bulk(s, i, p);
        return 0;
}
#else /* CONFIG_SLUB_TINY */
static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
-                                  size_t size, void **p, struct obj_cgroup *objcg)
+                                  size_t size, void **p)
{
        int i;

        for (i = 0; i < size; i++) {
                p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, _RET_IP_,
                                         s->object_size);
                if (unlikely(!p[i]))
                        goto error;

                maybe_wipe_obj_freeptr(s, p[i]);
        }

        return i;
error:
-       slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
-       kmem_cache_free_bulk(s, i, p);
+       __kmem_cache_free_bulk(s, i, p);
        return 0;
}
#endif /* CONFIG_SLUB_TINY */
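Both error paths now make the same substitution: the partially built batch never went through slab_post_alloc_hook(), so freeing it via kmem_cache_free_bulk(), whose free hooks assume fully initialised and accounted objects, would be wrong; the raw __kmem_cache_free_bulk() is used instead. A toy, runnable model of that pairing rule; struct obj, raw_alloc(), post_alloc_hook() and friends are inventions for this sketch, not kernel code:

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj { bool hooked; };

static struct obj *raw_alloc(void)      /* fast-path allocation, no hooks */
{
        return calloc(1, sizeof(struct obj));
}

static void post_alloc_hook(struct obj *o)  /* slab_post_alloc_hook() stand-in */
{
        o->hooked = true;
}

static void raw_free(struct obj *o)     /* __kmem_cache_free_bulk() stand-in */
{
        free(o);
}

static void hooked_free(struct obj *o)  /* kmem_cache_free_bulk() stand-in */
{
        assert(o->hooked);      /* free hooks assume the alloc hooks ran */
        free(o);
}

int main(void)
{
        /* success path: the post-alloc hook ran, the hooked free is legal */
        struct obj *a = raw_alloc();
        post_alloc_hook(a);
        hooked_free(a);

        /* error path: the hook never ran, so only the raw free is safe */
        struct obj *b = raw_alloc();
        raw_free(b);
        return 0;
}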
        if (unlikely(!s))
                return 0;

-       i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
+       i = __kmem_cache_alloc_bulk(s, flags, size, p);

        /*
         * memcg and kmem_cache debug support and memory initialization.
         * Done outside of the IRQ disabled fastpath loop.
         */
-       if (i != 0)
+       if (likely(i != 0)) {
                slab_post_alloc_hook(s, objcg, flags, size, p,
                                     slab_want_init_on_alloc(flags, s), s->object_size);
+       } else {
+               memcg_slab_alloc_error_hook(s, size, objcg);
+       }
+
        return i;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
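The two-way branch above is only correct because __kmem_cache_alloc_bulk() is all-or-nothing after this change: both error paths free whatever was built and return 0, so the caller either post-processes exactly size objects or uncharges exactly size objects, never a partial count. A runnable userspace model of that contract; bulk_alloc() and the fail_at fault-injection parameter are invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

/* Either fills all `size` slots or rolls back and reports 0. */
static size_t bulk_alloc(size_t size, void **p, size_t fail_at)
{
        for (size_t i = 0; i < size; i++) {
                p[i] = (i == fail_at) ? NULL : malloc(32);
                if (!p[i]) {
                        while (i--)     /* free the partial batch */
                                free(p[i]);
                        return 0;       /* caller sees all-or-nothing */
                }
        }
        return size;
}

int main(void)
{
        void *p[4];

        printf("full batch    -> %zu\n", bulk_alloc(4, p, (size_t)-1)); /* 4 */
        for (int i = 0; i < 4; i++)
                free(p[i]);
        printf("failing batch -> %zu\n", bulk_alloc(4, p, 2));          /* 0 */
        return 0;
}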