refill_obj_stock(objcg, size, true);
 }
 
+static inline size_t obj_full_size(struct kmem_cache *s)
+{
+       /*
+        * For each accounted object there is extra space used to store
+        * its obj_cgroup membership. Charge it too.
+        */
+       return s->size + sizeof(struct obj_cgroup *);
+}
+
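+/*
+ * Charge freshly allocated objects of @s to the obj_cgroup of the current
+ * scope and record that objcg in each object's slabobj_ext. A false return
+ * tells the caller to free the objects back.
+ */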
+bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
+                                 gfp_t flags, size_t size, void **p)
+{
+       struct obj_cgroup *objcg;
+       struct slab *slab;
+       unsigned long off;
+       size_t i;
+
+       /*
+        * The obtained objcg pointer is safe to use within the current scope,
+        * defined by the current task or by a set_active_memcg() pair.
+        * obj_cgroup_get() is used to get a permanent reference.
+        */
+       objcg = current_obj_cgroup();
+       if (!objcg)
+               return true;
+
+       /*
+        * slab_alloc_node() avoids the NULL check, so we might be called with a
+        * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
+        * the whole requested size.
+        * Return success, as there's nothing to free back.
+        */
+       if (unlikely(*p == NULL))
+               return true;
+
+       flags &= gfp_allowed_mask;
+
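+       /*
+        * Objects destined for a memcg-aware list_lru need the lru's
+        * per-memcg structures; allocate them now, and fail the hook if
+        * that allocation fails.
+        */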
+       if (lru) {
+               int ret;
+               struct mem_cgroup *memcg;
+
+               memcg = get_mem_cgroup_from_objcg(objcg);
+               ret = memcg_list_lru_alloc(memcg, lru, flags);
+               css_put(&memcg->css);
+
+               if (ret)
+                       return false;
+       }
+
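+       /* Charge the memcg for all @size objects at once. */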
+       if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
+               return false;
+
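+       /*
+        * Attach the objcg to each object through its slab's obj_exts
+        * vector, taking one objcg reference per object. If the vector
+        * can't be allocated, give that object's charge back.
+        */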
+       for (i = 0; i < size; i++) {
+               slab = virt_to_slab(p[i]);
+
+               if (!slab_obj_exts(slab) &&
+                   alloc_slab_obj_exts(slab, s, flags, false)) {
+                       obj_cgroup_uncharge(objcg, obj_full_size(s));
+                       continue;
+               }
+
+               off = obj_to_index(s, slab, p[i]);
+               obj_cgroup_get(objcg);
+               slab_obj_exts(slab)[off].objcg = objcg;
+               mod_objcg_state(objcg, slab_pgdat(slab),
+                               cache_vmstat_idx(s), obj_full_size(s));
+       }
+
+       return true;
+}
+
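+/*
+ * Uncharge objects being freed from @s and drop the objcg references taken
+ * in __memcg_slab_post_alloc_hook(). Objects that were never accounted
+ * (objcg == NULL) are skipped.
+ */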
+void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+                           void **p, int objects, struct slabobj_ext *obj_exts)
+{
+       for (int i = 0; i < objects; i++) {
+               struct obj_cgroup *objcg;
+               unsigned int off;
+
+               off = obj_to_index(s, slab, p[i]);
+               objcg = obj_exts[off].objcg;
+               if (!objcg)
+                       continue;
+
+               obj_exts[off].objcg = NULL;
+               obj_cgroup_uncharge(objcg, obj_full_size(s));
+               mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
+                               -obj_full_size(s));
+               obj_cgroup_put(objcg);
+       }
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 /*
 
        return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
 }
 
+int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+                        gfp_t gfp, bool new_slab);
+
 #else /* CONFIG_SLAB_OBJ_EXT */
 
 static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
 
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
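+/* Which slab vmstat counter (reclaimable vs. unreclaimable) a cache uses. */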
+static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
+{
+       return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+               NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
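+/* Implemented in mm/memcontrol.c. */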
+bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
+                                 gfp_t flags, size_t size, void **p);
+void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+                           void **p, int objects, struct slabobj_ext *obj_exts);
 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
                     enum node_stat_item idx, int nr);
 #endif
 
 #endif
 #endif /* CONFIG_SLUB_DEBUG */
 
-static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
-{
-       return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-               NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
-}
-
 #ifdef CONFIG_SLAB_OBJ_EXT
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
 #define OBJCGS_CLEAR_MASK      (__GFP_DMA | __GFP_RECLAIMABLE | \
                                __GFP_ACCOUNT | __GFP_NOFAIL)
 
-static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
-                              gfp_t gfp, bool new_slab)
+int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+                       gfp_t gfp, bool new_slab)
 {
        unsigned int objects = objs_per_slab(s, slab);
        unsigned long new_exts;
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
 #ifdef CONFIG_MEMCG_KMEM
-static inline size_t obj_full_size(struct kmem_cache *s)
-{
-       /*
-        * For each accounted object there is an extra space which is used
-        * to store obj_cgroup membership. Charge it too.
-        */
-       return s->size + sizeof(struct obj_cgroup *);
-}
-
-static bool __memcg_slab_post_alloc_hook(struct kmem_cache *s,
-                                        struct list_lru *lru,
-                                        gfp_t flags, size_t size,
-                                        void **p)
-{
-       struct obj_cgroup *objcg;
-       struct slab *slab;
-       unsigned long off;
-       size_t i;
-
-       /*
-        * The obtained objcg pointer is safe to use within the current scope,
-        * defined by current task or set_active_memcg() pair.
-        * obj_cgroup_get() is used to get a permanent reference.
-        */
-       objcg = current_obj_cgroup();
-       if (!objcg)
-               return true;
-
-       /*
-        * slab_alloc_node() avoids the NULL check, so we might be called with a
-        * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
-        * the whole requested size.
-        * return success as there's nothing to free back
-        */
-       if (unlikely(*p == NULL))
-               return true;
-
-       flags &= gfp_allowed_mask;
-
-       if (lru) {
-               int ret;
-               struct mem_cgroup *memcg;
-
-               memcg = get_mem_cgroup_from_objcg(objcg);
-               ret = memcg_list_lru_alloc(memcg, lru, flags);
-               css_put(&memcg->css);
-
-               if (ret)
-                       return false;
-       }
-
-       if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
-               return false;
-
-       for (i = 0; i < size; i++) {
-               slab = virt_to_slab(p[i]);
-
-               if (!slab_obj_exts(slab) &&
-                   alloc_slab_obj_exts(slab, s, flags, false)) {
-                       obj_cgroup_uncharge(objcg, obj_full_size(s));
-                       continue;
-               }
-
-               off = obj_to_index(s, slab, p[i]);
-               obj_cgroup_get(objcg);
-               slab_obj_exts(slab)[off].objcg = objcg;
-               mod_objcg_state(objcg, slab_pgdat(slab),
-                               cache_vmstat_idx(s), obj_full_size(s));
-       }
-
-       return true;
-}
 
 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
 
        return false;
 }
 
-static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-                                  void **p, int objects,
-                                  struct slabobj_ext *obj_exts)
-{
-       for (int i = 0; i < objects; i++) {
-               struct obj_cgroup *objcg;
-               unsigned int off;
-
-               off = obj_to_index(s, slab, p[i]);
-               objcg = obj_exts[off].objcg;
-               if (!objcg)
-                       continue;
-
-               obj_exts[off].objcg = NULL;
-               obj_cgroup_uncharge(objcg, obj_full_size(s));
-               mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
-                               -obj_full_size(s));
-               obj_cgroup_put(objcg);
-       }
-}
-
 static __fastpath_inline
 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
                          int objects)