mm, slub: validate slab from partial list or page allocator before making it cpu...
author Vlastimil Babka <vbabka@suse.cz>
Tue, 11 May 2021 14:37:51 +0000 (16:37 +0200)
committer Vlastimil Babka <vbabka@suse.cz>
Fri, 3 Sep 2021 23:12:21 +0000 (01:12 +0200)
When we obtain a new slab page from the node partial list or the page
allocator, we assign it to kmem_cache_cpu, perform some checks, and if they
fail, we undo the assignment.

In order to allow doing the checks without irqs disabled, restructure the code
so that the checks are done first, and the kmem_cache_cpu.page assignment is
made only after they pass.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
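
To make the ordering change concrete, here is a small, self-contained C sketch
of the "validate first, publish second" pattern the patch applies. All names in
it (struct cpu_cache, slab_passes_checks(), flush_current_slab(),
try_adopt_slab()) are invented for illustration and are not SLUB APIs; the real
change is in ___slab_alloc() in mm/slub.c, shown in the diff below.

	/*
	 * Illustration only: the types and functions here are stand-ins,
	 * not kernel code. The pattern matches the patch: validate a
	 * candidate slab page first, and only then flush the old cpu slab
	 * and publish the new one.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct slab_page {
		bool corrupted;		/* stand-in for what debug checks catch */
	};

	struct cpu_cache {
		struct slab_page *page;	/* stand-in for kmem_cache_cpu.page */
	};

	static bool slab_passes_checks(const struct slab_page *page)
	{
		/* stand-in for alloc_debug_processing()-style validation */
		return !page->corrupted;
	}

	static void flush_current_slab(struct cpu_cache *c)
	{
		/* stand-in for flush_slab(): retire what the cpu holds now */
		c->page = NULL;
	}

	/* Returns true if the candidate became the cpu slab. */
	static bool try_adopt_slab(struct cpu_cache *c, struct slab_page *candidate)
	{
		/*
		 * Check first: a candidate that fails never touches the cpu
		 * state, so there is no assignment to undo and the current
		 * slab stays in place.
		 */
		if (!slab_passes_checks(candidate))
			return false;

		/* Only now disturb the old cpu slab and publish the new one. */
		if (c->page)
			flush_current_slab(c);
		c->page = candidate;
		return true;
	}

	int main(void)
	{
		struct cpu_cache c = { .page = NULL };
		struct slab_page good = { .corrupted = false };
		struct slab_page bad = { .corrupted = true };

		printf("adopt bad:  %d\n", try_adopt_slab(&c, &bad));	/* 0 */
		printf("adopt good: %d\n", try_adopt_slab(&c, &good));	/* 1 */
		return 0;
	}
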
mm/slub.c

index 7798ba1c614f1ef7a3199e8e39763a1a3ea471da..a5e974defcb7f8d5d812330c807aa76ce182d93d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2802,10 +2802,8 @@ new_objects:
        lockdep_assert_irqs_disabled();
 
        freelist = get_partial(s, gfpflags, node, &page);
-       if (freelist) {
-               c->page = page;
+       if (freelist)
                goto check_new_page;
-       }
 
        local_irq_restore(flags);
        put_cpu_ptr(s->cpu_slab);
@@ -2818,9 +2816,6 @@ new_objects:
        }
 
        local_irq_save(flags);
-       if (c->page)
-               flush_slab(s, c);
-
        /*
         * No other reference to the page yet so we can
         * muck around with it freely without cmpxchg
@@ -2829,14 +2824,12 @@ new_objects:
        page->freelist = NULL;
 
        stat(s, ALLOC_SLAB);
-       c->page = page;
 
 check_new_page:
 
        if (kmem_cache_debug(s)) {
                if (!alloc_debug_processing(s, page, freelist, addr)) {
                        /* Slab failed checks. Next slab needed */
-                       c->page = NULL;
                        local_irq_restore(flags);
                        goto new_slab;
                } else {
@@ -2855,10 +2848,18 @@ check_new_page:
                 */
                goto return_single;
 
+       if (unlikely(c->page))
+               flush_slab(s, c);
+       c->page = page;
+
        goto load_freelist;
 
 return_single:
 
+       if (unlikely(c->page))
+               flush_slab(s, c);
+       c->page = page;
+
        deactivate_slab(s, page, get_freepointer(s, freelist), c);
        local_irq_restore(flags);
        return freelist;
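
A consequence visible in the hunks above: the debug-check failure path under
check_new_page no longer has to clear c->page before retrying via goto
new_slab, and flush_slab() of any pre-existing cpu slab is deferred until the
new page has been accepted, in both the load_freelist and return_single exits.
Keeping the kmem_cache_cpu update in one place after the checks is what allows
the checks themselves to eventually run without irqs disabled, as the commit
message notes.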