net: skbuff: drop the word head from skb cache
authorJakub Kicinski <kuba@kernel.org>
Thu, 9 Feb 2023 06:06:42 +0000 (22:06 -0800)
committerDavid S. Miller <davem@davemloft.net>
Fri, 10 Feb 2023 09:10:28 +0000 (09:10 +0000)
skbuff_head_cache is misnamed (perhaps for historical reasons?)
because it does not hold heads. The head is the buffer that skb->data
points to, and it is also where shinfo lives. struct sk_buff is a
metadata structure, not the head.

Eric recently added skb_small_head_cache (which allocates actual
head buffers); let that serve as an excuse to finally clean this up :)

Leave the user-space visible name intact, it could possibly be uAPI.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/skbuff.h
kernel/bpf/cpumap.c
net/bpf/test_run.c
net/core/skbuff.c
net/core/xdp.c

index c3df3b55da976dba2f5ba72bfa692329479d6750..47ab28a37f2f1f9fb25e575fffe2db1cfd884f65 100644 (file)
@@ -1243,7 +1243,7 @@ static inline void consume_skb(struct sk_buff *skb)
 
 void __consume_stateless_skb(struct sk_buff *skb);
 void  __kfree_skb(struct sk_buff *skb);
-extern struct kmem_cache *skbuff_head_cache;
+extern struct kmem_cache *skbuff_cache;
 
 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
index e0b2d016f0bf996e44aab4c706ddbceb06cc95e5..d2110c1f6fa64da27a0409b5375de17ac8f5de34 100644 (file)
@@ -361,7 +361,7 @@ static int cpu_map_kthread_run(void *data)
                /* Support running another XDP prog on this CPU */
                nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
                if (nframes) {
-                       m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
+                       m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
                        if (unlikely(m == 0)) {
                                for (i = 0; i < nframes; i++)
                                        skbs[i] = NULL; /* effect: xdp_return_frame */
index 8da0d73b368e1d8efa3e9803347d03728c093f2d..2b954326894f3bd51ace00165f75a07e2c9e34b9 100644 (file)
@@ -234,7 +234,7 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
        int i, n;
        LIST_HEAD(list);
 
-       n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
+       n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
        if (unlikely(n == 0)) {
                for (i = 0; i < nframes; i++)
                        xdp_return_frame(frames[i]);
index 70a6088e832682efccf081fa3e6a97cbdeb747ac..13ea10cf8544176dac3dbcf60b1dbff62bf9b506 100644 (file)
@@ -84,7 +84,7 @@
 #include "dev.h"
 #include "sock_destructor.h"
 
-struct kmem_cache *skbuff_head_cache __ro_after_init;
+struct kmem_cache *skbuff_cache __ro_after_init;
 static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
 #ifdef CONFIG_SKB_EXTENSIONS
 static struct kmem_cache *skbuff_ext_cache __ro_after_init;
@@ -285,7 +285,7 @@ static struct sk_buff *napi_skb_cache_get(void)
        struct sk_buff *skb;
 
        if (unlikely(!nc->skb_count)) {
-               nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
+               nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
                                                      GFP_ATOMIC,
                                                      NAPI_SKB_CACHE_BULK,
                                                      nc->skb_cache);
@@ -294,7 +294,7 @@ static struct sk_buff *napi_skb_cache_get(void)
        }
 
        skb = nc->skb_cache[--nc->skb_count];
-       kasan_unpoison_object_data(skbuff_head_cache, skb);
+       kasan_unpoison_object_data(skbuff_cache, skb);
 
        return skb;
 }
@@ -352,7 +352,7 @@ struct sk_buff *slab_build_skb(void *data)
        struct sk_buff *skb;
        unsigned int size;
 
-       skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+       skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
        if (unlikely(!skb))
                return NULL;
 
@@ -403,7 +403,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 {
        struct sk_buff *skb;
 
-       skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+       skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
        if (unlikely(!skb))
                return NULL;
 
@@ -585,7 +585,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        u8 *data;
 
        cache = (flags & SKB_ALLOC_FCLONE)
-               ? skbuff_fclone_cache : skbuff_head_cache;
+               ? skbuff_fclone_cache : skbuff_cache;
 
        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
                gfp_mask |= __GFP_MEMALLOC;
@@ -921,7 +921,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 
        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
-               kmem_cache_free(skbuff_head_cache, skb);
+               kmem_cache_free(skbuff_cache, skb);
                return;
 
        case SKB_FCLONE_ORIG:
@@ -1035,7 +1035,7 @@ static void kfree_skb_add_bulk(struct sk_buff *skb,
        sa->skb_array[sa->skb_count++] = skb;
 
        if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
-               kmem_cache_free_bulk(skbuff_head_cache, KFREE_SKB_BULK_SIZE,
+               kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
                                     sa->skb_array);
                sa->skb_count = 0;
        }
@@ -1060,8 +1060,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
        }
 
        if (sa.skb_count)
-               kmem_cache_free_bulk(skbuff_head_cache, sa.skb_count,
-                                    sa.skb_array);
+               kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
 }
 EXPORT_SYMBOL(kfree_skb_list_reason);
 
@@ -1215,15 +1214,15 @@ static void napi_skb_cache_put(struct sk_buff *skb)
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
        u32 i;
 
-       kasan_poison_object_data(skbuff_head_cache, skb);
+       kasan_poison_object_data(skbuff_cache, skb);
        nc->skb_cache[nc->skb_count++] = skb;
 
        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
                for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
-                       kasan_unpoison_object_data(skbuff_head_cache,
+                       kasan_unpoison_object_data(skbuff_cache,
                                                   nc->skb_cache[i]);
 
-               kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
+               kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
                                     nc->skb_cache + NAPI_SKB_CACHE_HALF);
                nc->skb_count = NAPI_SKB_CACHE_HALF;
        }
@@ -1807,7 +1806,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
                if (skb_pfmemalloc(skb))
                        gfp_mask |= __GFP_MEMALLOC;
 
-               n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+               n = kmem_cache_alloc(skbuff_cache, gfp_mask);
                if (!n)
                        return NULL;
 
@@ -4677,7 +4676,7 @@ static void skb_extensions_init(void) {}
 
 void __init skb_init(void)
 {
-       skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
+       skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
                                              sizeof(struct sk_buff),
                                              0,
                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
@@ -5556,7 +5555,7 @@ void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
 {
        if (head_stolen) {
                skb_release_head_state(skb);
-               kmem_cache_free(skbuff_head_cache, skb);
+               kmem_cache_free(skbuff_cache, skb);
        } else {
                __kfree_skb(skb);
        }
index a5a7ecf6391cc312082007d918986c18f02f9d65..03938fe6d33a92a1584654d646ac71bb6e929b1e 100644 (file)
@@ -603,8 +603,7 @@ EXPORT_SYMBOL_GPL(xdp_warn);
 
 int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
 {
-       n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
-                                     n_skb, skbs);
+       n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
        if (unlikely(!n_skb))
                return -ENOMEM;
 
@@ -673,7 +672,7 @@ struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
 {
        struct sk_buff *skb;
 
-       skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+       skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
        if (unlikely(!skb))
                return NULL;