net: page_pool: split the page_pool_params into fast and slow
author Jakub Kicinski <kuba@kernel.org>
Tue, 21 Nov 2023 00:00:34 +0000 (16:00 -0800)
committer Jakub Kicinski <kuba@kernel.org>
Wed, 22 Nov 2023 01:22:29 +0000 (17:22 -0800)
struct page_pool is rather performance-critical, and we currently
use 16B of the first cache line to store 2 pointers used only
by test code. Future patches will add more informational
(non-fast-path) attributes.

It's convenient for users of the API not to have to worry about
which fields are fast path and which are slow path. Use struct
groups to split the params into the two categories internally.
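
For context, struct_group_tagged() (defined in include/linux/stddef.h)
wraps a run of members in an anonymous union holding both the bare
members and a struct tagged with the given name, so existing users keep
writing params.flags unchanged while page_pool_init() can memcpy() the
fast and slow groups as units. Below is a minimal, self-contained
userspace sketch of the pattern; the names (struct params, pool_init,
etc.) are illustrative, not the kernel macro or the page_pool code
itself:

	/*
	 * Sketch of the struct_group_tagged() layout trick: each
	 * anonymous union overlays the bare members with an identically
	 * laid out tagged struct, giving two ways to address the same
	 * storage. Names are hypothetical.
	 */
	#include <string.h>

	struct params_fast {
		unsigned int flags;
		unsigned int order;
	};

	struct params_slow {
		void *init_arg;
	};

	struct params {
		union {		/* fast-path group */
			struct {
				unsigned int flags;
				unsigned int order;
			};
			struct params_fast fast;
		};
		union {		/* slow/control-path group */
			struct {
				void *init_arg;
			};
			struct params_slow slow;
		};
	};

	struct pool {
		struct params_fast p;	/* hot: first cache line */
		/* ... fast-path state ... */
		struct params_slow slow;	/* cold: at the tail */
	};

	static void pool_init(struct pool *pool, const struct params *params)
	{
		/* Copy each group to its own spot, as page_pool_init() does. */
		memcpy(&pool->p, &params->fast, sizeof(pool->p));
		memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
	}

Callers still fill in a single struct params (p.flags = ...,
p.init_arg = ...), while the pool keeps only the fast group in its
first cache line and pushes the test-only pointers to the end.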

Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Link: https://lore.kernel.org/r/20231121000048.789613-2-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/page_pool/types.h
net/core/page_pool.c

index 6fc5134095ed19503bcecd3e223116cf49350566..23950fcc4eca3320404365a8775075662fbc0aa2 100644
@@ -54,18 +54,22 @@ struct pp_alloc_cache {
  * @offset:    DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
  */
 struct page_pool_params {
-       unsigned int    flags;
-       unsigned int    order;
-       unsigned int    pool_size;
-       int             nid;
-       struct device   *dev;
-       struct napi_struct *napi;
-       enum dma_data_direction dma_dir;
-       unsigned int    max_len;
-       unsigned int    offset;
+       struct_group_tagged(page_pool_params_fast, fast,
+               unsigned int    flags;
+               unsigned int    order;
+               unsigned int    pool_size;
+               int             nid;
+               struct device   *dev;
+               struct napi_struct *napi;
+               enum dma_data_direction dma_dir;
+               unsigned int    max_len;
+               unsigned int    offset;
+       );
+       struct_group_tagged(page_pool_params_slow, slow,
 /* private: used by test code only */
-       void (*init_callback)(struct page *page, void *arg);
-       void *init_arg;
+               void (*init_callback)(struct page *page, void *arg);
+               void *init_arg;
+       );
 };
 
 #ifdef CONFIG_PAGE_POOL_STATS
@@ -119,7 +123,7 @@ struct page_pool_stats {
 #endif
 
 struct page_pool {
-       struct page_pool_params p;
+       struct page_pool_params_fast p;
 
        long frag_users;
        struct page *frag_page;
@@ -178,6 +182,9 @@ struct page_pool {
        refcount_t user_cnt;
 
        u64 destroy_cnt;
+
+       /* Slow/Control-path information follows */
+       struct page_pool_params_slow slow;
 };
 
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
index dec5443372360bb462a937724d5747ab676566f9..ab22a2fdae5780451e6b38b9cd087d493e885575 100644
@@ -173,7 +173,8 @@ static int page_pool_init(struct page_pool *pool,
 {
        unsigned int ring_qsize = 1024; /* Default */
 
-       memcpy(&pool->p, params, sizeof(pool->p));
+       memcpy(&pool->p, &params->fast, sizeof(pool->p));
+       memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
 
        /* Validate only known flags were used */
        if (pool->p.flags & ~(PP_FLAG_ALL))
@@ -388,8 +389,8 @@ static void page_pool_set_pp_info(struct page_pool *pool,
         * the overhead is negligible.
         */
        page_pool_fragment_page(page, 1);
-       if (pool->p.init_callback)
-               pool->p.init_callback(page, pool->p.init_arg);
+       if (pool->slow.init_callback)
+               pool->slow.init_callback(page, pool->slow.init_arg);
 }
 
 static void page_pool_clear_pp_info(struct page *page)