net/mlx5e: Optimize the page cache reducing its size 2x
author: Maxim Mikityanskiy <maximmi@nvidia.com>
Thu, 29 Sep 2022 07:21:49 +0000 (00:21 -0700)
committer: Jakub Kicinski <kuba@kernel.org>
Fri, 30 Sep 2022 14:55:46 +0000 (07:55 -0700)
RX page cache stores dma_info structs, that consist of a pointer to
struct page and a DMA address. In fact, the DMA address is extracted
from struct page using page_pool_get_dma_addr when a page is pushed to
the cache. By moving this call to the point when a page is popped from
the cache, we can avoid storing the DMA address in the cache,
effectively halving its size without losing any functionality.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

index 449c016262f4f81c120fb4a03f9a4dfdee8536bc..6b91fa7f2221f8cbd0519925cd235f39da68621c 100644 (file)
@@ -630,7 +630,7 @@ struct mlx5e_mpw_info {
 struct mlx5e_page_cache {
        u32 head;
        u32 tail;
-       struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
+       struct page *page_cache[MLX5E_CACHE_SIZE];
 };
 
 struct mlx5e_rq;
index fbbc2e792c279f28615c12f026114c3649b3a4a5..b1d8fd08887bb10b5de74f6db6c14b151ed30030 100644 (file)
@@ -830,13 +830,11 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 
        for (i = rq->page_cache.head; i != rq->page_cache.tail;
             i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
-               struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
-
                /* With AF_XDP, page_cache is not used, so this loop is not
                 * entered, and it's safe to call mlx5e_page_release_dynamic
                 * directly.
                 */
-               mlx5e_page_release_dynamic(rq, dma_info->page, false);
+               mlx5e_page_release_dynamic(rq, rq->page_cache.page_cache[i], false);
        }
 
        xdp_rxq_info_unreg(&rq->xdp_rxq);
index de929fde8cc62f6b5785b6eafcef0c51ce5db7e1..b8aa6f843675435ab481ef0a2fb782987772ab1d 100644 (file)
@@ -245,8 +245,7 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
                return false;
        }
 
-       cache->page_cache[cache->tail].page = page;
-       cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
+       cache->page_cache[cache->tail] = page;
        cache->tail = tail_next;
        return true;
 }
@@ -262,12 +261,13 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
                return false;
        }
 
-       if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+       if (page_ref_count(cache->page_cache[cache->head]) != 1) {
                stats->cache_busy++;
                return false;
        }
 
-       *dma_info = cache->page_cache[cache->head];
+       dma_info->page = cache->page_cache[cache->head];
+       dma_info->addr = page_pool_get_dma_addr(dma_info->page);
        cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
        stats->cache_reuse++;