net/mlx5e: Dynamically allocate DIM structure for SQs/RQs
author Rahul Rameshbabu <rrameshbabu@nvidia.com>
Fri, 19 Apr 2024 08:04:43 +0000 (11:04 +0300)
committer Jakub Kicinski <kuba@kernel.org>
Mon, 22 Apr 2024 21:22:16 +0000 (14:22 -0700)
Make it possible for the DIM structure to be torn down while an SQ or RQ is
still active. Changing the CQ period mode is an example where the previous
sampling done with the DIM structure would need to be invalidated.

Co-developed-by: Nabil S. Alramli <dev@nalramli.com>
Signed-off-by: Nabil S. Alramli <dev@nalramli.com>
Co-developed-by: Joe Damato <jdamato@fastly.com>
Signed-off-by: Joe Damato <jdamato@fastly.com>
Signed-off-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20240419080445.417574-4-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c

index 1c98199b526795bc5d2a555db0561abb75712792..c8c0a0614e7b6a3bd1183ce94d3eabfa521dd9e7 100644 (file)
@@ -430,7 +430,7 @@ struct mlx5e_txqsq {
        u16                        cc;
        u16                        skb_fifo_cc;
        u32                        dma_fifo_cc;
-       struct dim                 dim; /* Adaptive Moderation */
+       struct dim                *dim; /* Adaptive Moderation */
 
        /* dirtied @xmit */
        u16                        pc ____cacheline_aligned_in_smp;
@@ -722,7 +722,7 @@ struct mlx5e_rq {
        int                    ix;
        unsigned int           hw_mtu;
 
-       struct dim         dim; /* Dynamic Interrupt Moderation */
+       struct dim            *dim; /* Dynamic Interrupt Moderation */
 
        /* XDP */
        struct bpf_prog __rcu *xdp_prog;
index df692e29ab8a5237ccf884e05086b35110c43d6b..106a1f70dd9a4c74f46965db8bc77f925c89ee8a 100644 (file)
@@ -44,7 +44,7 @@ mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder,
 void mlx5e_rx_dim_work(struct work_struct *work)
 {
        struct dim *dim = container_of(work, struct dim, work);
-       struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+       struct mlx5e_rq *rq = dim->priv;
        struct dim_cq_moder cur_moder =
                net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 
@@ -54,7 +54,7 @@ void mlx5e_rx_dim_work(struct work_struct *work)
 void mlx5e_tx_dim_work(struct work_struct *work)
 {
        struct dim *dim = container_of(work, struct dim, work);
-       struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
+       struct mlx5e_txqsq *sq = dim->priv;
        struct dim_cq_moder cur_moder =
                net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
 
index 12d1f454834329cad18bbf7367bd22e591a781e5..8b4ecae0fd9fff597c64bbddad85f3cda972a821 100644 (file)
@@ -962,11 +962,20 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
                }
        }
 
-       INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
-       rq->dim.mode = params->rx_cq_moderation.cq_period_mode;
+       rq->dim = kvzalloc_node(sizeof(*rq->dim), GFP_KERNEL, node);
+       if (!rq->dim) {
+               err = -ENOMEM;
+               goto err_unreg_xdp_rxq_info;
+       }
+
+       rq->dim->priv = rq;
+       INIT_WORK(&rq->dim->work, mlx5e_rx_dim_work);
+       rq->dim->mode = params->rx_cq_moderation.cq_period_mode;
 
        return 0;
 
+err_unreg_xdp_rxq_info:
+       xdp_rxq_info_unreg(&rq->xdp_rxq);
 err_destroy_page_pool:
        page_pool_destroy(rq->page_pool);
 err_free_by_rq_type:
@@ -1014,6 +1023,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
                mlx5e_free_wqe_alloc_info(rq);
        }
 
+       kvfree(rq->dim);
        xdp_rxq_info_unreg(&rq->xdp_rxq);
        page_pool_destroy(rq->page_pool);
        mlx5_wq_destroy(&rq->wq_ctrl);
@@ -1341,7 +1351,7 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 
 void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
-       cancel_work_sync(&rq->dim.work);
+       cancel_work_sync(&rq->dim->work);
        cancel_work_sync(&rq->recover_work);
        mlx5e_destroy_rq(rq);
        mlx5e_free_rx_descs(rq);
@@ -1616,12 +1626,20 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
+       sq->dim = kvzalloc_node(sizeof(*sq->dim), GFP_KERNEL, cpu_to_node(c->cpu));
+       if (!sq->dim) {
+               err = -ENOMEM;
+               goto err_free_txqsq_db;
+       }
 
-       INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
-       sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
+       sq->dim->priv = sq;
+       INIT_WORK(&sq->dim->work, mlx5e_tx_dim_work);
+       sq->dim->mode = params->tx_cq_moderation.cq_period_mode;
 
        return 0;
 
+err_free_txqsq_db:
+       mlx5e_free_txqsq_db(sq);
 err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);
 
@@ -1630,6 +1648,7 @@ err_sq_wq_destroy:
 
 void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
 {
+       kvfree(sq->dim);
        mlx5e_free_txqsq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
 }
@@ -1841,7 +1860,7 @@ void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
        struct mlx5_core_dev *mdev = sq->mdev;
        struct mlx5_rate_limit rl = {0};
 
-       cancel_work_sync(&sq->dim.work);
+       cancel_work_sync(&sq->dim->work);
        cancel_work_sync(&sq->recover_work);
        mlx5e_destroy_sq(mdev, sq->sqn);
        if (sq->rate_limit) {
index a7d9b7cb4297fcf486a4dba310d25b856ded1b8f..5873fde65c2e33645ca0621f4220b97b84f7f9db 100644 (file)
@@ -55,7 +55,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
                return;
 
        dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
-       net_dim(&sq->dim, dim_sample);
+       net_dim(sq->dim, dim_sample);
 }
 
 static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
@@ -67,7 +67,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
                return;
 
        dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
-       net_dim(&rq->dim, dim_sample);
+       net_dim(rq->dim, dim_sample);
 }
 
 void mlx5e_trigger_irq(struct mlx5e_icosq *sq)