void mlx5e_rx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
-	struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+	struct mlx5e_rq *rq = dim->priv;
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
}

void mlx5e_tx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
-	struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
+	struct mlx5e_txqsq *sq = dim->priv;
	struct dim_cq_moder cur_moder =
		net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
}
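
The two hunks above are the crux of the patch: container_of() can recover the parent only while the dim struct is embedded in it. Once dim is a separately allocated object, the owner has to travel in an explicit back-pointer, which is what dim->priv provides. Below is a minimal, self-contained module sketching that pattern; the demo_* names are hypothetical and not from the driver, while kvzalloc_node(), INIT_WORK(), schedule_work(), cancel_work_sync() and container_of() are the real kernel APIs.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/numa.h>

/* Stand-ins for struct dim and the queue that owns it; the real struct dim
 * lives in include/linux/dim.h and carries the same work/priv pair. */
struct demo_dim {
	struct work_struct work;
	void *priv;			/* back-pointer to the owner */
};

struct demo_queue {
	int id;
	struct demo_dim *dim;		/* now a pointer, not a member */
};

static struct demo_queue demo_rq = { .id = 42 };

static void demo_dim_work(struct work_struct *work)
{
	/* container_of() still recovers the allocation embedding the
	 * work item... */
	struct demo_dim *dim = container_of(work, struct demo_dim, work);
	/* ...but the owning queue can only come from the back-pointer:
	 * the queue no longer embeds the dim struct, so there is no
	 * container to compute an offset into. */
	struct demo_queue *q = dim->priv;

	pr_info("dim work ran for queue %d\n", q->id);
}

static int __init demo_init(void)
{
	demo_rq.dim = kvzalloc_node(sizeof(*demo_rq.dim), GFP_KERNEL,
				    NUMA_NO_NODE);
	if (!demo_rq.dim)
		return -ENOMEM;

	demo_rq.dim->priv = &demo_rq;
	INIT_WORK(&demo_rq.dim->work, demo_dim_work);
	schedule_work(&demo_rq.dim->work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Same ordering the driver enforces below in mlx5e_close_rq():
	 * the work item lives inside the allocation, so it must be idle
	 * before the memory is freed. */
	cancel_work_sync(&demo_rq.dim->work);
	kvfree(demo_rq.dim);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("dim->priv back-pointer demo");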
-	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
-	rq->dim.mode = params->rx_cq_moderation.cq_period_mode;
+	rq->dim = kvzalloc_node(sizeof(*rq->dim), GFP_KERNEL, node);
+	if (!rq->dim) {
+		err = -ENOMEM;
+		goto err_unreg_xdp_rxq_info;
+	}
+
+	rq->dim->priv = rq;
+	INIT_WORK(&rq->dim->work, mlx5e_rx_dim_work);
+	rq->dim->mode = params->rx_cq_moderation.cq_period_mode;

	return 0;

+err_unreg_xdp_rxq_info:
+	xdp_rxq_info_unreg(&rq->xdp_rxq);
err_destroy_page_pool:
	page_pool_destroy(rq->page_pool);
err_free_by_rq_type:
	mlx5e_free_wqe_alloc_info(rq);
}
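
The dim allocation is the last setup step in mlx5e_alloc_rq(), after the page pool is created and the XDP RXQ is registered, so a failure here has to unwind both; the new err_unreg_xdp_rxq_info label falls through into the pre-existing labels, keeping teardown in exact reverse order of acquisition. A runnable userspace sketch of that ladder idiom (struct ctx and the setup_*/teardown_* helpers are hypothetical):

#include <stdio.h>

struct ctx { int a, b; };

static int setup_a(struct ctx *x) { x->a = 1; return 0; }
static int setup_b(struct ctx *x) { x->b = 1; return 0; }
static int setup_c(struct ctx *x) { (void)x; return -12; /* simulate -ENOMEM */ }
static void teardown_b(struct ctx *x) { x->b = 0; puts("undo b"); }
static void teardown_a(struct ctx *x) { x->a = 0; puts("undo a"); }

static int setup_all(struct ctx *x)
{
	int err;

	err = setup_a(x);		/* e.g. page pool creation     */
	if (err)
		return err;
	err = setup_b(x);		/* e.g. XDP RXQ registration   */
	if (err)
		goto err_teardown_a;
	err = setup_c(x);		/* e.g. the new dim allocation */
	if (err)
		goto err_teardown_b;	/* newest resource's label     */
	return 0;

err_teardown_b:				/* labels fall through, so     */
	teardown_b(x);			/* teardown runs in reverse    */
err_teardown_a:				/* order of acquisition        */
	teardown_a(x);
	return err;
}

int main(void)
{
	struct ctx x = {0};

	return setup_all(&x) ? 1 : 0;
}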

+	kvfree(rq->dim);
	xdp_rxq_info_unreg(&rq->xdp_rxq);
	page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

void mlx5e_close_rq(struct mlx5e_rq *rq)
{
-	cancel_work_sync(&rq->dim.work);
+	cancel_work_sync(&rq->dim->work);
	cancel_work_sync(&rq->recover_work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
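
Note the ordering mlx5e_close_rq() preserves: the DIM work is cancelled before mlx5e_free_rq() later kvfree()s rq->dim. With the work item now embedded in a heap allocation, reversing the two would let the workqueue dereference freed memory. A userspace analogue, with a thread standing in for the work item (all names hypothetical; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dim_like {
	int profile_ix;			/* state the "work" touches */
};

static void *dim_worker(void *arg)
{
	struct dim_like *dim = arg;

	dim->profile_ix++;		/* dereferences the allocation */
	return NULL;
}

int main(void)
{
	struct dim_like *dim = calloc(1, sizeof(*dim));
	pthread_t worker;

	if (!dim)
		return 1;
	pthread_create(&worker, NULL, dim_worker, dim);

	/* Join first, the analogue of cancel_work_sync(): only once the
	 * worker can no longer run is it safe to free its state. */
	pthread_join(worker, NULL);
	printf("profile_ix = %d\n", dim->profile_ix);
	free(dim);
	return 0;
}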

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

-	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
-	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
+	sq->dim = kvzalloc_node(sizeof(*sq->dim), GFP_KERNEL, cpu_to_node(c->cpu));
+	if (!sq->dim) {
+		err = -ENOMEM;
+		goto err_free_txqsq_db;
+	}
+
+	sq->dim->priv = sq;
+	INIT_WORK(&sq->dim->work, mlx5e_tx_dim_work);
+	sq->dim->mode = params->tx_cq_moderation.cq_period_mode;

	return 0;

+err_free_txqsq_db:
+	mlx5e_free_txqsq_db(sq);
err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);
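
The SQ side mirrors the RQ change; the only real difference is how the NUMA node is picked (the RQ path receives a node argument, the SQ path derives one from the channel's CPU, so the dim state lands on the node that services the queue). For illustration, the shared idiom could be condensed into a helper like the sketch below; demo_dim_alloc() is hypothetical and not part of the driver, while kvzalloc_node(), cpu_to_node() and INIT_WORK() are real APIs, and priv/work/mode are exactly the fields the patch initializes.

#include <linux/dim.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* Hypothetical helper (not in the driver): zeroed, NUMA-local dim state
 * placed on the node of the CPU that will service the queue, with the
 * back-pointer and work handler pre-wired. */
static struct dim *demo_dim_alloc(void *owner, int cpu,
				  work_func_t work_fn, u8 cq_period_mode)
{
	struct dim *dim;

	dim = kvzalloc_node(sizeof(*dim), GFP_KERNEL, cpu_to_node(cpu));
	if (!dim)
		return NULL;

	dim->priv = owner;		/* owner recovered in work_fn */
	INIT_WORK(&dim->work, work_fn);
	dim->mode = cq_period_mode;
	return dim;
}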

void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
+	kvfree(sq->dim);
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

	struct mlx5_core_dev *mdev = sq->mdev;
	struct mlx5_rate_limit rl = {0};

-	cancel_work_sync(&sq->dim.work);
+	cancel_work_sync(&sq->dim->work);
	cancel_work_sync(&sq->recover_work);
	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit) {

		return;

	dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
-	net_dim(&sq->dim, dim_sample);
+	net_dim(sq->dim, dim_sample);
}

static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
		return;

	dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
-	net_dim(&rq->dim, dim_sample);
+	net_dim(rq->dim, dim_sample);
}
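
For context on these two hunks: on each completion event the driver folds the queue's cumulative counters into a dim_sample and hands it to net_dim(), which compares successive samples and, when the traffic trend justifies a different moderation profile, schedules dim->work, i.e. the mlx5e_rx_dim_work()/mlx5e_tx_dim_work() handlers at the top of this patch. A condensed sketch of that poll-side pattern (struct my_queue is hypothetical; dim_update_sample() and net_dim() are the real lib/dim interfaces, called here exactly as the driver calls them):

#include <linux/dim.h>

/* struct my_queue is hypothetical; the real driver keeps these counters in
 * its CQ and stats structures. */
struct my_queue {
	struct dim *dim;
	u16 event_ctr;			/* completion interrupts seen  */
	u64 packets, bytes;		/* cumulative traffic counters */
};

static void my_queue_handle_dim(struct my_queue *q)
{
	struct dim_sample sample = {};

	/* Snapshot the cumulative counters; lib/dim diffs successive
	 * snapshots internally to estimate the current rate. */
	dim_update_sample(q->event_ctr, q->packets, q->bytes, &sample);

	/* May advance dim's internal state machine and schedule
	 * q->dim->work when a better moderation profile is found. */
	net_dim(q->dim, sample);
}

The work handlers then translate the chosen profile index into concrete CQ moderation values, which is what net_dim_get_rx_moderation()/net_dim_get_tx_moderation() return in the first two hunks.
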
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)