static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_wqe_info *wi,
u32 *xsk_frames,
- bool recycle,
struct xdp_frame_bulk *bq)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
case MLX5E_XDP_XMIT_MODE_PAGE:
/* XDP_TX from the regular RQ */
page_pool_put_defragged_page(xdpi.page.rq->page_pool,
- xdpi.page.page, -1, recycle);
+ xdpi.page.page, -1, true);
break;
case MLX5E_XDP_XMIT_MODE_XSK:
/* AF_XDP send */
sqcc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
sq->cc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
}
xdp_flush_frame_bulk(&bq);
}
static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
- struct mlx5e_frag_page *frag_page,
- bool recycle)
+ struct mlx5e_frag_page *frag_page)
{
u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
struct page *page = frag_page->page;
if (page_pool_defrag_page(page, drain_count) == 0)
- page_pool_put_defragged_page(rq->page_pool, page, -1, recycle);
+ page_pool_put_defragged_page(rq->page_pool, page, -1, true);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
}
static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *frag,
- bool recycle)
+ struct mlx5e_wqe_frag_info *frag)
{
if (mlx5e_frag_can_release(frag))
- mlx5e_page_release_fragmented(rq, frag->frag_page, recycle);
+ mlx5e_page_release_fragmented(rq, frag->frag_page);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
free_frags:
while (--i >= 0)
- mlx5e_put_rx_frag(rq, --frag, true);
+ mlx5e_put_rx_frag(rq, --frag);
return err;
}
static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *wi,
- bool recycle)
+ struct mlx5e_wqe_frag_info *wi)
{
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
- mlx5e_put_rx_frag(rq, wi, recycle);
+ mlx5e_put_rx_frag(rq, wi);
}
static void mlx5e_xsk_free_rx_wqe(struct mlx5e_wqe_frag_info *wi)
if (rq->xsk_pool)
mlx5e_xsk_free_rx_wqe(wi);
else
- mlx5e_free_rx_wqe(rq, wi, false);
+ mlx5e_free_rx_wqe(rq, wi);
}
static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
struct mlx5e_wqe_frag_info *wi;
wi = get_frag(rq, j);
- mlx5e_free_rx_wqe(rq, wi, true);
+ mlx5e_free_rx_wqe(rq, wi);
}
}
}
static void
-mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
+mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
{
bool no_xdp_xmit;
int i;
if (rq->xsk_pool) {
struct xdp_buff **xsk_buffs = wi->alloc_units.xsk_buffs;
- /* The `recycle` parameter is ignored, and the page is always
- * put into the Reuse Ring, because there is no way to return
- * the page to the userspace when the interface goes down.
+ /* The page is always put into the Reuse Ring, because there
+ * is no way to return the page to userspace when the interface
+ * goes down.
*/
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap)) {
struct mlx5e_frag_page *frag_page;
frag_page = &wi->alloc_units.frag_pages[i];
- mlx5e_page_release_fragmented(rq, frag_page, recycle);
+ mlx5e_page_release_fragmented(rq, frag_page);
}
}
}
dma_info = &shampo->info[--index];
if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page, true);
+ mlx5e_page_release_fragmented(rq, dma_info->frag_page);
}
}
rq->stats->buff_alloc_err++;
err_unmap:
while (--i >= 0) {
frag_page--;
- mlx5e_page_release_fragmented(rq, frag_page, true);
+ mlx5e_page_release_fragmented(rq, frag_page);
}
err:
hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
if (hd_info->frag_page && hd_info->frag_page != deleted_page) {
deleted_page = hd_info->frag_page;
- mlx5e_page_release_fragmented(rq, hd_info->frag_page, false);
+ mlx5e_page_release_fragmented(rq, hd_info->frag_page);
}
hd_info->frag_page = NULL;
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
- /* Don't recycle, this function is called on rq/netdev close */
- mlx5e_free_rx_mpwqe(rq, wi, false);
+ /* This function is called on rq/netdev close. */
+ mlx5e_free_rx_mpwqe(rq, wi);
}
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
/* Deferred free for better page pool cache usage. */
- mlx5e_free_rx_mpwqe(rq, wi, true);
+ mlx5e_free_rx_mpwqe(rq, wi);
alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
mlx5e_alloc_rx_mpwqe(rq, head);
int i;
for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
- mlx5e_put_rx_frag(rq, &head_wi[i], true);
+ mlx5e_put_rx_frag(rq, &head_wi[i]);
}
return NULL; /* page/packet was consumed by XDP */
}
struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page, true);
+ mlx5e_page_release_fragmented(rq, dma_info->frag_page);
}
bitmap_clear(shampo->bitmap, header_index, 1);
}
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
- s->rx_cache_reuse += rq_stats->cache_reuse;
- s->rx_cache_full += rq_stats->cache_full;
- s->rx_cache_empty += rq_stats->cache_empty;
- s->rx_cache_busy += rq_stats->cache_busy;
- s->rx_cache_waive += rq_stats->cache_waive;
s->rx_congst_umr += rq_stats->congst_umr;
s->rx_arfs_err += rq_stats->arfs_err;
s->rx_recover += rq_stats->recover;
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
- { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
- { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
- { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
- { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
- { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },