MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
- MLX5E_SQ_STATE_REDIRECT,
};
struct mlx5e_sq_wqe_info {
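With the REDIRECT state bit removed, whether an XDPSQ serves XDP_REDIRECT or XDP_TX is carried entirely by the new rq argument. A minimal sketch of the convention, mirroring the call sites changed further down in this patch:

	/* NULL rq: the channel-level XDP_REDIRECT SQ.
	 * non-NULL rq: the XDP_TX SQ embedded in that RQ.
	 */
	busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq, NULL);
	busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq, &c->rq);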
return true;
}
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
bool is_redirect;
u16 sqcc;
int i;
if (!cqe)
return false;
- is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
- rq = container_of(sq, struct mlx5e_rq, xdpsq);
+ is_redirect = !rq;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
 * otherwise a cq overrun may occur
 */
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
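The completion loop itself is elided from this hunk, but it is where is_redirect matters: it selects how a finished frame is released. A hedged sketch of that branch, assuming the mlx5e_xdp_info fields (xdpf, dma_addr, di) and the mlx5e_page_release() helper of this driver generation:

	if (is_redirect) {
		/* ndo_xdp_xmit frame: unmap it and return it to its
		 * originator.
		 */
		dma_unmap_single(sq->pdev, xdpi->dma_addr,
				 xdpi->xdpf->len, DMA_TO_DEVICE);
		xdp_return_frame(xdpi->xdpf);
	} else {
		/* XDP_TX frame: the buffer is one of our own RX pages,
		 * so recycle it into the owning RQ, which is the reason
		 * rq is now passed in.
		 */
		mlx5e_page_release(rq, &xdpi->di, true);
	}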
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
- struct mlx5e_rq *rq;
- bool is_redirect;
-
- is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
- rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq);
+ bool is_redirect = !rq;
while (sq->cc != sq->pc) {
u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
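sq->cc and sq->pc are free-running counters, and mlx5_wq_cyc_ctr2ix() masks a counter down to a slot index in the power-of-two work queue, so the loop above drains every descriptor still in flight. A minimal model of that mapping (the real helper reads the mask from the wq control structure):

	/* Model only: for a power-of-two ring, index = counter & (size - 1). */
	static inline u16 ctr2ix(u16 ctr, u16 ring_size)
	{
		return ctr & (ring_size - 1);
	}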
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
void *va, u16 *rx_headroom, u32 *len);
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq);
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags);
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
- if (is_redirect)
- set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
if (err)
return err;
}
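After this hunk nothing sets a redirect bit anywhere, so sq->state is left carrying only generic flags such as ENABLED; the redirect/XDP_TX distinction now lives exclusively at the call sites, which cannot go stale the way a cached state bit could.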
-static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
+static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
struct mlx5e_channel *c = sq->channel;
napi_synchronize(&c->napi);
mlx5e_destroy_sq(c->mdev, sq->sqn);
- mlx5e_free_xdpsq_descs(sq);
+ mlx5e_free_xdpsq_descs(sq, rq);
mlx5e_free_xdpsq(sq);
}
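The ordering in mlx5e_close_xdpsq() is the reason the descriptor-free helper exists at all: once mlx5e_destroy_sq() runs, no further CQEs arrive, so any descriptors still outstanding between sq->cc and sq->pc must be reclaimed in software. For an XDP_TX queue that reclaim hands RX pages back, which is why the rq pointer is plumbed through.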
err_close_xdp_sq:
if (c->xdp)
- mlx5e_close_xdpsq(&c->rq.xdpsq);
+ mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq);
err_close_sqs:
mlx5e_close_sqs(c);
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
- mlx5e_close_xdpsq(&c->xdpsq);
+ mlx5e_close_xdpsq(&c->xdpsq, NULL);
mlx5e_close_rq(&c->rq);
if (c->xdp)
- mlx5e_close_xdpsq(&c->rq.xdpsq);
+ mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq);
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
napi_disable(&c->napi);
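Note the teardown order: the redirect SQ goes down first with a NULL rq, while the RQ-owned XDP_TX SQ is closed only after mlx5e_close_rq(). Hedging on the internals, this is safe because the RQ is embedded in the channel, so &c->rq remains valid memory and the descriptor cleanup only touches the parts of it that survive mlx5e_close_rq().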
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
struct mlx5e_ch_stats *ch_stats = c->stats;
+ struct mlx5e_rq *rq = &c->rq;
bool busy = false;
int work_done = 0;
int i;
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
- busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq, NULL);
if (c->xdp)
- busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
+ busy |= mlx5e_poll_xdpsq_cq(&rq->xdpsq.cq, rq);
if (likely(budget)) { /* budget=0 means: don't poll rx rings */
- work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+ work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
busy |= work_done == budget;
}
- busy |= c->rq.post_wqes(&c->rq);
+ busy |= c->rq.post_wqes(rq);
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
mlx5e_cq_arm(&c->sq[i].cq);
}
- mlx5e_handle_rx_dim(&c->rq);
+ mlx5e_handle_rx_dim(rq);
- mlx5e_cq_arm(&c->rq.cq);
+ mlx5e_cq_arm(&rq->cq);
mlx5e_cq_arm(&c->icosq.cq);
mlx5e_cq_arm(&c->xdpsq.cq);
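The rest of the mlx5e_napi_poll() changes are mechanical: caching rq = &c->rq once replaces the repeated c->rq dereferences, keeping the new two-argument calls short without changing behavior.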