        /* dirtied @xmit */
        u16                        pc ____cacheline_aligned_in_smp;
        u32                        dma_fifo_pc;
-       struct mlx5e_sq_stats      stats;
 
        struct mlx5e_cq            cq;
 
        /* read only */
        struct mlx5_wq_cyc         wq;
        u32                        dma_fifo_mask;
+       struct mlx5e_sq_stats     *stats;
        void __iomem              *uar_map;
        struct netdev_queue       *txq;
        u32                        sqn;
        struct mlx5e_channel  *channel;
        struct device         *pdev;
        struct net_device     *netdev;
-       struct mlx5e_rq_stats  stats;
+       struct mlx5e_rq_stats *stats;
        struct mlx5e_cq        cq;
        struct mlx5e_page_cache page_cache;
        struct hwtstamp_config *tstamp;
 
        /* data path - accessed per napi poll */
        struct irq_desc *irq_desc;
-       struct mlx5e_ch_stats      stats;
+       struct mlx5e_ch_stats     *stats;
 
        /* control */
        struct mlx5e_priv         *priv;
        struct mlx5e_params    params;
 };
 
+struct mlx5e_channel_stats {
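+       /* One counter block per channel; the array of blocks lives in
+        * struct mlx5e_priv so the counters survive channel close/open
+        * cycles instead of being freed with the channel objects.
+        */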
+       struct mlx5e_ch_stats ch;
+       struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
+       struct mlx5e_rq_stats rq;
+} ____cacheline_aligned_in_smp;
+
 enum mlx5e_traffic_types {
        MLX5E_TT_IPV4_TCP,
        MLX5E_TT_IPV6_TCP,
        struct mlx5_core_dev      *mdev;
        struct net_device         *netdev;
        struct mlx5e_stats         stats;
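+       /* Persistent per-channel counter blocks; max_opened_tc tracks the
+        * highest num_tc ever configured so counters of TCs that were used
+        * and later removed remain reported.
+        */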
+       struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+       u8                         max_opened_tc;
        struct hwtstamp_config     tstamp;
        u16                        q_counter;
        u16                        drop_rq_q_counter;
 
        int headln;
        int i;
 
-       sq->stats.tls_ooo++;
+       sq->stats->tls_ooo++;
 
        if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
                /* We might get here if a retransmission reaches the driver
        skb_shinfo(nskb)->nr_frags = info.nr_frags;
        nskb->data_len = info.sync_len;
        nskb->len += info.sync_len;
-       sq->stats.tls_resync_bytes += nskb->len;
+       sq->stats->tls_resync_bytes += nskb->len;
        mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
                                    cpu_to_be64(info.rcd_sn));
        mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
 
        rq->ix      = c->ix;
        rq->mdev    = mdev;
        rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
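+       /* Point at the persistent per-channel RQ counters kept in priv */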
+       rq->stats   = &c->priv->channel_stats[c->ix].rq;
 
        rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
        if (IS_ERR(rq->xdp_prog)) {
                             int txq_ix,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param,
-                            struct mlx5e_txqsq *sq)
+                            struct mlx5e_txqsq *sq,
+                            int tc)
 {
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
        sq->txq_ix    = txq_ix;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
+       sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
        INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
                            int txq_ix,
                            struct mlx5e_params *params,
                            struct mlx5e_sq_param *param,
-                           struct mlx5e_txqsq *sq)
+                           struct mlx5e_txqsq *sq,
+                           int tc)
 {
        struct mlx5e_create_sq_param csp = {};
        u32 tx_rate;
        int err;
 
-       err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
+       err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
        if (err)
                return err;
 
                return;
 
        mlx5e_reset_txqsq_cc_pc(sq);
-       sq->stats.recover++;
+       sq->stats->recover++;
        recover->last_recover = jiffies;
        mlx5e_activate_txqsq(sq);
 }
                          struct mlx5e_params *params,
                          struct mlx5e_channel_param *cparam)
 {
-       int err;
-       int tc;
+       struct mlx5e_priv *priv = c->priv;
+       int err, tc, max_nch = priv->profile->max_nch(priv->mdev);
 
        for (tc = 0; tc < params->num_tc; tc++) {
-               int txq_ix = c->ix + tc * params->num_channels;
+               int txq_ix = c->ix + tc * max_nch;
 
                err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
-                                      params, &cparam->sq, &c->sq[tc]);
+                                      params, &cparam->sq, &c->sq[tc], tc);
                if (err)
                        goto err_close_sqs;
        }
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
        c->num_tc   = params->num_tc;
        c->xdp      = !!params->xdp_prog;
+       c->stats    = &priv->channel_stats[ix].ch;
 
        mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
        c->irq_desc = irq_to_desc(irq);
        struct mlx5e_txqsq *sq;
        int i, tc;
 
-       for (i = 0; i < priv->channels.num; i++)
+       for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
                for (tc = 0; tc < priv->profile->max_tc; tc++)
-                       priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
+                       priv->channel_tc2txq[i][tc] = i + tc * priv->profile->max_nch(priv->mdev);
 
        if (err)
                goto out;
 
+       priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+                                   new_channels.params.num_tc);
        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 out:
        mutex_unlock(&priv->state_lock);
                return false;
 
        netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
-       sq->channel->stats.eq_rearm++;
+       sq->channel->stats->eq_rearm++;
        return true;
 }
 
        priv->profile     = profile;
        priv->ppriv       = ppriv;
        priv->msglevel    = MLX5E_MSG_LEVEL;
+       priv->max_opened_tc = 1;
 
        mlx5e_build_nic_params(mdev, &priv->channels.params,
                               profile->max_nch(mdev), netdev->mtu);
 
        for (i = 0; i < priv->channels.num; i++) {
                struct mlx5e_channel *c = priv->channels.c[i];
 
-               rq_stats = &c->rq.stats;
+               rq_stats = c->rq.stats;
 
                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
 
                for (j = 0; j < priv->channels.params.num_tc; j++) {
-                       sq_stats = &c->sq[j].stats;
+                       sq_stats = c->sq[j].stats;
 
                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
 
        mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
        cq->decmprs_left        = be32_to_cpu(cq->title.byte_cnt);
        cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
-       rq->stats.cqe_compress_blks++;
+       rq->stats->cqe_compress_blks++;
 }
 
 static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
        mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
        cq->wq.cc = cqcc;
        cq->decmprs_left -= cqe_count;
-       rq->stats.cqe_compress_pkts += cqe_count;
+       rq->stats->cqe_compress_pkts += cqe_count;
 
        return cqe_count;
 }
 {
        struct mlx5e_page_cache *cache = &rq->page_cache;
        u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
+       struct mlx5e_rq_stats *stats = rq->stats;
 
        if (tail_next == cache->head) {
-               rq->stats.cache_full++;
+               stats->cache_full++;
                return false;
        }
 
        if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
-               rq->stats.cache_waive++;
+               stats->cache_waive++;
                return false;
        }
 
                                      struct mlx5e_dma_info *dma_info)
 {
        struct mlx5e_page_cache *cache = &rq->page_cache;
+       struct mlx5e_rq_stats *stats = rq->stats;
 
        if (unlikely(cache->head == cache->tail)) {
-               rq->stats.cache_empty++;
+               stats->cache_empty++;
                return false;
        }
 
        if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
-               rq->stats.cache_busy++;
+               stats->cache_busy++;
                return false;
        }
 
        *dma_info = cache->page_cache[cache->head];
        cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
-       rq->stats.cache_reuse++;
+       stats->cache_reuse++;
 
        dma_sync_single_for_device(rq->pdev, dma_info->addr,
                                   RQ_PAGE_SIZE(rq),
                                           struct mlx5e_wqe_frag_info *wi)
 {
        if (mlx5e_page_reuse(rq, wi)) {
-               rq->stats.page_reuse++;
+               rq->stats->page_reuse++;
                return;
        }
 
                dma_info--;
                mlx5e_page_release(rq, dma_info, true);
        }
-       rq->stats.buff_alloc_err++;
+       rq->stats->buff_alloc_err++;
 
        return err;
 }
 
                err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head);
                if (unlikely(err)) {
-                       rq->stats.buff_alloc_err++;
+                       rq->stats->buff_alloc_err++;
                        break;
                }
 
                                     struct sk_buff *skb,
                                     bool   lro)
 {
+       struct mlx5e_rq_stats *stats = rq->stats;
        int network_depth = 0;
 
        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
 
        if (lro) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
-               rq->stats.csum_unnecessary++;
+               stats->csum_unnecessary++;
                return;
        }
 
                        skb->csum = csum_partial(skb->data + ETH_HLEN,
                                                 network_depth - ETH_HLEN,
                                                 skb->csum);
-               rq->stats.csum_complete++;
+               stats->csum_complete++;
                return;
        }
 
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
                        skb->encapsulation = 1;
-                       rq->stats.csum_unnecessary_inner++;
+                       stats->csum_unnecessary_inner++;
                        return;
                }
-               rq->stats.csum_unnecessary++;
+               stats->csum_unnecessary++;
                return;
        }
 csum_none:
        skb->ip_summed = CHECKSUM_NONE;
-       rq->stats.csum_none++;
+       stats->csum_none++;
 }
 
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      struct sk_buff *skb)
 {
        u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+       struct mlx5e_rq_stats *stats = rq->stats;
        struct net_device *netdev = rq->netdev;
 
        skb->mac_len = ETH_HLEN;
                /* Subtract one since we already counted this as one
                 * "regular" packet in mlx5e_complete_rx_cqe()
                 */
-               rq->stats.packets += lro_num_seg - 1;
-               rq->stats.lro_packets++;
-               rq->stats.lro_bytes += cqe_bcnt;
+               stats->packets += lro_num_seg - 1;
+               stats->lro_packets++;
+               stats->lro_bytes += cqe_bcnt;
        }
 
        if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
        if (cqe_has_vlan(cqe)) {
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(cqe->vlan_info));
-               rq->stats.removed_vlan_packets++;
+               stats->removed_vlan_packets++;
        }
 
        skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
                                         u32 cqe_bcnt,
                                         struct sk_buff *skb)
 {
-       rq->stats.packets++;
-       rq->stats.bytes += cqe_bcnt;
+       struct mlx5e_rq_stats *stats = rq->stats;
+
+       stats->packets++;
+       stats->bytes += cqe_bcnt;
        mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
 }
 
        dma_addr_t dma_addr  = di->addr + data_offset;
        unsigned int dma_len = xdp->data_end - xdp->data;
 
+       struct mlx5e_rq_stats *stats = rq->stats;
+
        prefetchw(wqe);
 
        if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
-               rq->stats.xdp_drop++;
+               stats->xdp_drop++;
                return false;
        }
 
                        mlx5e_xmit_xdp_doorbell(sq);
                        sq->db.doorbell = false;
                }
-               rq->stats.xdp_tx_full++;
+               stats->xdp_tx_full++;
                return false;
        }
 
 
        sq->db.doorbell = true;
 
-       rq->stats.xdp_tx++;
+       stats->xdp_tx++;
        return true;
 }
 
        case XDP_ABORTED:
                trace_xdp_exception(rq->netdev, prog, act);
        case XDP_DROP:
-               rq->stats.xdp_drop++;
+               rq->stats->xdp_drop++;
                return true;
        }
 }
        struct sk_buff *skb = build_skb(va, frag_size);
 
        if (unlikely(!skb)) {
-               rq->stats.buff_alloc_err++;
+               rq->stats->buff_alloc_err++;
                return NULL;
        }
 
        wi->offset += frag_size;
 
        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
-               rq->stats.wqe_err++;
+               rq->stats->wqe_err++;
                return NULL;
        }
 
        skb = napi_alloc_skb(rq->cq.napi,
                             ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long)));
        if (unlikely(!skb)) {
-               rq->stats.buff_alloc_err++;
+               rq->stats->buff_alloc_err++;
                return NULL;
        }
 
        wi->consumed_strides += cstrides;
 
        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
-               rq->stats.wqe_err++;
+               rq->stats->wqe_err++;
                goto mpwrq_cqe_out;
        }
 
        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
-               rq->stats.mpwqe_filler++;
+               rq->stats->mpwqe_filler++;
                goto mpwrq_cqe_out;
        }
 
                                         u32 cqe_bcnt,
                                         struct sk_buff *skb)
 {
+       struct mlx5e_rq_stats *stats = rq->stats;
        struct hwtstamp_config *tstamp;
        struct net_device *netdev;
        struct mlx5e_priv *priv;
 
        skb->dev = netdev;
 
-       rq->stats.csum_complete++;
-       rq->stats.packets++;
-       rq->stats.bytes += cqe_bcnt;
+       stats->csum_complete++;
+       stats->packets++;
+       stats->bytes += cqe_bcnt;
 }
 
 void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 {
        struct mlx5e_sw_stats temp, *s = &temp;
-       struct mlx5e_rq_stats *rq_stats;
-       struct mlx5e_sq_stats *sq_stats;
-       struct mlx5e_ch_stats *ch_stats;
-       int i, j;
+       int i;
 
        memset(s, 0, sizeof(*s));
        read_lock(&priv->stats_lock);
        if (!priv->channels_active)
                goto out;
-       for (i = 0; i < priv->channels.num; i++) {
-               struct mlx5e_channel *c = priv->channels.c[i];
 
-               rq_stats = &c->rq.stats;
-               ch_stats = &c->stats;
+       for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
+               struct mlx5e_channel_stats *channel_stats =
+                       &priv->channel_stats[i];
+               struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
+               struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
+               int j;
 
                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
                s->rx_cache_waive += rq_stats->cache_waive;
                s->ch_eq_rearm += ch_stats->eq_rearm;
 
-               for (j = 0; j < priv->channels.params.num_tc; j++) {
-                       sq_stats = &c->sq[j].stats;
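+               /* Sum over all TCs ever opened so counters of TCs that are
+                * not currently configured are still accounted.
+                */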
+               for (j = 0; j < priv->max_opened_tc; j++) {
+                       struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
 
                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
 
 static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
 {
-       return (NUM_RQ_STATS * priv->channels.num) +
-               (NUM_CH_STATS * priv->channels.num) +
-               (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
+       int max_nch = priv->profile->max_nch(priv->mdev);
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               return 0;
+
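+       /* Size the report by max_nch and max_opened_tc rather than the
+        * currently active channel/TC counts.
+        */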
+       return (NUM_RQ_STATS * max_nch) +
+              (NUM_CH_STATS * max_nch) +
+              (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
 }
 
 static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                           int idx)
 {
+       int max_nch = priv->profile->max_nch(priv->mdev);
        int i, j, tc;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return idx;
 
-       for (i = 0; i < priv->channels.num; i++)
+       for (i = 0; i < max_nch; i++)
                for (j = 0; j < NUM_CH_STATS; j++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                ch_stats_desc[j].format, i);
 
-       for (i = 0; i < priv->channels.num; i++)
+       for (i = 0; i < max_nch; i++)
                for (j = 0; j < NUM_RQ_STATS; j++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
 
-       for (tc = 0; tc < priv->channels.params.num_tc; tc++)
-               for (i = 0; i < priv->channels.num; i++)
+       /* priv->channel_tc2txq[i][tc] is valid only when device is open */
+       for (tc = 0; tc < priv->max_opened_tc; tc++)
+               for (i = 0; i < max_nch; i++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                        sq_stats_desc[j].format,
 static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                         int idx)
 {
-       struct mlx5e_channels *channels = &priv->channels;
+       int max_nch = priv->profile->max_nch(priv->mdev);
        int i, j, tc;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return idx;
 
-       for (i = 0; i < channels->num; i++)
+       for (i = 0; i < max_nch; i++)
                for (j = 0; j < NUM_CH_STATS; j++)
                        data[idx++] =
-                               MLX5E_READ_CTR64_CPU(&channels->c[i]->stats,
+                               MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
                                                     ch_stats_desc, j);
 
-       for (i = 0; i < channels->num; i++)
+       for (i = 0; i < max_nch; i++)
                for (j = 0; j < NUM_RQ_STATS; j++)
                        data[idx++] =
-                               MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
+                               MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
                                                     rq_stats_desc, j);
 
-       for (tc = 0; tc < priv->channels.params.num_tc; tc++)
-               for (i = 0; i < channels->num; i++)
+       for (tc = 0; tc < priv->max_opened_tc; tc++)
+               for (i = 0; i < max_nch; i++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
                                data[idx++] =
-                                       MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
+                                       MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
                                                             sq_stats_desc, j);
 
        return idx;
 
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
-                       sq->stats.csum_partial_inner++;
+                       sq->stats->csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
-                       sq->stats.csum_partial++;
+                       sq->stats->csum_partial++;
                }
        } else
-               sq->stats.csum_none++;
+               sq->stats->csum_none++;
 }
 
 static inline u16
 mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
 {
+       struct mlx5e_sq_stats *stats = sq->stats;
        u16 ihs;
 
        if (skb->encapsulation) {
                ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
-               sq->stats.tso_inner_packets++;
-               sq->stats.tso_inner_bytes += skb->len - ihs;
+               stats->tso_inner_packets++;
+               stats->tso_inner_bytes += skb->len - ihs;
        } else {
                ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
-               sq->stats.tso_packets++;
-               sq->stats.tso_bytes += skb->len - ihs;
+               stats->tso_packets++;
+               stats->tso_bytes += skb->len - ihs;
        }
 
        return ihs;
                wi->num_wqebbs = 1;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
-       sq->stats.nop += nnops;
+       sq->stats->nop += nnops;
 }
 
 static inline void
        sq->pc += wi->num_wqebbs;
        if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
-               sq->stats.stopped++;
+               sq->stats->stopped++;
        }
 
        if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe_info *wi;
 
+       struct mlx5e_sq_stats *stats = sq->stats;
        unsigned char *skb_data = skb->data;
        unsigned int skb_len = skb->len;
        u16 ds_cnt, ds_cnt_inl = 0;
                mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
                ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
-               sq->stats.packets += skb_shinfo(skb)->gso_segs;
+               stats->packets += skb_shinfo(skb)->gso_segs;
        } else {
                opcode    = MLX5_OPCODE_SEND;
                mss       = 0;
                ihs       = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
-               sq->stats.packets++;
+               stats->packets++;
        }
 
-       sq->stats.bytes     += num_bytes;
-       sq->stats.xmit_more += skb->xmit_more;
+       stats->bytes     += num_bytes;
+       stats->xmit_more += skb->xmit_more;
 
        headlen = skb_len - ihs - skb->data_len;
        ds_cnt += !!headlen;
                if (skb_vlan_tag_present(skb)) {
                        mlx5e_insert_vlan(eseg->inline_hdr.start, skb,
                                          ihs - VLAN_HLEN, &skb_data, &skb_len);
-                       sq->stats.added_vlan_packets++;
+                       stats->added_vlan_packets++;
                } else {
                        memcpy(eseg->inline_hdr.start, skb_data, ihs);
                        mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
                if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
                        eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
                eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
-               sq->stats.added_vlan_packets++;
+               stats->added_vlan_packets++;
        }
 
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg);
        return NETDEV_TX_OK;
 
 err_drop:
-       sq->stats.dropped++;
+       stats->dropped++;
        dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
                                queue_work(cq->channel->priv->wq,
                                           &sq->recover.recover_work);
                        }
-                       sq->stats.cqe_err++;
+                       sq->stats->cqe_err++;
                }
 
                do {
                                   MLX5E_SQ_STOP_ROOM) &&
            !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
                netif_tx_wake_queue(sq->txq);
-               sq->stats.wake++;
+               sq->stats->wake++;
        }
 
        return (i == MLX5E_TX_CQ_POLL_BUDGET);
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe_info *wi;
 
+       struct mlx5e_sq_stats *stats = sq->stats;
        unsigned char *skb_data = skb->data;
        unsigned int skb_len = skb->len;
        u16 headlen, ihs, pi, frag_pi;
                mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
                ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
-               sq->stats.packets += skb_shinfo(skb)->gso_segs;
+               stats->packets += skb_shinfo(skb)->gso_segs;
        } else {
                opcode    = MLX5_OPCODE_SEND;
                mss       = 0;
                ihs       = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
-               sq->stats.packets++;
+               stats->packets++;
        }
 
-       sq->stats.bytes     += num_bytes;
-       sq->stats.xmit_more += skb->xmit_more;
+       stats->bytes     += num_bytes;
+       stats->xmit_more += skb->xmit_more;
 
        headlen = skb_len - ihs - skb->data_len;
        ds_cnt += !!headlen;
        return NETDEV_TX_OK;
 
 err_drop:
-       sq->stats.dropped++;
+       stats->dropped++;
        dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
 
 
 static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
 {
+       struct mlx5e_sq_stats *stats = sq->stats;
        struct net_dim_sample dim_sample;
 
        if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
                return;
 
-       net_dim_sample(sq->cq.event_ctr, sq->stats.packets, sq->stats.bytes,
+       net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes,
                       &dim_sample);
        net_dim(&sq->dim, dim_sample);
 }
 
 static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
 {
+       struct mlx5e_rq_stats *stats = rq->stats;
        struct net_dim_sample dim_sample;
 
        if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
                return;
 
-       net_dim_sample(rq->cq.event_ctr, rq->stats.packets, rq->stats.bytes,
+       net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes,
                       &dim_sample);
        net_dim(&rq->dim, dim_sample);
 }