return rc;
 }
 
+/* Walk the transmit scheduler hierarchy from @smq up to TL1, recording at
+ * each level the CIR/PIR (committed/peak rate) register offsets and their
+ * current values in @smq_flush_ctx, so the rates can be zeroed for the
+ * flush and restored afterwards. Also records the TL1/TL2 schq indices of
+ * the tree for later sibling-TL2 XOFF handling.
+ */
+static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
+                                  struct nix_smq_flush_ctx *smq_flush_ctx)
+{
+       struct nix_smq_tree_ctx *smq_tree_ctx;
+       u64 parent_off, regval;
+       u16 schq;
+       int lvl;
+
+       smq_flush_ctx->smq = smq;
+
+       /* Start at the SMQ itself; each PARENT register read moves schq one
+        * level up the tree.
+        */
+       schq = smq;
+       for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+               smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+               if (lvl == NIX_TXSCH_LVL_TL1) {
+                       /* TL1 is the root: it has no PIR register and no
+                        * parent, which the zero offsets below encode.
+                        */
+                       smq_flush_ctx->tl1_schq = schq;
+                       smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
+                       smq_tree_ctx->pir_off = 0;
+                       smq_tree_ctx->pir_val = 0;
+                       parent_off = 0;
+               } else if (lvl == NIX_TXSCH_LVL_TL2) {
+                       smq_flush_ctx->tl2_schq = schq;
+                       smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
+                       smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
+                       parent_off = NIX_AF_TL2X_PARENT(schq);
+               } else if (lvl == NIX_TXSCH_LVL_TL3) {
+                       smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
+                       smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
+                       parent_off = NIX_AF_TL3X_PARENT(schq);
+               } else if (lvl == NIX_TXSCH_LVL_TL4) {
+                       smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
+                       smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
+                       parent_off = NIX_AF_TL4X_PARENT(schq);
+               } else if (lvl == NIX_TXSCH_LVL_MDQ) {
+                       smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
+                       smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
+                       parent_off = NIX_AF_MDQX_PARENT(schq);
+               }
+               /* save cir/pir register values */
+               smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
+               if (smq_tree_ctx->pir_off)
+                       smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
+
+               /* get parent txsch node */
+               if (parent_off) {
+                       regval = rvu_read64(rvu, blkaddr, parent_off);
+                       /* parent schq index lives in bits [24:16] of the
+                        * *_PARENT register (9-bit field per the mask).
+                        */
+                       schq = (regval >> 16) & 0x1FF;
+               }
+       }
+}
+
+/* Set (@enable=true) or clear (@enable=false) SW_XOFF on every in-use TL2
+ * queue belonging to the same PF as the TL2 feeding the SMQ under flush,
+ * except that TL2 itself. This quiesces sibling traffic into the shared
+ * upper-level scheduler while the SMQ flush is in progress.
+ */
+static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
+                                     struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
+{
+       struct nix_txsch *txsch;
+       struct nix_hw *nix_hw;
+       u16 flush_pf;
+       u64 regoff;
+       int tl2;
+
+       nix_hw = get_nix_hw(rvu->hw, blkaddr);
+       if (!nix_hw)
+               return;
+
+       txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+
+       /* PF portion (FUNC bits masked off) of the PF_FUNC that owns the
+        * flushed SMQ's TL2. Compute it once, outside the loop, and apply
+        * the mask to the result of TXSCH_MAP_FUNC() — previously the mask
+        * was applied inside the macro argument on one side of the compare
+        * and outside on the other.
+        */
+       flush_pf = TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq]) &
+                  ~RVU_PFVF_FUNC_MASK;
+
+       /* loop through all TL2s with matching PF_FUNC */
+       for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
+               /* skip the smq(flush) TL2 */
+               if (tl2 == smq_flush_ctx->tl2_schq)
+                       continue;
+               /* skip unused TL2s */
+               if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
+                       continue;
+               /* skip if PF doesn't match */
+               if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) &
+                    ~RVU_PFVF_FUNC_MASK) != flush_pf)
+                       continue;
+               /* enable/disable XOFF */
+               regoff = NIX_AF_TL2X_SW_XOFF(tl2);
+               rvu_write64(rvu, blkaddr, regoff, enable ? 0x1 : 0x0);
+       }
+}
+
+/* Restore (@enable=true) or zero (@enable=false) the CIR/PIR rate-limit
+ * registers saved by nix_smq_flush_fill_ctx() at every level of the SMQ's
+ * scheduler tree. Rates are zeroed before the flush so shaping credits
+ * cannot stall it, and the saved values are written back afterwards.
+ */
+static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
+                                     struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
+{
+       u64 cir_off, pir_off, cir_val, pir_val;
+       struct nix_smq_tree_ctx *smq_tree_ctx;
+       int lvl;
+
+       for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+               smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+               cir_off = smq_tree_ctx->cir_off;
+               cir_val = smq_tree_ctx->cir_val;
+               pir_off = smq_tree_ctx->pir_off;
+               pir_val = smq_tree_ctx->pir_val;
+
+               rvu_write64(rvu, blkaddr, cir_off, enable ? cir_val : 0x0);
+               /* TL1 has no PIR; nix_smq_flush_fill_ctx() encodes that by
+                * leaving pir_off as 0, so key off pir_off here (as the fill
+                * side does) instead of special-casing the TL1 level.
+                */
+               if (pir_off)
+                       rvu_write64(rvu, blkaddr, pir_off, enable ? pir_val : 0x0);
+       }
+}
+
 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
                         int smq, u16 pcifunc, int nixlf)
 {
+       struct nix_smq_flush_ctx *smq_flush_ctx;
        int pf = rvu_get_pf(pcifunc);
        u8 cgx_id = 0, lmac_id = 0;
        int err, restore_tx_en = 0;
                                                   lmac_id, true);
        }
 
+       /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
+       smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
+       if (!smq_flush_ctx)
+               return -ENOMEM;
+       nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
+       nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
+       nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
+
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
        /* Do SMQ flush and set enqueue xoff */
        cfg |= BIT_ULL(50) | BIT_ULL(49);
        err = rvu_poll_reg(rvu, blkaddr,
                           NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
        if (err)
-               dev_err(rvu->dev,
-                       "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
+               dev_info(rvu->dev,
+                        "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
+                        nixlf, smq);
+
+       /* clear XOFF on TL2s */
+       nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
+       nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
+       kfree(smq_flush_ctx);
 
        rvu_cgx_enadis_rx_bp(rvu, pf, true);
        /* restore cgx tx state */