return ret;
 }
 
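+/* Translate a mac80211 AC bitmap (BIT(IEEE80211_AC_*)) into a bitmap of
+ * the hardware priority queues those ACs are DMA-mapped to.
+ */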
+static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
+{
+       struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
+       u32 prio_queues = 0;
+
+       if (queues & BIT(IEEE80211_AC_VO))
+               prio_queues |= BIT(rqpn->dma_map_vo);
+       if (queues & BIT(IEEE80211_AC_VI))
+               prio_queues |= BIT(rqpn->dma_map_vi);
+       if (queues & BIT(IEEE80211_AC_BE))
+               prio_queues |= BIT(rqpn->dma_map_be);
+       if (queues & BIT(IEEE80211_AC_BK))
+               prio_queues |= BIT(rqpn->dma_map_bk);
+
+       return prio_queues;
+}
+
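+/* Wait for a single priority queue to drain.  The queue is backed by a
+ * block of reserved pages, so it is empty once the available page count
+ * read back from its FIFOPAGE_INFO register equals the reserved count.
+ */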
+static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
+                                      u32 prio_queue, bool drop)
+{
+       u32 addr;
+       u16 avail_page, rsvd_page;
+       int i;
+
+       switch (prio_queue) {
+       case RTW_DMA_MAPPING_EXTRA:
+               addr = REG_FIFOPAGE_INFO_4;
+               break;
+       case RTW_DMA_MAPPING_LOW:
+               addr = REG_FIFOPAGE_INFO_2;
+               break;
+       case RTW_DMA_MAPPING_NORMAL:
+               addr = REG_FIFOPAGE_INFO_3;
+               break;
+       case RTW_DMA_MAPPING_HIGH:
+               addr = REG_FIFOPAGE_INFO_1;
+               break;
+       default:
+               return;
+       }
+
+       /* poll up to 100 msecs until all of the reserved pages are available */
+       for (i = 0; i < 5; i++) {
+               rsvd_page = rtw_read16(rtwdev, addr);
+               avail_page = rtw_read16(rtwdev, addr + 2);
+               if (rsvd_page == avail_page)
+                       return;
+
+               msleep(20);
+       }
+
+       /* The priority queue is still not empty; warn unless the caller
+        * asked to drop pending frames anyway.
+        *
+        * Note that flushing the tx queues under heavy traffic (e.g.
+        * 100 Mbps uplink) can drop some of the packets, and draining a
+        * full priority queue can take around 2 seconds.
+        */
+       if (!drop)
+               rtw_warn(rtwdev, "timed out flushing queue %d\n", prio_queue);
+}
+
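+/* Flush each priority queue whose bit is set in @prio_queues. */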
+static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
+                                     u32 prio_queues, bool drop)
+{
+       u32 q;
+
+       for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
+               if (prio_queues & BIT(q))
+                       __rtw_mac_flush_prio_queue(rtwdev, q, drop);
+}
+
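+/* Flush the hardware tx queues in @queues.  When @drop is true the
+ * caller intends to drop pending frames, so no timeout warning is
+ * raised for queues that fail to drain.
+ */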
+void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
+{
+       u32 prio_queues;
+
+       /* If all of the hardware queues are requested to be flushed,
+        * or the priority queue mapping has not been set up yet,
+        * flush all of the priority queues.
+        */
+       if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
+               prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
+       else
+               prio_queues = get_priority_queues(rtwdev, queues);
+
+       rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
+}
+
 static int txdma_queue_mapping(struct rtw_dev *rtwdev)
 {
        struct rtw_chip_info *chip = rtwdev->chip;
                return -EINVAL;
        }
 
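+       /* keep the mapping so rtw_mac_flush_queues() can translate ACs */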
+       rtwdev->fifo.rqpn = rqpn;
        txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
        txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
        txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
 
        sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
 }
 
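+/* mac80211 .flush callback: leave deep power save first so the flush
+ * can access the device's registers, then drain the requested queues.
+ */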
+static void rtw_ops_flush(struct ieee80211_hw *hw,
+                         struct ieee80211_vif *vif,
+                         u32 queues, bool drop)
+{
+       struct rtw_dev *rtwdev = hw->priv;
+
+       mutex_lock(&rtwdev->mutex);
+       rtw_leave_lps_deep(rtwdev);
+
+       rtw_mac_flush_queues(rtwdev, queues, drop);
+       mutex_unlock(&rtwdev->mutex);
+}
+
 const struct ieee80211_ops rtw_ops = {
        .tx                     = rtw_ops_tx,
        .wake_tx_queue          = rtw_ops_wake_tx_queue,
        .mgd_prepare_tx         = rtw_ops_mgd_prepare_tx,
        .set_rts_threshold      = rtw_ops_set_rts_threshold,
        .sta_statistics         = rtw_ops_sta_statistics,
+       .flush                  = rtw_ops_flush,
 };
 EXPORT_SYMBOL(rtw_ops);