static void gve_drain_page_cache(struct gve_priv *priv)
 {
-       struct page_frag_cache *nc;
        int i;
 
-       for (i = 0; i < priv->rx_cfg.num_queues; i++) {
-               nc = &priv->rx[i].page_cache;
-               if (nc->va) {
-                       __page_frag_cache_drain(virt_to_page(nc->va),
-                                               nc->pagecnt_bias);
-                       nc->va = NULL;
-               }
-       }
+       for (i = 0; i < priv->rx_cfg.num_queues; i++)
+               page_frag_cache_drain(&priv->rx[i].page_cache);
 }
 
 static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
 
 static void
 mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
-       struct page *page;
        int i;
 
        for (i = 0; i < q->n_desc; i++) {
                entry->buf = NULL;
        }
 
-       if (!q->cache.va)
-               return;
-
-       page = virt_to_page(q->cache.va);
-       __page_frag_cache_drain(page, q->cache.pagecnt_bias);
-       memset(&q->cache, 0, sizeof(q->cache));
+       page_frag_cache_drain(&q->cache);
 }
 
 static void
 mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
-       struct page *page;
-
        for (;;) {
                void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

                if (!buf)
                        break;

                skb_free_frag(buf);
        }
 
-       if (!q->cache.va)
-               return;
-
-       page = virt_to_page(q->cache.va);
-       __page_frag_cache_drain(page, q->cache.pagecnt_bias);
-       memset(&q->cache, 0, sizeof(q->cache));
+       page_frag_cache_drain(&q->cache);
 }
 
 static void
 
 
 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 {
-       struct page *page;
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
        unsigned int noreclaim_flag;

        if (queue->hdr_digest || queue->data_digest)
                nvme_tcp_free_crypto(queue);
 
-       if (queue->pf_cache.va) {
-               page = virt_to_head_page(queue->pf_cache.va);
-               __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
-               queue->pf_cache.va = NULL;
-       }
+       page_frag_cache_drain(&queue->pf_cache);
 
        noreclaim_flag = memalloc_noreclaim_save();
        /* ->sock will be released by fput() */
 
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
 {
-       struct page *page;
        struct nvmet_tcp_queue *queue =
                container_of(w, struct nvmet_tcp_queue, release_work);
 
        if (queue->hdr_digest || queue->data_digest)
                nvmet_tcp_free_crypto(queue);
        ida_free(&nvmet_tcp_queue_ida, queue->idx);
-       page = virt_to_head_page(queue->pf_cache.va);
-       __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+       page_frag_cache_drain(&queue->pf_cache);
        kfree(queue);
 }
 
 
 extern void free_pages(unsigned long addr, unsigned int order);
 
 struct page_frag_cache;
+void page_frag_cache_drain(struct page_frag_cache *nc);
 extern void __page_frag_cache_drain(struct page *page, unsigned int count);
 void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
                              gfp_t gfp_mask, unsigned int align_mask);
 
        return page;
 }
 
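+/**
+ * page_frag_cache_drain - release the page held by a page_frag_cache
+ * @nc: page_frag_cache to drain
+ *
+ * Drop the pagecnt_bias references held on the cached page, if any, and
+ * clear the cache so the next allocation refills it.
+ */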
+void page_frag_cache_drain(struct page_frag_cache *nc)
+{
+       if (!nc->va)
+               return;
+
+       __page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
+       nc->va = NULL;
+}
+EXPORT_SYMBOL(page_frag_cache_drain);
+
 void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);