        I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
        I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
        I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
+       I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
 };
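
The new entry follows the existing I40E_VSI_STAT pattern. As a sketch of
how the table entry resolves (the macros live in i40e_ethtool.c; their
exact form varies slightly across kernel versions), each entry records a
display name plus the size and offset of a field in struct i40e_vsi:

        #define I40E_STAT(_type, _name, _stat) { \
                .stat_string = _name, \
                .sizeof_stat = sizeof_field(_type, _stat), \
                .stat_offset = offsetof(_type, _stat) \
        }
        #define I40E_VSI_STAT(_name, _stat) \
                I40E_STAT(struct i40e_vsi, _name, _stat)

so "rx_cache_alloc" is served from vsi->rx_page_alloc at dump time. With
the patch applied, the counter appears in the ethtool stats dump (the
interface name here is only an example):

        ethtool -S eth0 | grep rx_cache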
 
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
  * but they aren't; the PF stats are gathered from port-level hardware
  * registers rather than from the per-netdev counters.
  */
 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 {
+       u64 rx_page, rx_buf, rx_reuse, rx_alloc;
        struct i40e_pf *pf = vsi->back;
-       u64 rx_page, rx_buf, rx_reuse;
        struct rtnl_link_stats64 *ons;
        struct rtnl_link_stats64 *ns;   /* netdev stats */
        struct i40e_eth_stats *oes;
        rx_page = 0;
        rx_buf = 0;
        rx_reuse = 0;
+       rx_alloc = 0;
        rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Rx ring */
                p = READ_ONCE(vsi->rx_rings[q]);
                rx_buf += p->rx_stats.alloc_buff_failed;
                rx_page += p->rx_stats.alloc_page_failed;
                rx_reuse += p->rx_stats.page_reuse_count;
+               rx_alloc += p->rx_stats.page_alloc_count;
 
                if (i40e_enabled_xdp_vsi(vsi)) {
                        /* locate XDP ring */
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;
        vsi->rx_page_reuse = rx_reuse;
+       vsi->rx_page_alloc = rx_alloc;
 
        ns->rx_packets = rx_p;
        ns->rx_bytes = rx_b;
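The counter itself is incremented in the Rx buffer-allocation path,
which (per the upstream driver layout) is i40e_alloc_mapped_page() in
i40e_txrx.c: once the page-allocation failure check has passed, the new
stat is bumped before the page is DMA-mapped: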
 
                return false;
        }
 
+       rx_ring->rx_stats.page_alloc_count++;
+
        /* map page for use */
        dma = dma_map_page_attrs(rx_ring->dev, page, 0,
                                 i40e_rx_pg_size(rx_ring),
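
For the hunks above to compile, two companion changes (elided from this
excerpt) are assumed: a page_alloc_count field in the per-ring Rx
statistics (i40e_txrx.h) and an rx_page_alloc accumulator in struct
i40e_vsi (i40e.h). A minimal sketch, with surrounding fields abbreviated
and field placement assumed rather than taken from the actual patch:

        /* i40e_txrx.h (sketch) */
        struct i40e_rx_queue_stats {
                /* ... existing alloc/reuse counters ... */
                u64 page_reuse_count;
                u64 page_alloc_count;   /* pages freshly allocated for Rx */
        };

        /* i40e.h (sketch) */
        struct i40e_vsi {
                /* ... */
                u64 rx_page_reuse;
                u64 rx_page_alloc;      /* aggregated from all Rx rings */
                /* ... */
        };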