gve: Modify rx_buf_alloc_fail counter centrally and closer to failure
authorAnkit Garg <nktgrg@google.com>
Wed, 24 Jan 2024 20:54:35 +0000 (20:54 +0000)
committerJakub Kicinski <kuba@kernel.org>
Fri, 26 Jan 2024 01:18:56 +0000 (17:18 -0800)
Previously, each caller of gve_rx_alloc_buffer had to increase the counter,
 and as a result one caller was not tracking those failures. The counter is
 now increased at a common location so callers don't have to duplicate
 code or miss counter management.

Signed-off-by: Ankit Garg <nktgrg@google.com>
Link: https://lore.kernel.org/r/20240124205435.1021490-1-nktgrg@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/google/gve/gve_rx.c

index c294a1595b6acf03dcc549373acdb231726659e5..c32b3d40beb0db373efbedb2bc826aec8d8012e0 100644 (file)
@@ -105,7 +105,8 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
 
 static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
                               struct gve_rx_slot_page_info *page_info,
-                              union gve_rx_data_slot *data_slot)
+                              union gve_rx_data_slot *data_slot,
+                              struct gve_rx_ring *rx)
 {
        struct page *page;
        dma_addr_t dma;
@@ -113,8 +114,12 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
 
        err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
                             GFP_ATOMIC);
-       if (err)
+       if (err) {
+               u64_stats_update_begin(&rx->statss);
+               rx->rx_buf_alloc_fail++;
+               u64_stats_update_end(&rx->statss);
                return err;
+       }
 
        gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
        return 0;
@@ -156,8 +161,9 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
                                            &rx->data.data_ring[i].qpl_offset);
                        continue;
                }
-               err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
-                                         &rx->data.data_ring[i]);
+               err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
+                                         &rx->data.page_info[i],
+                                         &rx->data.data_ring[i], rx);
                if (err)
                        goto alloc_err_rda;
        }
@@ -936,10 +942,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
                                gve_rx_free_buffer(dev, page_info, data_slot);
                                page_info->page = NULL;
                                if (gve_rx_alloc_buffer(priv, dev, page_info,
-                                                       data_slot)) {
-                                       u64_stats_update_begin(&rx->statss);
-                                       rx->rx_buf_alloc_fail++;
-                                       u64_stats_update_end(&rx->statss);
+                                                       data_slot, rx)) {
                                        break;
                                }
                        }