#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
 
+#define GVE_MAX_RX_BUFFER_SIZE 4096
+
 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
 
 #define GVE_XDP_ACTIONS 5
 
 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
 
+#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
+
 #define DQO_QPL_DEFAULT_TX_PAGES 512
 #define DQO_QPL_DEFAULT_RX_PAGES 2048
 
        struct gve_ptype_lut *ptype_lut_dqo;
 
        /* Must be a power of two. */
-       int data_buffer_size_dqo;
+       u16 data_buffer_size_dqo;
+       u16 max_rx_buffer_size; /* largest rx buffer size the device supports */
 
        enum gve_queue_format queue_format;
 
        /* Interrupt coalescing settings */
        u32 tx_coalesce_usecs;
        u32 rx_coalesce_usecs;
+
+       u16 header_buf_size; /* device-configured; header-split is supported iff non-zero */
 };
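/* Illustrative sketch, not part of the patch: given the comment on
 * header_buf_size above ("header-split is supported iff non-zero"), later
 * header-split plumbing could gate on it; the helper name is hypothetical.
 */
static inline bool gve_header_split_supported(const struct gve_priv *priv)
{
	/* Zero means the device did not configure a header buffer. */
	return priv->header_buf_size != 0;
}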
 
 enum gve_service_task_flags_bit {
 
                             struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
                             struct gve_device_option_dqo_rda **dev_op_dqo_rda,
                             struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
-                            struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+                            struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+                            struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
 {
        u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
        u16 option_length = be16_to_cpu(option->option_length);
                }
                *dev_op_jumbo_frames = (void *)(option + 1);
                break;
+       case GVE_DEV_OPT_ID_BUFFER_SIZES:
+               if (option_length < sizeof(**dev_op_buffer_sizes) ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "Buffer Sizes",
+                                (int)sizeof(**dev_op_buffer_sizes),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_buffer_sizes))
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT,
+                                "Buffer Sizes");
+               *dev_op_buffer_sizes = (void *)(option + 1);
+               break;
        default:
                /* If we don't recognize the option just continue
                 * without doing anything.
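/* Sketch (assumption, mirroring how the existing options are consumed):
 * each device option is a fixed gve_device_option header immediately
 * followed by option_length bytes of payload, which is why the parser
 * stores (void *)(option + 1), the first byte past the header. A
 * hypothetical accessor makes the pointer arithmetic explicit:
 */
static const void *gve_dev_opt_payload(const struct gve_device_option *option)
{
	/* Payload begins right after the fixed-size option header. */
	return option + 1;
}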
                           struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
                           struct gve_device_option_dqo_rda **dev_op_dqo_rda,
                           struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
-                          struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+                          struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+                          struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
 {
        const int num_options = be16_to_cpu(descriptor->num_device_options);
        struct gve_device_option *dev_opt;
                gve_parse_device_option(priv, descriptor, dev_opt,
                                        dev_op_gqi_rda, dev_op_gqi_qpl,
                                        dev_op_dqo_rda, dev_op_jumbo_frames,
-                                       dev_op_dqo_qpl);
+                                       dev_op_dqo_qpl, dev_op_buffer_sizes);
                dev_opt = next_opt;
        }
 
                                          const struct gve_device_option_jumbo_frames
                                          *dev_op_jumbo_frames,
                                          const struct gve_device_option_dqo_qpl
-                                         *dev_op_dqo_qpl)
+                                         *dev_op_dqo_qpl,
+                                         const struct gve_device_option_buffer_sizes
+                                         *dev_op_buffer_sizes)
 {
        /* Before control reaches this point, the page-size-capped max MTU from
         * the gve_device_descriptor field has already been stored in
                if (priv->rx_pages_per_qpl == 0)
                        priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
        }
+
+       if (dev_op_buffer_sizes &&
+           (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
+               priv->max_rx_buffer_size =
+                       be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
+               priv->header_buf_size =
+                       be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
+               dev_info(&priv->pdev->dev,
+                        "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
+                        priv->max_rx_buffer_size, priv->header_buf_size);
+       }
 }
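/* Illustrative sketch, not part of the patch: a consumer of the values
 * parsed above might clamp the DQO rx buffer size to the advertised
 * device limit, falling back to the default when no limit was given.
 * The helper name is hypothetical; both fields are u16 after this change.
 */
static void gve_clamp_data_buffer_size(struct gve_priv *priv, u16 requested)
{
	u16 limit = priv->max_rx_buffer_size ?: GVE_DEFAULT_RX_BUFFER_SIZE;

	priv->data_buffer_size_dqo = min(requested, limit);
}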
 
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
+       struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
        struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
        struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
        struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
        err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
                                         &dev_op_gqi_qpl, &dev_op_dqo_rda,
                                         &dev_op_jumbo_frames,
-                                        &dev_op_dqo_qpl);
+                                        &dev_op_dqo_qpl,
+                                        &dev_op_buffer_sizes);
        if (err)
                goto free_device_descriptor;
 
        priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
 
        gve_enable_supported_features(priv, supported_features_mask,
-                                     dev_op_jumbo_frames, dev_op_dqo_qpl);
+                                     dev_op_jumbo_frames, dev_op_dqo_qpl,
+                                     dev_op_buffer_sizes);
 
 free_device_descriptor:
        dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
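/* Note (sketch): the dev_op_* pointers point into the descriptor buffer
 * itself, so gve_enable_supported_features() must copy the buffer-size
 * values into priv before the dma_pool_free() above releases it.
 */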
 
 
 static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
 
+struct gve_device_option_buffer_sizes {
+       /* GVE_SUP_BUFFER_SIZES_MASK bit should be set in supported_features_mask */
+       __be32 supported_features_mask;
+       __be16 packet_buffer_size;
+       __be16 header_buffer_size;
+};
+
+static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
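/* Size check, worked out: supported_features_mask (4 bytes) +
 * packet_buffer_size (2) + header_buffer_size (2) = 8 bytes with no
 * implicit padding, matching the static_assert above.
 */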
+
 /* Terminology:
  *
  * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
        GVE_DEV_OPT_ID_DQO_RDA = 0x4,
        GVE_DEV_OPT_ID_DQO_QPL = 0x7,
        GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+       GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
 };
 
 enum gve_dev_opt_req_feat_mask {
        GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
        GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
        GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
 };
 
 enum gve_sup_feature_mask {
        GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+       GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
 };
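/* Worked bit values: 1 << 2 = 0x4 (jumbo frames) and 1 << 4 = 0x10
 * (buffer sizes); gve_enable_supported_features() tests these against
 * the descriptor's supported_features_mask before using an option.
 */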
 
 #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
        gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
        gve_driver_capability_dqo_rda = 3,
        gve_driver_capability_alt_miss_compl = 4,
+       gve_driver_capability_flexible_buffer_size = 5,
 };
 
 #define GVE_CAP1(a) BIT((int)a)
        (GVE_CAP1(gve_driver_capability_gqi_qpl) | \
         GVE_CAP1(gve_driver_capability_gqi_rda) | \
         GVE_CAP1(gve_driver_capability_dqo_rda) | \
-        GVE_CAP1(gve_driver_capability_alt_miss_compl))
+        GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
+        GVE_CAP1(gve_driver_capability_flexible_buffer_size))
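/* Worked value: FLAGS1 now sets bits 0, 1, 3, 4 and 5, i.e.
 * 0x1 + 0x2 + 0x8 + 0x10 + 0x20 = 0x3b, advertising flexible buffer
 * size support alongside the existing capabilities.
 */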
 
 #define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
 #define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
        __be16 packet_buffer_size;
        __be16 rx_buff_ring_size;
        u8 enable_rsc;
-       u8 padding[5];
+       u8 padding1;
+       __be16 header_buffer_size;
+       u8 padding2[2];
 };
 
 static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
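/* Sketch (assumption): the reworked tail, enable_rsc (1) + padding1 (1) +
 * header_buffer_size (2) + padding2 (2), occupies the same 6 bytes as the
 * old enable_rsc (1) + padding[5], so the 56-byte assert still holds.
 * A hypothetical caller enabling header-split might fill the new field as:
 */
static void gve_set_rx_hdr_buf_size(struct gve_adminq_create_rx_queue *cmd,
				    u16 header_buf_size)
{
	/* Admin queue fields are big-endian on the wire. */
	cmd->header_buffer_size = cpu_to_be16(header_buf_size);
}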