net: ena: Make a few cosmetic preparations to support large LLQ
Author:     Shay Agroskin <shayagr@amazon.com>
AuthorDate: Thu, 23 Mar 2023 16:36:06 +0000 (18:36 +0200)
Committer:  Jakub Kicinski <kuba@kernel.org>
CommitDate: Tue, 28 Mar 2023 02:49:58 +0000 (19:49 -0700)
Move the ena_calc_io_queue_size() implementation closer to the
beginning of the file so that it can later be called from
ena_device_init() without adding a function declaration.
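
A minimal sketch of the alternative this move avoids (placeholder
names, not the driver's actual signatures): if the definition stayed
below its caller, a forward declaration would have to precede the
caller, roughly:

  /* Illustrative only; placeholder names, not the driver's actual
   * signatures. With the definition below its caller, a prototype
   * like this would be needed near the top of the file:
   */
  static void calc_io_queue_size(u32 *ring_size);

  static int device_init(void)
  {
          u32 ring_size;

          /* the caller may use the function before its definition */
          calc_io_queue_size(&ring_size);

          return ring_size;
  }

  /* the definition may then appear anywhere later in the file */
  static void calc_io_queue_size(u32 *ring_size)
  {
          *ring_size = 1024;
  }

Placing ena_calc_io_queue_size() above ena_device_init() instead keeps
the file free of such a prototype.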

Also add an empty line in a few spots to separate logical blocks in
functions.

Reviewed-by: Simon Horman <simon.horman@corigine.com>
Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/amazon/ena/ena_netdev.c

index cbfe7f977270f7f5134d766b552449a35bd65927..794f8eec468a4a340f513c9cb94486027f24939d 100644
@@ -3364,6 +3364,71 @@ static const struct net_device_ops ena_netdev_ops = {
        .ndo_xdp_xmit           = ena_xdp_xmit,
 };
 
+static void ena_calc_io_queue_size(struct ena_adapter *adapter,
+                                  struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+       struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
+       struct ena_com_dev *ena_dev = adapter->ena_dev;
+       u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
+       u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
+       u32 max_tx_queue_size;
+       u32 max_rx_queue_size;
+
+       if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+               struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+                       &get_feat_ctx->max_queue_ext.max_queue_ext;
+               max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
+                                         max_queue_ext->max_rx_sq_depth);
+               max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
+
+               if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+                       max_tx_queue_size = min_t(u32, max_tx_queue_size,
+                                                 llq->max_llq_depth);
+               else
+                       max_tx_queue_size = min_t(u32, max_tx_queue_size,
+                                                 max_queue_ext->max_tx_sq_depth);
+
+               adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+                                                max_queue_ext->max_per_packet_tx_descs);
+               adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+                                                max_queue_ext->max_per_packet_rx_descs);
+       } else {
+               struct ena_admin_queue_feature_desc *max_queues =
+                       &get_feat_ctx->max_queues;
+               max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
+                                         max_queues->max_sq_depth);
+               max_tx_queue_size = max_queues->max_cq_depth;
+
+               if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+                       max_tx_queue_size = min_t(u32, max_tx_queue_size,
+                                                 llq->max_llq_depth);
+               else
+                       max_tx_queue_size = min_t(u32, max_tx_queue_size,
+                                                 max_queues->max_sq_depth);
+
+               adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+                                                max_queues->max_packet_tx_descs);
+               adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+                                                max_queues->max_packet_rx_descs);
+       }
+
+       max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
+       max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+
+       tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
+                                 max_tx_queue_size);
+       rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
+                                 max_rx_queue_size);
+
+       tx_queue_size = rounddown_pow_of_two(tx_queue_size);
+       rx_queue_size = rounddown_pow_of_two(rx_queue_size);
+
+       adapter->max_tx_ring_size  = max_tx_queue_size;
+       adapter->max_rx_ring_size = max_rx_queue_size;
+       adapter->requested_tx_ring_size = tx_queue_size;
+       adapter->requested_rx_ring_size = rx_queue_size;
+}
+
 static int ena_device_validate_params(struct ena_adapter *adapter,
                                      struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
@@ -4162,72 +4227,6 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
        pci_release_selected_regions(pdev, release_bars);
 }
 
-
-static void ena_calc_io_queue_size(struct ena_adapter *adapter,
-                                  struct ena_com_dev_get_features_ctx *get_feat_ctx)
-{
-       struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
-       struct ena_com_dev *ena_dev = adapter->ena_dev;
-       u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
-       u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
-       u32 max_tx_queue_size;
-       u32 max_rx_queue_size;
-
-       if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
-               struct ena_admin_queue_ext_feature_fields *max_queue_ext =
-                       &get_feat_ctx->max_queue_ext.max_queue_ext;
-               max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
-                                         max_queue_ext->max_rx_sq_depth);
-               max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
-
-               if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
-                       max_tx_queue_size = min_t(u32, max_tx_queue_size,
-                                                 llq->max_llq_depth);
-               else
-                       max_tx_queue_size = min_t(u32, max_tx_queue_size,
-                                                 max_queue_ext->max_tx_sq_depth);
-
-               adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-                                                max_queue_ext->max_per_packet_tx_descs);
-               adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-                                                max_queue_ext->max_per_packet_rx_descs);
-       } else {
-               struct ena_admin_queue_feature_desc *max_queues =
-                       &get_feat_ctx->max_queues;
-               max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
-                                         max_queues->max_sq_depth);
-               max_tx_queue_size = max_queues->max_cq_depth;
-
-               if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
-                       max_tx_queue_size = min_t(u32, max_tx_queue_size,
-                                                 llq->max_llq_depth);
-               else
-                       max_tx_queue_size = min_t(u32, max_tx_queue_size,
-                                                 max_queues->max_sq_depth);
-
-               adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-                                                max_queues->max_packet_tx_descs);
-               adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-                                                max_queues->max_packet_rx_descs);
-       }
-
-       max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
-       max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
-
-       tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
-                                 max_tx_queue_size);
-       rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
-                                 max_rx_queue_size);
-
-       tx_queue_size = rounddown_pow_of_two(tx_queue_size);
-       rx_queue_size = rounddown_pow_of_two(rx_queue_size);
-
-       adapter->max_tx_ring_size  = max_tx_queue_size;
-       adapter->max_rx_ring_size = max_rx_queue_size;
-       adapter->requested_tx_ring_size = tx_queue_size;
-       adapter->requested_rx_ring_size = rx_queue_size;
-}
-
 /* ena_probe - Device Initialization Routine
  * @pdev: PCI device information struct
  * @ent: entry in ena_pci_tbl
@@ -4364,6 +4363,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        "Failed to query interrupt moderation feature\n");
                goto err_device_destroy;
        }
+
        ena_init_io_rings(adapter,
                          0,
                          adapter->xdp_num_queues +
@@ -4488,6 +4488,7 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
        rtnl_lock(); /* lock released inside the below if-else block */
        adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
        ena_destroy_device(adapter, true);
+
        if (shutdown) {
                netif_device_detach(netdev);
                dev_close(netdev);