ice: Add support in the driver for associating queue with napi
authorAmritha Nambiar <amritha.nambiar@intel.com>
Fri, 1 Dec 2023 23:28:40 +0000 (15:28 -0800)
committerJakub Kicinski <kuba@kernel.org>
Tue, 5 Dec 2023 02:04:05 +0000 (18:04 -0800)
After the napi context is initialized, map the napi instance
to the queue/queue-set on the corresponding irq line.

Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Link: https://lore.kernel.org/r/170147332060.5260.13310934657151560599.stgit@anambiarhost.jf.intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_lib.h
drivers/net/ethernet/intel/ice/ice_main.c

index 7fa43827a3f06c7abf036338c9237184e605ecf5..edad5f9ab16ce031d18a1ee23886017744e20e57 100644 (file)
@@ -189,10 +189,18 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
        }
        q_vector = vsi->q_vectors[v_idx];
 
-       ice_for_each_tx_ring(tx_ring, q_vector->tx)
+       ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+               if (vsi->netdev)
+                       netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
+                                            NETDEV_QUEUE_TYPE_TX, NULL);
                tx_ring->q_vector = NULL;
-       ice_for_each_rx_ring(rx_ring, q_vector->rx)
+       }
+       ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+               if (vsi->netdev)
+                       netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
+                                            NETDEV_QUEUE_TYPE_RX, NULL);
                rx_ring->q_vector = NULL;
+       }
 
        /* only VSI with an associated netdev is set up with NAPI */
        if (vsi->netdev)
index d826b5afa14344d62a5fd12b45dec4601be5e209..83f6977fad22837c34f7d2077377383b4fa822d2 100644 (file)
@@ -2452,6 +2452,10 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
                        goto unroll_vector_base;
 
                ice_vsi_map_rings_to_vectors(vsi);
+
+               /* Associate q_vector rings to napi */
+               ice_vsi_set_napi_queues(vsi, true);
+
                vsi->stat_offsets_loaded = false;
 
                if (ice_is_xdp_ena_vsi(vsi)) {
@@ -2931,6 +2935,69 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
                synchronize_irq(vsi->q_vectors[i]->irq.virq);
 }
 
+/**
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @dev: device to which NAPI and queue belong
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context to associate (callers may pass NULL to clear it)
+ * @locked: is the rtnl_lock already held
+ *
+ * Wrapper for netif_queue_set_napi() that takes the rtnl_lock only when
+ * the caller does not already hold it, as indicated by @locked.
+ */
+static void
+ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+                  enum netdev_queue_type type, struct napi_struct *napi,
+                  bool locked)
+{
+       if (!locked)
+               rtnl_lock();
+       netif_queue_set_napi(dev, queue_index, type, napi);
+       if (!locked)
+               rtnl_unlock();
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Associate the q_vector napi with every Rx and Tx ring on the vector.
+ */
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+       struct ice_rx_ring *rx_ring;
+       struct ice_tx_ring *tx_ring;
+
+       ice_for_each_rx_ring(rx_ring, q_vector->rx)
+               ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+                                  NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+                                  locked);
+
+       ice_for_each_tx_ring(tx_ring, q_vector->tx)
+               ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+                                  NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+                                  locked);
+}
+
+/**
+ * ice_vsi_set_napi_queues - Associate queue[s] with napi for all vectors
+ * @vsi: VSI pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Walk every q_vector on @vsi and map its queues to its napi instance.
+ * No-op for a VSI without a netdev (only netdev VSIs set up NAPI).
+ */
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+{
+       int i;
+
+       if (!vsi->netdev)
+               return;
+
+       ice_for_each_q_vector(vsi, i)
+               ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+}
+
 /**
  * ice_vsi_release - Delete a VSI and free its resources
  * @vsi: the VSI being removed
index f24f5d1e6f9c7905beccd74b80978d86344fbe82..71bd27244941d549d9253af900629ccb36278072 100644 (file)
@@ -91,6 +91,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
 
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked);
+
 int ice_vsi_release(struct ice_vsi *vsi);
 
 void ice_vsi_close(struct ice_vsi *vsi);
index 43ba3e55b8c1f3cd45c78896c8089c7e7fb167d1..9e3d3919307b3a3ad06985a728f6e098863041df 100644 (file)
@@ -3375,9 +3375,11 @@ static void ice_napi_add(struct ice_vsi *vsi)
        if (!vsi->netdev)
                return;
 
-       ice_for_each_q_vector(vsi, v_idx)
+       ice_for_each_q_vector(vsi, v_idx) {
                netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
                               ice_napi_poll);
+               ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+       }
 }
 
 /**