ibmvnic: Introduce indirect subordinate Command Response Queue buffer
Author:     Thomas Falcon <tlfalcon@linux.ibm.com>
AuthorDate: Thu, 19 Nov 2020 01:12:17 +0000 (19:12 -0600)
Commit:     Jakub Kicinski <kuba@kernel.org>
CommitDate: Sat, 21 Nov 2020 03:50:33 +0000 (19:50 -0800)
This patch introduces the infrastructure to send batched subordinate
Command Response Queue descriptors, which are used by the ibmvnic
driver to send TX frame and RX buffer descriptors.

Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
Acked-by: Lijun Pan <ljp@linux.ibm.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
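[Editor's note] The batching model this infrastructure enables: the TX and RX
paths fill scrq->ind_buf.indir_arr with descriptors, advancing ind_buf.index,
then hand the whole batch to the hypervisor in a single H_SEND_SUB_CRQ_INDIRECT
call rather than issuing one hypercall per descriptor. The callers that do this
land in follow-on patches; the sketch below shows roughly what a flush could
look like, assuming the driver's existing send_subcrq_indirect() helper. The
function name ibmvnic_flush_ind_arr() is invented here for illustration.

        /* Illustrative sketch only: flush every descriptor batched in the
         * indirect buffer with one H_SEND_SUB_CRQ_INDIRECT hypercall. The
         * helper name is hypothetical; the real callers arrive in later
         * patches in this series.
         */
        static int ibmvnic_flush_ind_arr(struct ibmvnic_adapter *adapter,
                                         struct ibmvnic_sub_crq_queue *scrq)
        {
                struct ibmvnic_ind_xmit_queue *ind_buf = &scrq->ind_buf;
                u64 entries = ind_buf->index;
                int rc;

                if (!entries)
                        return 0;       /* nothing batched since the last flush */

                /* Hand the hypervisor the DMA address of the descriptor array
                 * and the number of entries queued, then reset for the next
                 * batch.
                 */
                rc = send_subcrq_indirect(adapter, scrq->handle,
                                          (u64)ind_buf->indir_dma, entries);
                ind_buf->index = 0;
                return rc;
        }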

diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index da15913879f8e7d8c89f22823b70cb27342f479e..3884f8a683a7c163365067b91293ea7d622b92c3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2858,6 +2858,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
        memset(scrq->msgs, 0, 4 * PAGE_SIZE);
        atomic_set(&scrq->used, 0);
        scrq->cur = 0;
+       scrq->ind_buf.index = 0;
 
        rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
                           4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
@@ -2909,6 +2910,11 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
                }
        }
 
+       dma_free_coherent(dev,
+                         IBMVNIC_IND_ARR_SZ,
+                         scrq->ind_buf.indir_arr,
+                         scrq->ind_buf.indir_dma);
+
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
        free_pages((unsigned long)scrq->msgs, 2);
@@ -2955,6 +2961,17 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
        scrq->adapter = adapter;
        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
+       scrq->ind_buf.index = 0;
+
+       scrq->ind_buf.indir_arr =
+               dma_alloc_coherent(dev,
+                                  IBMVNIC_IND_ARR_SZ,
+                                  &scrq->ind_buf.indir_dma,
+                                  GFP_KERNEL);
+
+       if (!scrq->ind_buf.indir_arr)
+               goto indir_failed;
+
        spin_lock_init(&scrq->lock);
 
        netdev_dbg(adapter->netdev,
@@ -2963,6 +2980,12 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
        return scrq;
 
+indir_failed:
+       do {
+               rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
+                                       adapter->vdev->unit_address,
+                                       scrq->crq_num);
+       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 reg_failed:
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 217dcc7ded709260543cbbbb05db496001bd1840..4a63e98867196af1dabc9a66327ddc4d33c35174 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -31,6 +31,8 @@
 #define IBMVNIC_BUFFS_PER_POOL 100
 #define IBMVNIC_MAX_QUEUES     16
 #define IBMVNIC_MAX_QUEUE_SZ   4096
+#define IBMVNIC_MAX_IND_DESCS  128
+#define IBMVNIC_IND_ARR_SZ     (IBMVNIC_MAX_IND_DESCS * 32)
 
 #define IBMVNIC_TSO_BUF_SZ     65536
 #define IBMVNIC_TSO_BUFS       64
@@ -861,6 +863,12 @@ union sub_crq {
        struct ibmvnic_rx_buff_add_desc rx_add;
 };
 
+struct ibmvnic_ind_xmit_queue {
+       union sub_crq *indir_arr;
+       dma_addr_t indir_dma;
+       int index;
+};
+
 struct ibmvnic_sub_crq_queue {
        union sub_crq *msgs;
        int size, cur;
@@ -873,6 +881,7 @@ struct ibmvnic_sub_crq_queue {
        spinlock_t lock;
        struct sk_buff *rx_skb_top;
        struct ibmvnic_adapter *adapter;
+       struct ibmvnic_ind_xmit_queue ind_buf;
        atomic_t used;
        char name[32];
        u64 handle;
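[Editor's note] On sizing: the literal 32 in IBMVNIC_IND_ARR_SZ corresponds to
the 32-byte sub-CRQ descriptor (union sub_crq), so each queue's indirect array
is 128 * 32 = 4096 bytes, one page on 4K-page systems, matching the single
dma_alloc_coherent() per sub-CRQ above. Appending to the batch then reduces to
index arithmetic; a minimal sketch, with the helper name invented for
illustration:

        /* Illustrative sketch only: reserve the next slot in the indirect
         * array. The caller is expected to flush (as sketched earlier) once
         * the batch reaches IBMVNIC_MAX_IND_DESCS entries; the helper name
         * is hypothetical.
         */
        static union sub_crq *ibmvnic_next_ind_desc(struct ibmvnic_sub_crq_queue *scrq)
        {
                struct ibmvnic_ind_xmit_queue *ind_buf = &scrq->ind_buf;

                if (ind_buf->index >= IBMVNIC_MAX_IND_DESCS)
                        return NULL;    /* batch full, flush first */

                return &ind_buf->indir_arr[ind_buf->index++];
        }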