net: mana: Define data structures for protection domain and memory registration
author	Ajay Sharma <sharmaajay@microsoft.com>
Thu, 3 Nov 2022 19:16:29 +0000 (12:16 -0700)
committer	Leon Romanovsky <leonro@nvidia.com>
Thu, 10 Nov 2022 05:57:27 +0000 (07:57 +0200)
The MANA hardware supports protection domains and memory registration for use
in an RDMA environment. Add these definitions and expose them for use by the
RDMA driver.
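
For context, a minimal sketch (not part of this patch) of how an RDMA
driver might allocate a protection domain with the structures defined
below; example_create_pd() is a hypothetical helper, and the flow assumes
the same mana_gd_init_req_hdr()/mana_gd_send_request() pattern already
used in gdma_main.c:

/* Hypothetical helper: allocate a PD via the new GDMA_CREATE_PD request. */
static int example_create_pd(struct gdma_context *gc,
			     gdma_obj_handle_t *pd_handle, u32 *pd_id)
{
	struct gdma_create_pd_resp resp = {};
	struct gdma_create_pd_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
			     sizeof(resp));
	/* req.flags left at 0; only GDMA_PD_FLAG_INVALID is defined so far */

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to create PD: %d, 0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	*pd_handle = resp.pd_handle;	/* referenced by later MR requests */
	*pd_id = resp.pd_id;
	return 0;
}

On success, pd_handle identifies the protection domain in subsequent
GDMA_CREATE_MR requests, and the PD is released with a GDMA_DESTROY_PD
request carrying the same handle.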

Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
Signed-off-by: Long Li <longli@microsoft.com>
Link: https://lore.kernel.org/r/1667502990-2559-12-git-send-email-longli@linuxonhyperv.com
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Acked-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/net/ethernet/microsoft/mana/mana_en.c
include/net/mana/gdma.h

diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 69795bc679e79a22b43571f70a78d48c5fe98860..46a7d1e6ece983dcf3162a625ef8523b97ccd04e 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -198,7 +198,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
        req.type = queue->type;
        req.pdid = queue->gdma_dev->pdid;
        req.doolbell_id = queue->gdma_dev->doorbell;
-       req.gdma_region = queue->mem_info.gdma_region;
+       req.gdma_region = queue->mem_info.dma_region_handle;
        req.queue_size = queue->queue_size;
        req.log2_throttle_limit = queue->eq.log2_throttle_limit;
        req.eq_pci_msix_index = queue->eq.msix_index;
@@ -212,7 +212,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
 
        queue->id = resp.queue_index;
        queue->eq.disable_needed = true;
-       queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+       queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
        return 0;
 }
 
@@ -671,24 +671,30 @@ free_q:
        return err;
 }
 
-static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
+int mana_gd_destroy_dma_region(struct gdma_context *gc,
+                              gdma_obj_handle_t dma_region_handle)
 {
        struct gdma_destroy_dma_region_req req = {};
        struct gdma_general_resp resp = {};
        int err;
 
-       if (gdma_region == GDMA_INVALID_DMA_REGION)
-               return;
+       if (dma_region_handle == GDMA_INVALID_DMA_REGION)
+               return 0;
 
        mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
                             sizeof(resp));
-       req.gdma_region = gdma_region;
+       req.dma_region_handle = dma_region_handle;
 
        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-       if (err || resp.hdr.status)
+       if (err || resp.hdr.status) {
                dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
                        err, resp.hdr.status);
+               return -EPROTO;
+       }
+
+       return 0;
 }
+EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
 
 static int mana_gd_create_dma_region(struct gdma_dev *gd,
                                     struct gdma_mem_info *gmi)
@@ -733,14 +739,15 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
        if (err)
                goto out;
 
-       if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
+       if (resp.hdr.status ||
+           resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
                dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
                        resp.hdr.status);
                err = -EPROTO;
                goto out;
        }
 
-       gmi->gdma_region = resp.gdma_region;
+       gmi->dma_region_handle = resp.dma_region_handle;
 out:
        kfree(req);
        return err;
@@ -863,7 +870,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
                return;
        }
 
-       mana_gd_destroy_dma_region(gc, gmi->gdma_region);
+       mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
        mana_gd_free_memory(gmi);
        kfree(queue);
 }
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index f6bcd0cc6cda676ab57ac569658b7471f56d3e64..1c59502d34b54eec620ccbac651d0bf45f9f00df 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1523,10 +1523,10 @@ static int mana_create_txq(struct mana_port_context *apc,
                memset(&wq_spec, 0, sizeof(wq_spec));
                memset(&cq_spec, 0, sizeof(cq_spec));
 
-               wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
+               wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
                wq_spec.queue_size = txq->gdma_sq->queue_size;
 
-               cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+               cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
                cq_spec.queue_size = cq->gdma_cq->queue_size;
                cq_spec.modr_ctx_id = 0;
                cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1541,8 +1541,10 @@ static int mana_create_txq(struct mana_port_context *apc,
                txq->gdma_sq->id = wq_spec.queue_index;
                cq->gdma_cq->id = cq_spec.queue_index;
 
-               txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
-               cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+               txq->gdma_sq->mem_info.dma_region_handle =
+                       GDMA_INVALID_DMA_REGION;
+               cq->gdma_cq->mem_info.dma_region_handle =
+                       GDMA_INVALID_DMA_REGION;
 
                txq->gdma_txq_id = txq->gdma_sq->id;
 
@@ -1753,10 +1755,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 
        memset(&wq_spec, 0, sizeof(wq_spec));
        memset(&cq_spec, 0, sizeof(cq_spec));
-       wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
+       wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
        wq_spec.queue_size = rxq->gdma_rq->queue_size;
 
-       cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+       cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
        cq_spec.queue_size = cq->gdma_cq->queue_size;
        cq_spec.modr_ctx_id = 0;
        cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1769,8 +1771,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
        rxq->gdma_rq->id = wq_spec.queue_index;
        cq->gdma_cq->id = cq_spec.queue_index;
 
-       rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
-       cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+       rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+       cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
 
        rxq->gdma_id = rxq->gdma_rq->id;
        cq->gdma_id = cq->gdma_cq->id;
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 055408a5baf3de613899b46282b16a6f11e7e92a..221adc96340cb06e461e48e9d54e6d11ab77ed78 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -29,6 +29,10 @@ enum gdma_request_type {
        GDMA_CREATE_DMA_REGION          = 25,
        GDMA_DMA_REGION_ADD_PAGES       = 26,
        GDMA_DESTROY_DMA_REGION         = 27,
+       GDMA_CREATE_PD                  = 29,
+       GDMA_DESTROY_PD                 = 30,
+       GDMA_CREATE_MR                  = 31,
+       GDMA_DESTROY_MR                 = 32,
 };
 
 #define GDMA_RESOURCE_DOORBELL_PAGE    27
@@ -61,6 +65,8 @@ enum {
        GDMA_DEVICE_MANA        = 2,
 };
 
+typedef u64 gdma_obj_handle_t;
+
 struct gdma_resource {
        /* Protect the bitmap */
        spinlock_t lock;
@@ -194,7 +200,7 @@ struct gdma_mem_info {
        u64 length;
 
        /* Allocated by the PF driver */
-       u64 gdma_region;
+       gdma_obj_handle_t dma_region_handle;
 };
 
 #define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
@@ -618,7 +624,7 @@ struct gdma_create_queue_req {
        u32 reserved1;
        u32 pdid;
        u32 doolbell_id;
-       u64 gdma_region;
+       gdma_obj_handle_t gdma_region;
        u32 reserved2;
        u32 queue_size;
        u32 log2_throttle_limit;
@@ -645,6 +651,28 @@ struct gdma_disable_queue_req {
        u32 alloc_res_id_on_creation;
 }; /* HW DATA */
 
+enum atb_page_size {
+       ATB_PAGE_SIZE_4K,
+       ATB_PAGE_SIZE_8K,
+       ATB_PAGE_SIZE_16K,
+       ATB_PAGE_SIZE_32K,
+       ATB_PAGE_SIZE_64K,
+       ATB_PAGE_SIZE_128K,
+       ATB_PAGE_SIZE_256K,
+       ATB_PAGE_SIZE_512K,
+       ATB_PAGE_SIZE_1M,
+       ATB_PAGE_SIZE_2M,
+       ATB_PAGE_SIZE_MAX,
+};
+
+enum gdma_mr_access_flags {
+       GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
+       GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
+       GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
+       GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
+       GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
+};
+
 /* GDMA_CREATE_DMA_REGION */
 struct gdma_create_dma_region_req {
        struct gdma_req_hdr hdr;
@@ -671,14 +699,14 @@ struct gdma_create_dma_region_req {
 
 struct gdma_create_dma_region_resp {
        struct gdma_resp_hdr hdr;
-       u64 gdma_region;
+       gdma_obj_handle_t dma_region_handle;
 }; /* HW DATA */
 
 /* GDMA_DMA_REGION_ADD_PAGES */
 struct gdma_dma_region_add_pages_req {
        struct gdma_req_hdr hdr;
 
-       u64 gdma_region;
+       gdma_obj_handle_t dma_region_handle;
 
        u32 page_addr_list_len;
        u32 reserved3;
@@ -690,9 +718,88 @@ struct gdma_dma_region_add_pages_req {
 struct gdma_destroy_dma_region_req {
        struct gdma_req_hdr hdr;
 
-       u64 gdma_region;
+       gdma_obj_handle_t dma_region_handle;
 }; /* HW DATA */
 
+enum gdma_pd_flags {
+       GDMA_PD_FLAG_INVALID = 0,
+};
+
+struct gdma_create_pd_req {
+       struct gdma_req_hdr hdr;
+       enum gdma_pd_flags flags;
+       u32 reserved;
+};/* HW DATA */
+
+struct gdma_create_pd_resp {
+       struct gdma_resp_hdr hdr;
+       gdma_obj_handle_t pd_handle;
+       u32 pd_id;
+       u32 reserved;
+};/* HW DATA */
+
+struct gdma_destroy_pd_req {
+       struct gdma_req_hdr hdr;
+       gdma_obj_handle_t pd_handle;
+};/* HW DATA */
+
+struct gdma_destroy_pd_resp {
+       struct gdma_resp_hdr hdr;
+};/* HW DATA */
+
+enum gdma_mr_type {
+       /* Guest Virtual Address - MRs of this type allow access
+        * to memory mapped by PTEs associated with this MR using a virtual
+        * address that is set up in the MST
+        */
+       GDMA_MR_TYPE_GVA = 2,
+};
+
+struct gdma_create_mr_params {
+       gdma_obj_handle_t pd_handle;
+       enum gdma_mr_type mr_type;
+       union {
+               struct {
+                       gdma_obj_handle_t dma_region_handle;
+                       u64 virtual_address;
+                       enum gdma_mr_access_flags access_flags;
+               } gva;
+       };
+};
+
+struct gdma_create_mr_request {
+       struct gdma_req_hdr hdr;
+       gdma_obj_handle_t pd_handle;
+       enum gdma_mr_type mr_type;
+       u32 reserved_1;
+
+       union {
+               struct {
+                       gdma_obj_handle_t dma_region_handle;
+                       u64 virtual_address;
+                       enum gdma_mr_access_flags access_flags;
+               } gva;
+
+       };
+       u32 reserved_2;
+};/* HW DATA */
+
+struct gdma_create_mr_response {
+       struct gdma_resp_hdr hdr;
+       gdma_obj_handle_t mr_handle;
+       u32 lkey;
+       u32 rkey;
+};/* HW DATA */
+
+struct gdma_destroy_mr_request {
+       struct gdma_req_hdr hdr;
+       gdma_obj_handle_t mr_handle;
+};/* HW DATA */
+
+struct gdma_destroy_mr_response {
+       struct gdma_resp_hdr hdr;
+};/* HW DATA */
+
 int mana_gd_verify_vf_version(struct pci_dev *pdev);
 
 int mana_gd_register_device(struct gdma_dev *gd);
@@ -719,4 +826,8 @@ void mana_gd_free_memory(struct gdma_mem_info *gmi);
 
 int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
                         u32 resp_len, void *resp);
+
+int mana_gd_destroy_dma_region(struct gdma_context *gc,
+                              gdma_obj_handle_t dma_region_handle);
+
 #endif /* _GDMA_H */
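
As an illustration (not part of this patch), a sketch of how the
driver-side gdma_create_mr_params added above might be translated into
the on-the-wire gdma_create_mr_request for a GVA-type registration; the
helper name is an assumption for the example:

/* Hypothetical helper: fill a GDMA_CREATE_MR request from MR parameters. */
static void example_fill_create_mr_req(struct gdma_create_mr_request *req,
				       const struct gdma_create_mr_params *p)
{
	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_MR, sizeof(*req),
			     sizeof(struct gdma_create_mr_response));

	req->pd_handle = p->pd_handle;	/* PD obtained from GDMA_CREATE_PD */
	req->mr_type = p->mr_type;

	if (p->mr_type == GDMA_MR_TYPE_GVA) {
		/* MR backed by an existing DMA region, addressed through
		 * the virtual address set up in the MST.
		 */
		req->gva.dma_region_handle = p->gva.dma_region_handle;
		req->gva.virtual_address = p->gva.virtual_address;
		req->gva.access_flags = p->gva.access_flags;
	}
}

The gdma_create_mr_response then returns the mr_handle along with the
lkey/rkey pair, and the registration is undone with a GDMA_DESTROY_MR
request carrying that mr_handle.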