RDMA/vmw_pvrdma: Use for_each_sg_dma_page iterator on umem SGL
author Shiraz, Saleem <shiraz.saleem@intel.com>
Mon, 11 Feb 2019 15:25:03 +0000 (09:25 -0600)
committer Jason Gunthorpe <jgg@mellanox.com>
Mon, 11 Feb 2019 22:24:55 +0000 (15:24 -0700)
Use the for_each_sg_dma_page iterator variant to walk the umem DMA-mapped
SGL and get the page DMA address. This avoids the extra inner loop over the
pages of each SGE that the for_each_sg iterator requires.

Additionally, purge umem->page_shift usage in the driver as it is only
relevant for ODP MRs. Use the system page size and shift instead.
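
For reference, a minimal sketch (not part of this commit) of the iteration
pattern the driver adopts here; walk_umem_pages() and consume_page() are
illustrative placeholders standing in for pvrdma_page_dir_insert_umem() and
pvrdma_page_dir_insert_dma():

    /* Hedged sketch: walk a DMA-mapped umem one PAGE_SIZE block at a time
     * using for_each_sg_dma_page(), calling a driver-specific callback for
     * each page DMA address.
     */
    #include <linux/scatterlist.h>
    #include <rdma/ib_umem.h>

    static int walk_umem_pages(struct ib_umem *umem,
                               int (*consume_page)(dma_addr_t addr, void *ctx),
                               void *ctx)
    {
            struct sg_dma_page_iter sg_iter;
            int ret;

            /* Iterates PAGE_SIZE chunks of the DMA-mapped SGL directly; no
             * inner loop over sg_dma_len() and no use of umem->page_shift.
             */
            for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
                    dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);

                    ret = consume_page(addr, ctx);
                    if (ret)
                            return ret;
            }

            return 0;
    }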

Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c

index fb0c5c0976b34521f872cf04ebeb780e89d6cd04..7944c58ded0e59e001a21abfbe7e8aade1641cc1 100644
@@ -183,25 +183,20 @@ int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
                                struct ib_umem *umem, u64 offset)
 {
        u64 i = offset;
-       int j, entry;
-       int ret = 0, len = 0;
-       struct scatterlist *sg;
+       int ret = 0;
+       struct sg_dma_page_iter sg_iter;
 
        if (offset >= pdir->npages)
                return -EINVAL;
 
-       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-               len = sg_dma_len(sg) >> PAGE_SHIFT;
-               for (j = 0; j < len; j++) {
-                       dma_addr_t addr = sg_dma_address(sg) +
-                                         (j << umem->page_shift);
+       for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+               dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
 
-                       ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
-                       if (ret)
-                               goto exit;
+               ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
+               if (ret)
+                       goto exit;
 
-                       i++;
-               }
+               i++;
        }
 
 exit: