if (shmem->pages) {
                        if (shmem->mapped) {
-                               dma_unmap_sg(vgdev->vdev->dev.parent,
-                                            shmem->pages->sgl, shmem->mapped,
-                                            DMA_TO_DEVICE);
+                               dma_unmap_sgtable(vgdev->vdev->dev.parent,
+                                            shmem->pages, DMA_TO_DEVICE, 0);
                                shmem->mapped = 0;
                        }
 
        }
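
Note: dma_unmap_sgtable() is the sg_table-aware helper from include/linux/dma-mapping.h. Where the old call passed shmem->mapped (the value dma_map_sg() had returned), the helper always unmaps sgt->orig_nents entries, the count the unmap side of the DMA-mapping API expects. Simplified from the kernel header, it is roughly:

	static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
			enum dma_data_direction dir, unsigned long attrs)
	{
		/* always unmap using the original (CPU-side) entry count */
		dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	}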
 
        if (use_dma_api) {
-               shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
-                                          shmem->pages->sgl,
-                                          shmem->pages->nents,
-                                          DMA_TO_DEVICE);
-               *nents = shmem->mapped;
+               ret = dma_map_sgtable(vgdev->vdev->dev.parent,
+                                     shmem->pages, DMA_TO_DEVICE, 0);
+               if (ret)
+                       return ret;
+               *nents = shmem->mapped = shmem->pages->nents;
        } else {
-               *nents = shmem->pages->nents;
+               *nents = shmem->pages->orig_nents;
        }
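
Note: the error handling changes shape here because the two APIs report failure differently. dma_map_sg() returned 0 on failure and the number of mapped (possibly coalesced) entries on success; dma_map_sgtable() returns 0 or a negative errno and records the mapped count in sgt->nents itself, hence the if (ret) return ret; check and the assignment from shmem->pages->nents. In the non-DMA branch the count must be orig_nents, the CPU-side entry count set up by sg_alloc_table(), since that is what the page-walking loop below iterates. A simplified sketch of the helper as it looked around this change (details may differ in later kernels):

	static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
			enum dma_data_direction dir, unsigned long attrs)
	{
		int nents;

		nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
		if (nents <= 0)
			return -EINVAL;
		/* remember how many entries the IOMMU actually produced */
		sgt->nents = nents;
		return 0;
	}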
 
        *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
                              GFP_KERNEL);
        if (!(*ents)) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }
 
-       for_each_sg(shmem->pages->sgl, sg, *nents, si) {
-               (*ents)[si].addr = cpu_to_le64(use_dma_api
-                                              ? sg_dma_address(sg)
-                                              : sg_phys(sg));
-               (*ents)[si].length = cpu_to_le32(sg->length);
-               (*ents)[si].padding = 0;
+       if (use_dma_api) {
+               for_each_sgtable_dma_sg(shmem->pages, sg, si) {
+                       (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
+                       (*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
+                       (*ents)[si].padding = 0;
+               }
+       } else {
+               for_each_sgtable_sg(shmem->pages, sg, si) {
+                       (*ents)[si].addr = cpu_to_le64(sg_phys(sg));
+                       (*ents)[si].length = cpu_to_le32(sg->length);
+                       (*ents)[si].padding = 0;
+               }
        }
+
        return 0;
 }
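
Note: splitting the loop in two is what makes the nents/orig_nents distinction explicit. for_each_sgtable_dma_sg() walks the sgt->nents entries the DMA mapping actually produced (possibly fewer than the page count if the IOMMU coalesced entries) and must read sg_dma_address()/sg_dma_len(), while for_each_sgtable_sg() walks all sgt->orig_nents CPU-side entries and uses sg_phys() and sg->length. Both iterators are thin wrappers in include/linux/scatterlist.h, roughly:

	#define for_each_sgtable_sg(sgt, sg, i)		\
		for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)

	#define for_each_sgtable_dma_sg(sgt, sg, i)	\
		for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)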
 
 
                return NULL;
        }
 
-       for_each_sg(sgt->sgl, sg, *sg_ents, i) {
+       for_each_sgtable_sg(sgt, sg, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
        if (use_dma_api)
-               dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-                                      shmem->pages->sgl, shmem->pages->nents,
-                                      DMA_TO_DEVICE);
+               dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+                                           shmem->pages, DMA_TO_DEVICE);
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
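
Note: the same conversion repeats below in the 3D transfer path. dma_sync_sgtable_for_device() always syncs sgt->orig_nents entries, the count the sync API expects (the same count that was handed to the mapping call), instead of relying on the driver passing the right field of the table. Simplified from the kernel header, roughly:

	static inline void dma_sync_sgtable_for_device(struct device *dev,
			struct sg_table *sgt, enum dma_data_direction dir)
	{
		/* sync the whole table; orig_nents matches what was mapped */
		dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
	}
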
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
        if (use_dma_api)
-               dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-                                      shmem->pages->sgl, shmem->pages->nents,
-                                      DMA_TO_DEVICE);
+               dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+                                           shmem->pages, DMA_TO_DEVICE);
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));