}
 EXPORT_SYMBOL(ib_umem_dmabuf_get);
 
+/*
+ * A pinned dmabuf must not be moved by the exporter, so the move_notify
+ * callback should never fire for these attachments; warn (rate limited)
+ * if it does.
+ */
+static void
+ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
+{
+       struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
+
+       ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
+                              "Invalidate callback should not be called when memory is pinned\n");
+}
+
+static const struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
+       .allow_peer2peer = true,
+       .move_notify = ib_umem_dmabuf_unsupported_move_notify,
+};
+
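+/**
+ * ib_umem_dmabuf_get_pinned - Fetch and pin a dmabuf for memory registration
+ * @device: IB device the memory will be registered with
+ * @offset: Offset into the dmabuf at which the umem starts
+ * @size: Length of the umem in bytes
+ * @fd: File descriptor of the dmabuf
+ * @access: IB_ACCESS_xxx flags for the umem
+ *
+ * Pin the dmabuf so the underlying memory cannot move, then map it for DMA.
+ * Intended for devices that cannot handle move_notify invalidations; the pin
+ * is dropped when the umem is freed with ib_umem_release().
+ *
+ * Return: the new umem on success, or an ERR_PTR on failure.
+ */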
+struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
+                                                unsigned long offset,
+                                                size_t size, int fd,
+                                                int access)
+{
+       struct ib_umem_dmabuf *umem_dmabuf;
+       int err;
+
+       umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
+                                        &ib_umem_dmabuf_attach_pinned_ops);
+       if (IS_ERR(umem_dmabuf))
+               return umem_dmabuf;
+
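+       /* dma_buf_pin() and the page mapping both require the reservation lock */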
+       dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
+       err = dma_buf_pin(umem_dmabuf->attach);
+       if (err)
+               goto err_release;
+       umem_dmabuf->pinned = 1;
+
+       err = ib_umem_dmabuf_map_pages(umem_dmabuf);
+       if (err)
+               goto err_unpin;
+       dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+
+       return umem_dmabuf;
+
+err_unpin:
+       dma_buf_unpin(umem_dmabuf->attach);
+       /* prevent a second unpin in ib_umem_dmabuf_release() below */
+       umem_dmabuf->pinned = 0;
+err_release:
+       dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+       ib_umem_release(&umem_dmabuf->umem);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);
+
 void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
 {
        struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
 
        dma_resv_lock(dmabuf->resv, NULL);
        ib_umem_dmabuf_unmap_pages(umem_dmabuf);
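+       /* drop the pin taken in ib_umem_dmabuf_get_pinned(), if any */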
+       if (umem_dmabuf->pinned)
+               dma_buf_unpin(umem_dmabuf->attach);
        dma_resv_unlock(dmabuf->resv);
 
        dma_buf_detach(dmabuf, umem_dmabuf->attach);
 
        unsigned long first_sg_offset;
        unsigned long last_sg_trim;
        void *private;
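+       /* set once dma_buf_pin() has succeeded on the attachment */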
+       u8 pinned : 1;
 };
 
 static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
                                          unsigned long offset, size_t size,
                                          int fd, int access,
                                          const struct dma_buf_attach_ops *ops);
+struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
+                                                unsigned long offset,
+                                                size_t size, int fd,
+                                                int access);
 int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
 void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
 void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
 {
        return ERR_PTR(-EOPNOTSUPP);
 }
+static inline struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
+                         size_t size, int fd, int access)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
 static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 {
        return -EOPNOTSUPP;