        struct rb_root          dma_list;
        struct blocking_notifier_head notifier;
        unsigned int            dma_avail;
+       unsigned int            vaddr_invalid_count;
        uint64_t                pgsize_bitmap;
        bool                    v2;
        bool                    nesting;
        int                     prot;           /* IOMMU_READ/WRITE */
        bool                    iommu_mapped;
        bool                    lock_cap;       /* capable(CAP_IPC_LOCK) */
+       bool                    vaddr_invalid;
        struct task_struct      *task;
        struct rb_root          pfn_list;       /* Ex-user pinned pfn list */
        unsigned long           *bitmap;
        vfio_unlink_dma(iommu, dma);
        put_task_struct(dma->task);
        vfio_dma_bitmap_free(dma);
+       if (dma->vaddr_invalid)
+               iommu->vaddr_invalid_count--;
        kfree(dma);
        iommu->dma_avail++;
 }
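
The bookkeeping above establishes a simple invariant: vaddr_invalid_count
equals the number of entries in dma_list whose vaddr_invalid flag is set,
so every path that clears the flag or frees a flagged entry (as
vfio_remove_dma now does) must decrement the counter. The payoff is that
other code can ask "is any vaddr stale?" in O(1) instead of walking the
rb-tree. A minimal sketch of such a guard, using a hypothetical helper
name that is not part of the patch:

        /* Hypothetical helper, not in the patch: true when no vaddr is stale. */
        static bool vfio_iommu_vaddrs_valid(struct vfio_iommu *iommu)
        {
                /* vaddr_invalid_count is protected by iommu->lock */
                lockdep_assert_held(&iommu->lock);
                return iommu->vaddr_invalid_count == 0;
        }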
        dma_addr_t iova = unmap->iova;
        unsigned long size = unmap->size;
        bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL;
-       struct rb_node *n;
+       bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR;
+       struct rb_node *n, *first_n;
 
        mutex_lock(&iommu->lock);
 
        }
 
        ret = 0;
-       n = vfio_find_dma_first_node(iommu, iova, size);
+       n = first_n = vfio_find_dma_first_node(iommu, iova, size);
 
        while (n) {
                dma = rb_entry(n, struct vfio_dma, node);
                if (dma->task->mm != current->mm)
                        break;
 
+               if (invalidate_vaddr) {
+                       if (dma->vaddr_invalid) {
+                               struct rb_node *last_n = n;
+
+                               for (n = first_n; n != last_n; n = rb_next(n)) {
+                                       dma = rb_entry(n,
+                                                      struct vfio_dma, node);
+                                       dma->vaddr_invalid = false;
+                                       iommu->vaddr_invalid_count--;
+                               }
+                               ret = -EINVAL;
+                               unmapped = 0;
+                               break;
+                       }
+                       dma->vaddr_invalid = true;
+                       iommu->vaddr_invalid_count++;
+                       unmapped += dma->size;
+                       n = rb_next(n);
+                       continue;
+               }
+
                if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
                        struct vfio_iommu_type1_dma_unmap nb_unmap;
 
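On the unmap side, VFIO_DMA_UNMAP_FLAG_VADDR tears nothing down: each
vfio_dma covered by the range is flagged vaddr_invalid and counted, and
unmapped accumulates the sizes so the ioctl can report how much was
covered. If any covered entry is already invalid, the walk from first_n
up to last_n rolls back the flags set so far and the ioctl fails with
-EINVAL, making the operation all-or-nothing. From userspace the call
looks like the sketch below, assuming container_fd is an open VFIO
container backed by this type1 driver:

        #include <string.h>
        #include <sys/ioctl.h>
        #include <linux/vfio.h>

        /* Invalidate the vaddr of every mapping in [iova, iova + size). */
        static int vfio_invalidate_vaddr(int container_fd, __u64 iova, __u64 size)
        {
                struct vfio_iommu_type1_dma_unmap unmap;

                memset(&unmap, 0, sizeof(unmap));
                unmap.argsz = sizeof(unmap);
                unmap.flags = VFIO_DMA_UNMAP_FLAG_VADDR;
                unmap.iova = iova;
                unmap.size = size;

                /* Fails with errno == EINVAL if any covered vaddr is already invalid. */
                return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
        }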
 static int vfio_dma_do_map(struct vfio_iommu *iommu,
                           struct vfio_iommu_type1_dma_map *map)
 {
+       bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR;
        dma_addr_t iova = map->iova;
        unsigned long vaddr = map->vaddr;
        size_t size = map->size;
        if (map->flags & VFIO_DMA_MAP_FLAG_READ)
                prot |= IOMMU_READ;
 
+       if ((prot && set_vaddr) || (!prot && !set_vaddr))
+               return -EINVAL;
+
        mutex_lock(&iommu->lock);
 
        pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
 
        WARN_ON((pgsize - 1) & PAGE_MASK);
 
-       if (!prot || !size || (size | iova | vaddr) & (pgsize - 1)) {
+       if (!size || (size | iova | vaddr) & (pgsize - 1)) {
                ret = -EINVAL;
                goto out_unlock;
        }
                goto out_unlock;
        }
 
-       if (vfio_find_dma(iommu, iova, size)) {
+       dma = vfio_find_dma(iommu, iova, size);
+       if (set_vaddr) {
+               if (!dma) {
+                       ret = -ENOENT;
+               } else if (!dma->vaddr_invalid || dma->iova != iova ||
+                          dma->size != size) {
+                       ret = -EINVAL;
+               } else {
+                       dma->vaddr = vaddr;
+                       dma->vaddr_invalid = false;
+                       iommu->vaddr_invalid_count--;
+               }
+               goto out_unlock;
+       } else if (dma) {
                ret = -EEXIST;
                goto out_unlock;
        }
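
The map side is the mirror image: with VFIO_DMA_MAP_FLAG_VADDR set, no
new mapping is created. The request must name an existing mapping whose
vaddr was previously invalidated, with exactly the same iova and size;
otherwise the ioctl fails with -ENOENT (no such mapping) or -EINVAL
(mapping not invalidated, or bounds differ). Note the flag check added
earlier: VADDR must be supplied without READ/WRITE, and an ordinary map
must carry at least one of READ/WRITE. The userspace counterpart to the
sketch above:

        #include <stdint.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <linux/vfio.h>

        /* Supply the new vaddr for a mapping invalidated earlier. */
        static int vfio_update_vaddr(int container_fd, __u64 iova, __u64 size,
                                     void *new_vaddr)
        {
                struct vfio_iommu_type1_dma_map map;

                memset(&map, 0, sizeof(map));
                map.argsz = sizeof(map);
                map.flags = VFIO_DMA_MAP_FLAG_VADDR;    /* no READ/WRITE bits */
                map.vaddr = (__u64)(uintptr_t)new_vaddr;
                map.iova = iova;        /* must match the original mapping exactly */
                map.size = size;

                return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
        }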
        case VFIO_TYPE1v2_IOMMU:
        case VFIO_TYPE1_NESTING_IOMMU:
        case VFIO_UNMAP_ALL:
+       case VFIO_UPDATE_VADDR:
                return 1;
        case VFIO_DMA_CC_IOMMU:
                if (!iommu)
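
Userspace should probe for the feature before relying on it; the new
VFIO_UPDATE_VADDR extension is reported through the usual
VFIO_CHECK_EXTENSION mechanism, which returns a positive value when the
case added above is reached:

        if (ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_UPDATE_VADDR) <= 0) {
                /* Kernel predates vaddr updates; fall back to a full remap. */
        }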
 {
        struct vfio_iommu_type1_dma_map map;
        unsigned long minsz;
-       uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
+       uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE |
+                       VFIO_DMA_MAP_FLAG_VADDR;
 
        minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
 
        struct vfio_iommu_type1_dma_unmap unmap;
        struct vfio_bitmap bitmap = { 0 };
        uint32_t mask = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP |
+                       VFIO_DMA_UNMAP_FLAG_VADDR |
                        VFIO_DMA_UNMAP_FLAG_ALL;
        unsigned long minsz;
        int ret;
                return -EINVAL;
 
        if ((unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
-           (unmap.flags & VFIO_DMA_UNMAP_FLAG_ALL))
+           (unmap.flags & (VFIO_DMA_UNMAP_FLAG_ALL |
+                           VFIO_DMA_UNMAP_FLAG_VADDR)))
                return -EINVAL;
 
        if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {