drm/vmwgfx: Support huge page faults
author Thomas Hellstrom (VMware) <thomas_os@shipmail.org>
Tue, 24 Mar 2020 17:48:55 +0000 (18:48 +0100)
committer Thomas Hellstrom (VMware) <thomas_os@shipmail.org>
Tue, 24 Mar 2020 17:48:55 +0000 (18:48 +0100)
With vmwgfx dirty-tracking we need a specialized huge_fault
callback: write dirty-tracking and COW must still happen at PTE
granularity, so the callback falls back to PTE faults for such
writes and maps dirty-tracked buffer objects write-protected,
causing huge entries to be split on the next write fault.
Implement the callback and hook it up.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Thomas Hellstrom (VMware) <thomas_os@shipmail.org>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Acked-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
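
For context, ->huge_fault is invoked from the core mm's transparent
huge page fault path rather than the regular .fault path. The
PMD-level dispatch in this kernel era looks roughly like the sketch
below (paraphrased from memory of mm/memory.c, not part of this
patch, so details may differ slightly). Returning VM_FAULT_FALLBACK
makes the core mm retry the fault at PTE level through the ordinary
.fault handler, which is what the new vmwgfx callback relies on for
write dirty-tracking and COW.

/*
 * Rough sketch of the core-mm PMD fault dispatch (paraphrased from
 * mm/memory.c of this era; not part of this patch).
 */
static vm_fault_t create_huge_pmd(struct vm_fault *vmf)
{
	if (vma_is_anonymous(vmf->vma))
		return do_huge_pmd_anonymous_page(vmf);
	if (vmf->vma->vm_ops->huge_fault)
		/* For vmwgfx BO mappings this is vmw_bo_vm_huge_fault() */
		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);

	/* No handler: retry the fault at PTE level. */
	return VM_FAULT_FALLBACK;
}

A PUD-level variant dispatches with PE_SIZE_PUD when the architecture
supports it, which is why the handler below also accepts that size
under CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD.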

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b70d7322570784e35f5569ceb0b4e3768d2e7e14..6fc8d5c171c687453be749f7a78045ad07d0d0a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1402,6 +1402,10 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
                        pgoff_t start, pgoff_t end);
 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+                               enum page_entry_size pe_size);
+#endif
 
 /**
  * VMW_DEBUG_KMS - Debug output for kernel mode-setting
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 8cf7a77c9b2f19f2e3d68dee8b498e5f85191036..d4d66532f9c904b2231c8da6a67653c9f72a9b69 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -473,7 +473,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
         * a lot of unnecessary write faults.
         */
        if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
-               prot = vma->vm_page_prot;
+               prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
        else
                prot = vm_get_page_prot(vma->vm_flags);
 
@@ -486,3 +486,75 @@ out_unlock:
 
        return ret;
 }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+                               enum page_entry_size pe_size)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+           vma->vm_private_data;
+       struct vmw_buffer_object *vbo =
+               container_of(bo, struct vmw_buffer_object, base);
+       pgprot_t prot;
+       vm_fault_t ret;
+       pgoff_t fault_page_size;
+       bool write = vmf->flags & FAULT_FLAG_WRITE;
+       bool is_cow_mapping =
+               (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+
+       switch (pe_size) {
+       case PE_SIZE_PMD:
+               fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
+               break;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+       case PE_SIZE_PUD:
+               fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
+               break;
+#endif
+       default:
+               WARN_ON_ONCE(1);
+               return VM_FAULT_FALLBACK;
+       }
+
+       /* Always do write dirty-tracking and COW on PTE level. */
+       if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
+               return VM_FAULT_FALLBACK;
+
+       ret = ttm_bo_vm_reserve(bo, vmf);
+       if (ret)
+               return ret;
+
+       if (vbo->dirty) {
+               pgoff_t allowed_prefault;
+               unsigned long page_offset;
+
+               page_offset = vmf->pgoff -
+                       drm_vma_node_start(&bo->base.vma_node);
+               if (page_offset >= bo->num_pages ||
+                   vmw_resources_clean(vbo, page_offset,
+                                       page_offset + PAGE_SIZE,
+                                       &allowed_prefault)) {
+                       ret = VM_FAULT_SIGBUS;
+                       goto out_unlock;
+               }
+
+               /*
+                * Write protect, so we get a new fault on write, and can
+                * split.
+                */
+               prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
+       } else {
+               prot = vm_get_page_prot(vma->vm_flags);
+       }
+
+       ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
+       if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+               return ret;
+
+out_unlock:
+       dma_resv_unlock(bo->base.resv);
+
+       return ret;
+}
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index aa7e50f63b94673e46c9417c5b68c239b924f56c..3c03b1746661a8347bee11561a288fdc68851da5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -34,7 +34,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
                .page_mkwrite = vmw_bo_vm_mkwrite,
                .fault = vmw_bo_vm_fault,
                .open = ttm_bo_vm_open,
-               .close = ttm_bo_vm_close
+               .close = ttm_bo_vm_close,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               .huge_fault = vmw_bo_vm_huge_fault,
+#endif
        };
        struct drm_file *file_priv = filp->private_data;
        struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);