mm: call update_mmu_cache_range() in more page fault handling paths
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 2 Aug 2023 15:14:06 +0000 (16:14 +0100)
Committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 24 Aug 2023 23:20:27 +0000 (16:20 -0700)
Pass the vm_fault to the architecture to help it make smarter decisions
about which PTEs to insert into the TLB.

Link: https://lkml.kernel.org/r/20230802151406.3735276-39-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index 12b385eaf3534e2e867cd9fe14f293a3f7f2fff1..9d7fb721a680a0341dc0965c223b2a58dbf53693 100644 (file)
@@ -2862,7 +2862,7 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src,
 
                entry = pte_mkyoung(vmf->orig_pte);
                if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
-                       update_mmu_cache(vma, addr, vmf->pte);
+                       update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
        }
 
        /*
@@ -3039,7 +3039,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
        entry = pte_mkyoung(vmf->orig_pte);
        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
        if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
-               update_mmu_cache(vma, vmf->address, vmf->pte);
+               update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        count_vm_event(PGREUSE);
 }
@@ -3163,7 +3163,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                 */
                BUG_ON(unshare && pte_write(entry));
                set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
-               update_mmu_cache(vma, vmf->address, vmf->pte);
+               update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
                if (old_folio) {
                        /*
                         * Only after switching the pte to the new page may
@@ -4046,7 +4046,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        }
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, vmf->address, vmf->pte);
+       update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
 unlock:
        if (vmf->pte)
                pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -4170,7 +4170,7 @@ setpte:
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, vmf->address, vmf->pte);
+       update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
 unlock:
        if (vmf->pte)
                pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -4859,7 +4859,7 @@ out_map:
        if (writable)
                pte = pte_mkwrite(pte);
        ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
-       update_mmu_cache(vma, vmf->address, vmf->pte);
+       update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        goto out;
 }
@@ -5030,7 +5030,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
        entry = pte_mkyoung(entry);
        if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
                                vmf->flags & FAULT_FLAG_WRITE)) {
-               update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
+               update_mmu_cache_range(vmf, vmf->vma, vmf->address,
+                               vmf->pte, 1);
        } else {
                /* Skip spurious TLB flush for retried page fault */
                if (vmf->flags & FAULT_FLAG_TRIED)