mmu_notifiers: don't invalidate secondary TLBs as part of mmu_notifier_invalidate_range_end()
author    Alistair Popple <apopple@nvidia.com>
          Tue, 25 Jul 2023 13:42:06 +0000 (23:42 +1000)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 18 Aug 2023 17:12:41 +0000 (10:12 -0700)
Secondary TLBs are now invalidated from the architecture-specific TLB
invalidation functions.  Therefore there is no need to explicitly notify
or invalidate as part of the range end functions.  This means we can
remove mmu_notifier_invalidate_range_only_end() and some of the
ptep_*_notify() functions.
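
As a minimal sketch of the resulting call-site pattern (example_replace_pte()
below is hypothetical; wp_page_copy() in the mm/memory.c hunk is a real call
site), the _notify variants and the only_end teardown are replaced by the
plain helpers:

    static void example_replace_pte(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep,
                                    pte_t newpte)
    {
            struct mmu_notifier_range range;

            mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                    addr & PAGE_MASK,
                                    (addr & PAGE_MASK) + PAGE_SIZE);
            mmu_notifier_invalidate_range_start(&range);

            /*
             * The architecture TLB flush in ptep_clear_flush() now also
             * invalidates secondary TLBs, so pairing
             * ptep_clear_flush_notify() with
             * mmu_notifier_invalidate_range_only_end() is no longer needed.
             */
            ptep_clear_flush(vma, addr, ptep);
            set_pte_at(vma->vm_mm, addr, ptep, newpte);

            mmu_notifier_invalidate_range_end(&range);
    }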

Link: https://lkml.kernel.org/r/90d749d03cbab256ca0edeb5287069599566d783.1690292440.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Andrew Donnellan <ajd@linux.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Cc: Frederic Barrat <fbarrat@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Nicolin Chen <nicolinc@nvidia.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Zhi Wang <zhi.wang.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mmu_notifier.h
kernel/events/uprobes.c
mm/huge_memory.c
mm/hugetlb.c
mm/memory.c
mm/migrate_device.c
mm/mmu_notifier.c
mm/rmap.c

index 64a3e051c3c43897dca74ea44e45cc32c183e06f..f2e9edc6aa435551d2527738b2aec9e59a7ec235 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -395,8 +395,7 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
                                      unsigned long address, pte_t pte);
 extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
-extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
-                                 bool only_end);
+extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
                                  unsigned long start, unsigned long end);
 extern bool
@@ -481,14 +480,7 @@ mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
                might_sleep();
 
        if (mm_has_notifiers(range->mm))
-               __mmu_notifier_invalidate_range_end(range, false);
-}
-
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
-       if (mm_has_notifiers(range->mm))
-               __mmu_notifier_invalidate_range_end(range, true);
+               __mmu_notifier_invalidate_range_end(range);
 }
 
 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
@@ -582,45 +574,6 @@ static inline void mmu_notifier_range_init_owner(
        __young;                                                        \
 })
 
-#define        ptep_clear_flush_notify(__vma, __address, __ptep)               \
-({                                                                     \
-       unsigned long ___addr = __address & PAGE_MASK;                  \
-       struct mm_struct *___mm = (__vma)->vm_mm;                       \
-       pte_t ___pte;                                                   \
-                                                                       \
-       ___pte = ptep_clear_flush(__vma, __address, __ptep);            \
-       mmu_notifier_invalidate_range(___mm, ___addr,                   \
-                                       ___addr + PAGE_SIZE);           \
-                                                                       \
-       ___pte;                                                         \
-})
-
-#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)            \
-({                                                                     \
-       unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;              \
-       struct mm_struct *___mm = (__vma)->vm_mm;                       \
-       pmd_t ___pmd;                                                   \
-                                                                       \
-       ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);          \
-       mmu_notifier_invalidate_range(___mm, ___haddr,                  \
-                                     ___haddr + HPAGE_PMD_SIZE);       \
-                                                                       \
-       ___pmd;                                                         \
-})
-
-#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)            \
-({                                                                     \
-       unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;              \
-       struct mm_struct *___mm = (__vma)->vm_mm;                       \
-       pud_t ___pud;                                                   \
-                                                                       \
-       ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);          \
-       mmu_notifier_invalidate_range(___mm, ___haddr,                  \
-                                     ___haddr + HPAGE_PUD_SIZE);       \
-                                                                       \
-       ___pud;                                                         \
-})
-
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -711,11 +664,6 @@ void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
 {
 }
 
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
-}
-
 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
 {
index f0ac5b8749195913188ba4d27f2f49e81e668448..3048589e2e8516e12a817875988bdc5986c6ad09 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -193,7 +193,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
        }
 
        flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
-       ptep_clear_flush_notify(vma, addr, pvmw.pte);
+       ptep_clear_flush(vma, addr, pvmw.pte);
        if (new_page)
                set_pte_at_notify(mm, addr, pvmw.pte,
                                  mk_pte(new_page, vma->vm_page_prot));
index 762be2f4244cd9d37e2d48bd9886cd96e04f6126..3ece117de898504ad1880feb6eafcfabd0509226 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2003,7 +2003,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
 
        count_vm_event(THP_SPLIT_PUD);
 
-       pudp_huge_clear_flush_notify(vma, haddr, pud);
+       pudp_huge_clear_flush(vma, haddr, pud);
 }
 
 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
@@ -2023,11 +2023,7 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 
 out:
        spin_unlock(ptl);
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback as
-        * the above pudp_huge_clear_flush_notify() did already call it.
-        */
-       mmu_notifier_invalidate_range_only_end(&range);
+       mmu_notifier_invalidate_range_end(&range);
 }
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
@@ -2094,7 +2090,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        count_vm_event(THP_SPLIT_PMD);
 
        if (!vma_is_anonymous(vma)) {
-               old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+               old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
                /*
                 * We are going to unmap this huge page. So
                 * just go ahead and zap it
@@ -2304,20 +2300,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
 out:
        spin_unlock(ptl);
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback.
-        * They are 3 cases to consider inside __split_huge_pmd_locked():
-        *  1) pmdp_huge_clear_flush_notify() call invalidate_range() obvious
-        *  2) __split_huge_zero_page_pmd() read only zero page and any write
-        *    fault will trigger a flush_notify before pointing to a new page
-        *    (it is fine if the secondary mmu keeps pointing to the old zero
-        *    page in the meantime)
-        *  3) Split a huge pmd into pte pointing to the same page. No need
-        *     to invalidate secondary tlb entry they are all still valid.
-        *     any further changes to individual pte will notify. So no need
-        *     to call mmu_notifier->invalidate_range()
-        */
-       mmu_notifier_invalidate_range_only_end(&range);
+       mmu_notifier_invalidate_range_end(&range);
 }
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
index 412a3eec081cfe59b938670bb6857c3b6b5533d0..4672752b0b17f4e17bcfd40a4af4f104054329b5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5688,7 +5688,6 @@ retry_avoidcopy:
 
                /* Break COW or unshare */
                huge_ptep_clear_flush(vma, haddr, ptep);
-               mmu_notifier_invalidate_range(mm, range.start, range.end);
                page_remove_rmap(&old_folio->page, vma, true);
                hugepage_add_new_anon_rmap(new_folio, vma, haddr);
                if (huge_pte_uffd_wp(pte))
index 44d11812a88f2eabb9b7cfb3023e83b6873dcad0..3e16f06373765f64d06d964e9a6cd7f41e6d1cc1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3155,7 +3155,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                 * that left a window where the new PTE could be loaded into
                 * some TLBs while the old PTE remains in others.
                 */
-               ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
+               ptep_clear_flush(vma, vmf->address, vmf->pte);
                folio_add_new_anon_rmap(new_folio, vma, vmf->address);
                folio_add_lru_vma(new_folio, vma);
                /*
@@ -3201,11 +3201,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                pte_unmap_unlock(vmf->pte, vmf->ptl);
        }
 
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback as
-        * the above ptep_clear_flush_notify() did already call it.
-        */
-       mmu_notifier_invalidate_range_only_end(&range);
+       mmu_notifier_invalidate_range_end(&range);
 
        if (new_folio)
                folio_put(new_folio);
index e29626e1329e97e3d4521cb3e38e654eff245ae2..6c556b5876c61c90fd05de00d3959c755717cacf 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -658,7 +658,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 
        if (flush) {
                flush_cache_page(vma, addr, pte_pfn(orig_pte));
-               ptep_clear_flush_notify(vma, addr, ptep);
+               ptep_clear_flush(vma, addr, ptep);
                set_pte_at_notify(mm, addr, ptep, entry);
                update_mmu_cache(vma, addr, ptep);
        } else {
@@ -763,13 +763,8 @@ static void __migrate_device_pages(unsigned long *src_pfns,
                        src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
        }
 
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback as
-        * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
-        * did already call it.
-        */
        if (notified)
-               mmu_notifier_invalidate_range_only_end(&range);
+               mmu_notifier_invalidate_range_end(&range);
 }
 
 /**
index b7ad1559c72fdaf4cd55747df98b752aa0ac793a..453a156d93c013d89a46c56b485954208bf96002 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -551,7 +551,7 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 
 static void
 mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
-                       struct mmu_notifier_range *range, bool only_end)
+                       struct mmu_notifier_range *range)
 {
        struct mmu_notifier *subscription;
        int id;
@@ -559,24 +559,6 @@ mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
-               /*
-                * Call invalidate_range here too to avoid the need for the
-                * subsystem of having to register an invalidate_range_end
-                * call-back when there is invalidate_range already. Usually a
-                * subsystem registers either invalidate_range_start()/end() or
-                * invalidate_range(), so this will be no additional overhead
-                * (besides the pointer check).
-                *
-                * We skip call to invalidate_range() if we know it is safe ie
-                * call site use mmu_notifier_invalidate_range_only_end() which
-                * is safe to do when we know that a call to invalidate_range()
-                * already happen under page table lock.
-                */
-               if (!only_end && subscription->ops->invalidate_range)
-                       subscription->ops->invalidate_range(subscription,
-                                                           range->mm,
-                                                           range->start,
-                                                           range->end);
                if (subscription->ops->invalidate_range_end) {
                        if (!mmu_notifier_range_blockable(range))
                                non_block_start();
@@ -589,8 +571,7 @@ mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
        srcu_read_unlock(&srcu, id);
 }
 
-void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
-                                        bool only_end)
+void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
 {
        struct mmu_notifier_subscriptions *subscriptions =
                range->mm->notifier_subscriptions;
@@ -600,7 +581,7 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
                mn_itree_inv_end(subscriptions);
 
        if (!hlist_empty(&subscriptions->list))
-               mn_hlist_invalidate_end(subscriptions, range, only_end);
+               mn_hlist_invalidate_end(subscriptions, range);
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 }
 
index 1355bf686fae9ec14103f64609647e9ce767b7a7..51ec8aa5e61f2d8ab0b2816b8520367ce32d2833 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -985,13 +985,6 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
 #endif
                }
 
-               /*
-                * No need to call mmu_notifier_invalidate_range() as we are
-                * downgrading page table protection not changing it to point
-                * to a new page.
-                *
-                * See Documentation/mm/mmu_notifier.rst
-                */
                if (ret)
                        cleaned++;
        }
@@ -1549,8 +1542,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                        hugetlb_vma_unlock_write(vma);
                                        flush_tlb_range(vma,
                                                range.start, range.end);
-                                       mmu_notifier_invalidate_range(mm,
-                                               range.start, range.end);
                                        /*
                                         * The ref count of the PMD page was
                                         * dropped which is part of the way map
@@ -1623,9 +1614,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                         * copied pages.
                         */
                        dec_mm_counter(mm, mm_counter(&folio->page));
-                       /* We have to invalidate as we cleared the pte */
-                       mmu_notifier_invalidate_range(mm, address,
-                                                     address + PAGE_SIZE);
                } else if (folio_test_anon(folio)) {
                        swp_entry_t entry = { .val = page_private(subpage) };
                        pte_t swp_pte;
@@ -1637,9 +1625,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                        folio_test_swapcache(folio))) {
                                WARN_ON_ONCE(1);
                                ret = false;
-                               /* We have to invalidate as we cleared the pte */
-                               mmu_notifier_invalidate_range(mm, address,
-                                                       address + PAGE_SIZE);
                                page_vma_mapped_walk_done(&pvmw);
                                break;
                        }
@@ -1670,9 +1655,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                 */
                                if (ref_count == 1 + map_count &&
                                    !folio_test_dirty(folio)) {
-                                       /* Invalidate as we cleared the pte */
-                                       mmu_notifier_invalidate_range(mm,
-                                               address, address + PAGE_SIZE);
                                        dec_mm_counter(mm, MM_ANONPAGES);
                                        goto discard;
                                }
@@ -1727,9 +1709,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                        if (pte_uffd_wp(pteval))
                                swp_pte = pte_swp_mkuffd_wp(swp_pte);
                        set_pte_at(mm, address, pvmw.pte, swp_pte);
-                       /* Invalidate as we cleared the pte */
-                       mmu_notifier_invalidate_range(mm, address,
-                                                     address + PAGE_SIZE);
                } else {
                        /*
                         * This is a locked file-backed folio,
@@ -1745,13 +1724,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                        dec_mm_counter(mm, mm_counter_file(&folio->page));
                }
 discard:
-               /*
-                * No need to call mmu_notifier_invalidate_range() it has be
-                * done above for all cases requiring it to happen under page
-                * table lock before mmu_notifier_invalidate_range_end()
-                *
-                * See Documentation/mm/mmu_notifier.rst
-                */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
                        mlock_drain_local();
@@ -1930,8 +1902,6 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                        hugetlb_vma_unlock_write(vma);
                                        flush_tlb_range(vma,
                                                range.start, range.end);
-                                       mmu_notifier_invalidate_range(mm,
-                                               range.start, range.end);
 
                                        /*
                                         * The ref count of the PMD page was
@@ -2036,9 +2006,6 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                         * copied pages.
                         */
                        dec_mm_counter(mm, mm_counter(&folio->page));
-                       /* We have to invalidate as we cleared the pte */
-                       mmu_notifier_invalidate_range(mm, address,
-                                                     address + PAGE_SIZE);
                } else {
                        swp_entry_t entry;
                        pte_t swp_pte;
@@ -2102,13 +2069,6 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                         */
                }
 
-               /*
-                * No need to call mmu_notifier_invalidate_range() it has be
-                * done above for all cases requiring it to happen under page
-                * table lock before mmu_notifier_invalidate_range_end()
-                *
-                * See Documentation/mm/mmu_notifier.rst
-                */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
                        mlock_drain_local();
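
For reference, the notification removed by these hunks now originates in the
architecture TLB flush paths (a hedged sketch; example_arch_flush_tlb_range()
and example_hw_tlb_flush() are illustrative names only, the real wiring is
done by the architecture patches earlier in this series):

    static inline void example_arch_flush_tlb_range(struct vm_area_struct *vma,
                                                    unsigned long start,
                                                    unsigned long end)
    {
            /* Invalidate the primary (CPU) TLB for the range... */
            example_hw_tlb_flush(vma, start, end);
            /* ...and notify secondary TLBs (IOMMU, KVM, ...) in the same path. */
            mmu_notifier_invalidate_range(vma->vm_mm, start, end);
    }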