mm/mmu_gather: pass "delay_rmap" instead of encoded page to __tlb_remove_page_size()
author		David Hildenbrand <david@redhat.com>
		Wed, 14 Feb 2024 20:44:30 +0000 (21:44 +0100)
committer	Andrew Morton <akpm@linux-foundation.org>
		Thu, 22 Feb 2024 23:27:17 +0000 (15:27 -0800)
We have two bits available in the encoded page pointer to store additional
information.  Currently, we use one bit to request that rmap removal be
delayed until after the TLB flush.

We want to use the remaining bit internally for batching multiple pages
of the same folio, specifying that the next encoded page pointer in an
array is actually "nr_pages".  So pass the page pointer and a delay_rmap
flag instead of an encoded page, and handle the encoding internally in
__tlb_remove_page_size().
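
For context, the encoding works because "struct page" pointers are at
least word-aligned, so the two low bits of the pointer are always zero
and can carry flags.  A minimal sketch of that tagging scheme, loosely
modeled on encode_page()/encoded_page_ptr() in include/linux/mm_types.h
(the mask name below is illustrative, not the exact kernel definition):

    struct encoded_page;                /* opaque: a tagged page pointer */

    #define ENCODED_PAGE_FLAG_MASK 3ul  /* the two usable low bits */

    static inline struct encoded_page *encode_page(struct page *page,
                                                   unsigned long flags)
    {
            VM_WARN_ON_ONCE(flags & ~ENCODED_PAGE_FLAG_MASK);
            return (struct encoded_page *)((unsigned long)page | flags);
    }

    static inline unsigned long encoded_page_flags(struct encoded_page *page)
    {
            return (unsigned long)page & ENCODED_PAGE_FLAG_MASK;
    }

    static inline struct page *encoded_page_ptr(struct encoded_page *page)
    {
            return (struct page *)((unsigned long)page & ~ENCODED_PAGE_FLAG_MASK);
    }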

Link: https://lkml.kernel.org/r/20240214204435.167852-6-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/s390/include/asm/tlb.h
include/asm-generic/tlb.h
mm/mmu_gather.c

index d1455a601adcad03a6bd7ec2f467cec47ed55bd8..48df896d5b79d23d13fa1fb3603638d9c93724e0 100644 (file)
@@ -25,8 +25,7 @@
 void __tlb_remove_table(void *_table);
 static inline void tlb_flush(struct mmu_gather *tlb);
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                         struct encoded_page *page,
-                                         int page_size);
+               struct page *page, bool delay_rmap, int page_size);
 
 #define tlb_flush tlb_flush
 #define pte_free_tlb pte_free_tlb
@@ -42,14 +41,14 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
  * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
  * has already been freed, so just do free_page_and_swap_cache.
  *
- * s390 doesn't delay rmap removal, so there is nothing encoded in
- * the page pointer.
+ * s390 doesn't delay rmap removal.
  */
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                         struct encoded_page *page,
-                                         int page_size)
+               struct page *page, bool delay_rmap, int page_size)
 {
-       free_page_and_swap_cache(encoded_page_ptr(page));
+       VM_WARN_ON_ONCE(delay_rmap);
+
+       free_page_and_swap_cache(page);
        return false;
 }
 
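On s390, pages are freed immediately rather than gathered into a batch,
so there is no encoded-page array at all; the VM_WARN_ON_ONCE() simply
documents that no caller may request delayed rmap removal there.  On the
generic side, the call pattern after this patch looks roughly like the
following (illustrative caller, not taken from this patch):

    /* Queue the page; have its rmap removed only after the TLB flush. */
    if (__tlb_remove_page(tlb, page, /* delay_rmap = */ true))
            tlb_flush_mmu(tlb);     /* batch is full: flush and free now */
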
index 129a3a759976598efe88f390847565c7027cecd5..2eb7b0d4f5d2b5de62a2c599594889e814671846 100644 (file)
@@ -260,9 +260,8 @@ struct mmu_gather_batch {
  */
 #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
 
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                  struct encoded_page *page,
-                                  int page_size);
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+               bool delay_rmap, int page_size);
 
 #ifdef CONFIG_SMP
 /*
@@ -462,13 +461,14 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
 {
-       if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
+       if (__tlb_remove_page_size(tlb, page, false, page_size))
                tlb_flush_mmu(tlb);
 }
 
-static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page, unsigned int flags)
+static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
+               struct page *page, bool delay_rmap)
 {
-       return __tlb_remove_page_size(tlb, encode_page(page, flags), PAGE_SIZE);
+       return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE);
 }
 
 /* tlb_remove_page
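
Note the contract here: __tlb_remove_page_size() returns true when the
gather batch is full and the caller must flush, while the
tlb_remove_page_size() wrapper hides that by calling tlb_flush_mmu()
itself.  For reference, the tlb_remove_page() helper that the truncated
comment above introduces is unchanged by this patch and reads roughly:

    static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
    {
            tlb_remove_page_size(tlb, page, PAGE_SIZE);
    }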
index 604ddf08affed2063923549bc503f0e18c6fd34e..ac733d81b11211b31be976e9e614da4bcca32e9e 100644 (file)
@@ -116,7 +116,8 @@ static void tlb_batch_list_free(struct mmu_gather *tlb)
        tlb->local.next = NULL;
 }
 
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size)
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+               bool delay_rmap, int page_size)
 {
        struct mmu_gather_batch *batch;
 
@@ -131,13 +132,13 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, i
         * Add the page and check if we are full. If so
         * force a flush.
         */
-       batch->encoded_pages[batch->nr++] = page;
+       batch->encoded_pages[batch->nr++] = encode_page(page, delay_rmap);
        if (batch->nr == batch->max) {
                if (!tlb_next_batch(tlb))
                        return true;
                batch = tlb->active;
        }
-       VM_BUG_ON_PAGE(batch->nr > batch->max, encoded_page_ptr(page));
+       VM_BUG_ON_PAGE(batch->nr > batch->max, page);
 
        return false;
 }
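
For completeness: the delay_rmap bit encoded above is consumed when the
rmaps are flushed, by walking the batch and removing the rmap of every
page whose flag bit is set before the pages themselves are freed.  A
sketch of that consumer, loosely based on tlb_flush_rmap_batch() in
mm/mmu_gather.c (simplified, and the function name here is illustrative):

    static void flush_delayed_rmaps(struct mmu_gather_batch *batch,
                                    struct vm_area_struct *vma)
    {
            int i;

            for (i = 0; i < batch->nr; i++) {
                    struct encoded_page *enc = batch->encoded_pages[i];

                    if (encoded_page_flags(enc)) {  /* delay_rmap was set */
                            struct page *page = encoded_page_ptr(enc);

                            folio_remove_rmap_pte(page_folio(page), page, vma);
                    }
            }
    }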