mm/memory-failure: remove fsdax_pgoff argument from __add_to_kill
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Fri, 12 Apr 2024 19:34:58 +0000 (20:34 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 6 May 2024 00:53:44 +0000 (17:53 -0700)
Patch series "Some cleanups for memory-failure", v3.

A lot of folio conversions, plus some other simplifications.

This patch (of 11):

Unify the KSM and DAX codepaths by calculating the address in
add_to_kill_fsdax() instead of having __add_to_kill() work it out from
the fsdax page offset.  This also removes the FSDAX_INVALID_PGOFF
sentinel and the extra argument.
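
For reference, vma_address(vma, pgoff, 1) resolves a file page offset to
the user virtual address at which the VMA maps it.  A simplified sketch
of the arithmetic (vma_address_sketch() is a stand-in name for
illustration; the in-tree helper in mm/internal.h additionally
range-checks pgoff against the VMA and returns -EFAULT when it falls
outside):

	/* Sketch only: assumes pgoff lies within the VMA's mapping. */
	static unsigned long vma_address_sketch(struct vm_area_struct *vma,
						pgoff_t pgoff)
	{
		return vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	}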

Link: https://lkml.kernel.org/r/20240412193510.2356957-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20240412193510.2356957-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 68e1fe1c0b724c358e16a31ec57c463f6b68cad1..396f939c0e1fe15b28ff2a11ce4a13937b8bf71e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -427,21 +427,13 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
  * not much we can do. We just print a message and ignore otherwise.
  */
 
-#define FSDAX_INVALID_PGOFF ULONG_MAX
-
 /*
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
- *
- * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
- * filesystem with a memory failure handler has claimed the
- * memory_failure event. In all other cases, page->index and
- * page->mapping are sufficient for mapping the page back to its
- * corresponding user virtual address.
  */
 static void __add_to_kill(struct task_struct *tsk, struct page *p,
                          struct vm_area_struct *vma, struct list_head *to_kill,
-                         unsigned long ksm_addr, pgoff_t fsdax_pgoff)
+                         unsigned long addr)
 {
        struct to_kill *tk;
 
@@ -451,12 +443,10 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
                return;
        }
 
-       tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
-       if (is_zone_device_page(p)) {
-               if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
-                       tk->addr = vma_address(vma, fsdax_pgoff, 1);
+       tk->addr = addr ? addr : page_address_in_vma(p, vma);
+       if (is_zone_device_page(p))
                tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
-       } else
+       else
                tk->size_shift = page_shift(compound_head(p));
 
        /*
@@ -486,7 +476,7 @@ static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
                                  struct vm_area_struct *vma,
                                  struct list_head *to_kill)
 {
-       __add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF);
+       __add_to_kill(tsk, p, vma, to_kill, 0);
 }
 
 #ifdef CONFIG_KSM
@@ -504,10 +494,10 @@ static bool task_in_to_kill_list(struct list_head *to_kill,
 }
 void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
                     struct vm_area_struct *vma, struct list_head *to_kill,
-                    unsigned long ksm_addr)
+                    unsigned long addr)
 {
        if (!task_in_to_kill_list(to_kill, tsk))
-               __add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF);
+               __add_to_kill(tsk, p, vma, to_kill, addr);
 }
 #endif
 /*
@@ -681,7 +671,8 @@ static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
                              struct vm_area_struct *vma,
                              struct list_head *to_kill, pgoff_t pgoff)
 {
-       __add_to_kill(tsk, p, vma, to_kill, 0, pgoff);
+       unsigned long addr = vma_address(vma, pgoff, 1);
+       __add_to_kill(tsk, p, vma, to_kill, addr);
 }
 
 /*
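
The net effect on __add_to_kill(), reconstructed from the hunks above
(unchanged code elided with "..."):

	static void __add_to_kill(struct task_struct *tsk, struct page *p,
				  struct vm_area_struct *vma,
				  struct list_head *to_kill,
				  unsigned long addr)
	{
		struct to_kill *tk;
		...
		/* Callers pass a precomputed address, or 0 to fall back. */
		tk->addr = addr ? addr : page_address_in_vma(p, vma);
		if (is_zone_device_page(p))
			tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
		else
			tk->size_shift = page_shift(compound_head(p));
		...
	}

All three callers now resolve the address up front: add_to_kill_ksm()
passes the KSM address, add_to_kill_fsdax() converts its file offset
with vma_address(vma, pgoff, 1), and add_to_kill_anon_file() passes 0 so
__add_to_kill() falls back to page_address_in_vma().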