hugetlbfs: don't delete error page from pagecache
author: James Houghton <jthoughton@google.com>
date: Tue, 18 Oct 2022 20:01:25 +0000 (20:01 +0000)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
committer date: Sat, 26 Nov 2022 08:24:33 +0000 (09:24 +0100)
[ Upstream commit 8625147cafaa9ba74713d682f5185eb62cb2aedb ]

This change is very similar to the change that was made for shmem [1], and
it solves the same problem but for HugeTLBFS instead.

Currently, when poison is found in a HugeTLB page, the page is removed
from the page cache.  That means that attempting to map or read that
hugepage in the future will result in a new hugepage being allocated
instead of notifying the user that the page was poisoned.  As [1] states,
this is effectively memory corruption.

The fix is to leave the page in the page cache.  If the user attempts to
use a poisoned HugeTLB page with a syscall, the syscall will fail with
EIO, the same error code that shmem uses.  For attempts to map the page,
the thread will get a BUS_MCEERR_AR SIGBUS.

[1]: commit a76054266661 ("mm: shmem: don't truncate page if memory failure happens")

Link: https://lkml.kernel.org/r/20221018200125.848471-1-jthoughton@google.com
Signed-off-by: James Houghton <jthoughton@google.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Tested-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
fs/hugetlbfs/inode.c
mm/hugetlb.c
mm/memory-failure.c

index d74a49b188c24ea187932594a76a49dc1a0704da..be8deec29ebe3ee4464f4a5271343206d6b6ed41 100644 (file)
@@ -361,6 +361,12 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
                } else {
                        unlock_page(page);
 
+                       if (PageHWPoison(page)) {
+                               put_page(page);
+                               retval = -EIO;
+                               break;
+                       }
+
                        /*
                         * We have the page, copy it to user space buffer.
                         */
@@ -984,13 +990,6 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
 static int hugetlbfs_error_remove_page(struct address_space *mapping,
                                struct page *page)
 {
-       struct inode *inode = mapping->host;
-       pgoff_t index = page->index;
-
-       remove_huge_page(page);
-       if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
-               hugetlb_fix_reserve_counts(inode);
-
        return 0;
 }
 
index dbb63ec3b5faba400692898488156e97444f8e6a..e7bd42f236671d126cefbf091be6f84b9f250583 100644 (file)
@@ -5350,6 +5350,10 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
        spin_lock(ptl);
 
+       ret = -EIO;
+       if (PageHWPoison(page))
+               goto out_release_unlock;
+
        /*
         * Recheck the i_size after holding PT lock to make sure not
         * to leave any page mapped (as page_mapped()) beyond the end
index 85b1a77e3a99a19f8d65ead7d6ca463efa1e0e5d..2ad0f45800916d48574c3e3d06b40779442827ea 100644 (file)
@@ -1040,6 +1040,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
        int res;
        struct page *hpage = compound_head(p);
        struct address_space *mapping;
+       bool extra_pins = false;
 
        if (!PageHuge(hpage))
                return MF_DELAYED;
@@ -1047,6 +1048,8 @@ static int me_huge_page(struct page_state *ps, struct page *p)
        mapping = page_mapping(hpage);
        if (mapping) {
                res = truncate_error_page(hpage, page_to_pfn(p), mapping);
+               /* The page is kept in page cache. */
+               extra_pins = true;
                unlock_page(hpage);
        } else {
                res = MF_FAILED;
@@ -1064,7 +1067,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
                }
        }
 
-       if (has_extra_refcount(ps, p, false))
+       if (has_extra_refcount(ps, p, extra_pins))
                res = MF_FAILED;
 
        return res;