x86/sev: Introduce an SNP leaked pages list
Author:     Ashish Kalra <ashish.kalra@amd.com>
AuthorDate: Fri, 26 Jan 2024 04:11:15 +0000 (22:11 -0600)
Commit:     Borislav Petkov (AMD) <bp@alien8.de>
CommitDate: Mon, 29 Jan 2024 19:34:18 +0000 (20:34 +0100)
Pages are unsafe to release back to the page allocator if they have
been transitioned to firmware/guest state and can't be reclaimed or
transitioned back to hypervisor/shared state. In this case, add them
to an internal leaked pages list to ensure that they are not freed or
touched/accessed, which would cause fatal page faults.

  [ mdr: Relocate to arch/x86/virt/svm/sev.c ]

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Link: https://lore.kernel.org/r/20240126041126.1927228-16-michael.roth@amd.com
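
For context, a minimal sketch of the intended call pattern follows: a
caller that fails to transition a page back to hypervisor/shared state
leaks it instead of freeing it. The helper name reclaim_fw_page() is
hypothetical and not part of this patch; rmp_make_shared(),
snp_leak_pages() and PG_LEVEL_4K are the interfaces the patch provides
or relies on (assuming <linux/mm.h> and <asm/sev.h>).

    /*
     * Hypothetical caller sketch (not part of this patch): a page that
     * cannot be transitioned back to hypervisor/shared state must never
     * reach the page allocator again, so leak it instead.
     */
    static int reclaim_fw_page(struct page *page)
    {
            u64 pfn = page_to_pfn(page);

            if (rmp_make_shared(pfn, PG_LEVEL_4K)) {
                    /* Unreclaimable: chain it onto the leaked pages list. */
                    snp_leak_pages(pfn, 1);
                    return -EIO;
            }

            /* Transition succeeded; safe to free back to the allocator. */
            __free_page(page);
            return 0;
    }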
arch/x86/include/asm/sev.h
arch/x86/virt/svm/sev.c

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 57fd95a5e6b28f33acce4fb6496127eed85a7cae..60de1b43a7290a9fabbbb0d6ff243a3e64bf9967 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -264,6 +264,7 @@ void snp_dump_hva_rmpentry(unsigned long address);
 int psmash(u64 pfn);
 int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
 int rmp_make_shared(u64 pfn, enum pg_level level);
+void snp_leak_pages(u64 pfn, unsigned int npages);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
@@ -275,6 +276,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 as
        return -ENODEV;
 }
 static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
+static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
 #endif
 
 #endif
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 5566fb04bd38b1644694a025c20de18065c8ed9a..0dffbf3908d03fa25e4e1f74bcc1cc2ef4ddfc6e 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -65,6 +65,11 @@ static u64 probed_rmp_base, probed_rmp_size;
 static struct rmpentry *rmptable __ro_after_init;
 static u64 rmptable_max_pfn __ro_after_init;
 
+static LIST_HEAD(snp_leaked_pages_list);
+static DEFINE_SPINLOCK(snp_leaked_pages_list_lock);
+
+static unsigned long snp_nr_leaked_pages;
+
 #undef pr_fmt
 #define pr_fmt(fmt)    "SEV-SNP: " fmt
 
@@ -515,3 +520,35 @@ int rmp_make_shared(u64 pfn, enum pg_level level)
        return rmpupdate(pfn, &state);
 }
 EXPORT_SYMBOL_GPL(rmp_make_shared);
+
+void snp_leak_pages(u64 pfn, unsigned int npages)
+{
+       struct page *page = pfn_to_page(pfn);
+
+       pr_warn("Leaking PFN range 0x%llx-0x%llx\n", pfn, pfn + npages);
+
+       spin_lock(&snp_leaked_pages_list_lock);
+       while (npages--) {
+
+               /*
+                * Reuse the page's buddy list for chaining into the leaked
+                * pages list. This page should not be on a free list currently
+                * and is also unsafe to be added to a free list.
+                */
+               if (likely(!PageCompound(page)) ||
+
+                       /*
+                        * Skip inserting tail pages of compound page as
+                        * page->buddy_list of tail pages is not usable.
+                        */
+                   (PageHead(page) && compound_nr(page) <= npages))
+                       list_add_tail(&page->buddy_list, &snp_leaked_pages_list);
+
+               dump_rmpentry(pfn);
+               snp_nr_leaked_pages++;
+               pfn++;
+               page++;
+       }
+       spin_unlock(&snp_leaked_pages_list_lock);
+}
+EXPORT_SYMBOL_GPL(snp_leak_pages);
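
The list reuses page->buddy_list for linkage, so leaked pages can later
be enumerated without any extra allocation. As an illustration only, a
debug walker might look like the sketch below; snp_dump_leaked_pages()
is hypothetical and not part of this patch.

    /*
     * Hypothetical debug helper (not in this patch): walk the leaked
     * pages list through the reused buddy_list linkage and report each
     * PFN under the same lock that serializes insertions.
     */
    static void snp_dump_leaked_pages(void)
    {
            struct page *page;

            spin_lock(&snp_leaked_pages_list_lock);
            list_for_each_entry(page, &snp_leaked_pages_list, buddy_list)
                    pr_info("leaked PFN 0x%llx\n", (u64)page_to_pfn(page));
            spin_unlock(&snp_leaked_pages_list_lock);
    }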