x86/fault: Add helper for dumping RMP entries
author Brijesh Singh <brijesh.singh@amd.com>
Fri, 26 Jan 2024 04:11:07 +0000 (22:11 -0600)
committer Borislav Petkov (AMD) <bp@alien8.de>
Mon, 29 Jan 2024 16:26:30 +0000 (17:26 +0100)
Dumping the raw RMP entry contents will be useful for debugging things like
page faults due to RMP access violations and RMPUPDATE failures.

  [ mdr: move helper to standalone patch, rework dump logic as suggested
    by Boris. ]

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20240126041126.1927228-8-michael.roth@amd.com
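
[ Editor's note: this patch only adds the helper; wiring it into the page
  fault path comes separately. A minimal caller sketch, assuming an
  RMP-violation bit in the #PF error code (the X86_PF_RMP name and its use
  here are assumptions for illustration, not part of this patch):

	/*
	 * Hypothetical caller: dump RMP state when a page fault is
	 * flagged as an RMP access violation. X86_PF_RMP is assumed
	 * to be defined as the RMP bit of the #PF error code.
	 */
	static void dump_rmp_fault_info(unsigned long error_code,
					unsigned long address)
	{
		if (error_code & X86_PF_RMP)
			snp_dump_hva_rmpentry(address);
	}
]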
arch/x86/include/asm/sev.h
arch/x86/virt/svm/sev.c

index 01ce61b283a324c5e00ed36ae085c89db731bdf7..2c53e3de0b71b4998e81d6a4bdeaf30e41c409fb 100644 (file)
@@ -247,9 +247,11 @@ static inline u64 sev_get_status(void) { return 0; }
 #ifdef CONFIG_KVM_AMD_SEV
 bool snp_probe_rmptable_info(void);
 int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
+void snp_dump_hva_rmpentry(unsigned long address);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
+static inline void snp_dump_hva_rmpentry(unsigned long address) {}
 #endif
 
 #endif
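
[ Editor's note: the !CONFIG_KVM_AMD_SEV stub above means call sites need no
  #ifdef of their own. A minimal sketch (the report_fault() wrapper is
  hypothetical):

	/*
	 * Hypothetical call site: compiles either way, becomes a
	 * no-op when CONFIG_KVM_AMD_SEV is not set.
	 */
	static void report_fault(unsigned long address)
	{
		snp_dump_hva_rmpentry(address);
	}
]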
index 7669b2ff0ec78601a79b2559d2024d9989261702..c74266e039b2a9f83407a6a0d8e49d6cbb7ab437 100644 (file)
  * Family 19h Model 01h, Rev B1 processor.
  */
 struct rmpentry {
-       u64     assigned        : 1,
-               pagesize        : 1,
-               immutable       : 1,
-               rsvd1           : 9,
-               gpa             : 39,
-               asid            : 10,
-               vmsa            : 1,
-               validated       : 1,
-               rsvd2           : 1;
-       u64 rsvd3;
+       union {
+               struct {
+                       u64 assigned    : 1,
+                           pagesize    : 1,
+                           immutable   : 1,
+                           rsvd1       : 9,
+                           gpa         : 39,
+                           asid        : 10,
+                           vmsa        : 1,
+                           validated   : 1,
+                           rsvd2       : 1;
+               };
+               u64 lo;
+       };
+       u64 hi;
 } __packed;
 
 /*
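
[ Editor's note: the union above overlays the named bitfields with a raw u64
  (lo), so the dump code below can print whole words while lookup code keeps
  using the fields. A standalone illustration of the overlay (plain C;
  bitfield allocation as done by GCC/Clang on x86-64 is assumed):

	#include <stdint.h>
	#include <stdio.h>

	struct demo_rmpentry {
		union {
			struct {
				uint64_t assigned : 1,
					 pagesize : 1,
					 rest     : 62;
			};
			uint64_t lo;
		};
		uint64_t hi;
	};

	int main(void)
	{
		/* Setting the raw word sets the bitfields, and vice versa. */
		struct demo_rmpentry e = { .lo = 0x3 };

		printf("assigned=%u pagesize=%u raw=0x%016llx\n",
		       (unsigned)e.assigned, (unsigned)e.pagesize,
		       (unsigned long long)e.lo);
		return 0;
	}
]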
@@ -263,3 +268,77 @@ int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
        return 0;
 }
 EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
+
+/*
+ * Dump the raw RMP entry for a particular PFN. These bits are documented in the
+ * PPR for a particular CPU model and provide useful information about how a
+ * particular PFN is being utilized by the kernel/firmware at the time certain
+ * unexpected events occur, such as RMP faults.
+ */
+static void dump_rmpentry(u64 pfn)
+{
+       u64 pfn_i, pfn_end;
+       struct rmpentry *e;
+       int level;
+
+       e = __snp_lookup_rmpentry(pfn, &level);
+       if (IS_ERR(e)) {
+               pr_err("Failed to read RMP entry for PFN 0x%llx, error %ld\n",
+                      pfn, PTR_ERR(e));
+               return;
+       }
+
+       if (e->assigned) {
+               pr_info("PFN 0x%llx, RMP entry: [0x%016llx - 0x%016llx]\n",
+                       pfn, e->lo, e->hi);
+               return;
+       }
+
+       /*
+        * If the RMP entry for a particular PFN is not in an assigned state,
+        * then it is sometimes useful to get an idea of whether or not any RMP
+        * entries for other PFNs within the same 2MB region are assigned, since
+        * those too can affect the ability to access a particular PFN in
+        * certain situations, such as when the PFN is being accessed via a 2MB
+        * mapping in the host page table.
+        */
+       pfn_i = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+       pfn_end = pfn_i + PTRS_PER_PMD;
+
+       pr_info("PFN 0x%llx unassigned, dumping non-zero entries in 2M PFN region: [0x%llx - 0x%llx]\n",
+               pfn, pfn_i, pfn_end);
+
+       while (pfn_i < pfn_end) {
+               e = __snp_lookup_rmpentry(pfn_i, &level);
+               if (IS_ERR(e)) {
+                       pr_err("Error %ld reading RMP entry for PFN 0x%llx\n",
+                              PTR_ERR(e), pfn_i);
+                       pfn_i++;
+                       continue;
+               }
+
+               if (e->lo || e->hi)
+                       pr_info("PFN: 0x%llx, [0x%016llx - 0x%016llx]\n", pfn_i, e->lo, e->hi);
+               pfn_i++;
+       }
+}
+
+void snp_dump_hva_rmpentry(unsigned long hva)
+{
+       unsigned long paddr;
+       unsigned int level;
+       pgd_t *pgd;
+       pte_t *pte;
+
+       pgd = __va(read_cr3_pa());
+       pgd += pgd_index(hva);
+       pte = lookup_address_in_pgd(pgd, hva, &level);
+
+       if (!pte) {
+               pr_err("Can't dump RMP entry for HVA %lx: no PTE/PFN found\n", hva);
+               return;
+       }
+
+       paddr = PFN_PHYS(pte_pfn(*pte)) | (hva & ~page_level_mask(level));
+       dump_rmpentry(PHYS_PFN(paddr));
+}
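
[ Editor's note: the address math in snp_dump_hva_rmpentry() merges the leaf
  PTE's frame with the HVA's offset below the mapping level, so the 4K PFN
  handed to dump_rmpentry() is exact even when the HVA is covered by a 2M or
  1G mapping. A standalone sketch of the same arithmetic for a 2M mapping
  (all values hypothetical; PAGE_SHIFT assumed to be 12):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21	/* 2M mapping level on x86-64 */

	int main(void)
	{
		uint64_t hva = 0xffff888012345678ULL; /* hypothetical faulting HVA */
		uint64_t pfn = 0x40a00;	/* hypothetical pte_pfn() of the 2M leaf */

		/* page_level_mask(PG_LEVEL_2M): clears bits below the 2M boundary */
		uint64_t level_mask = ~((1ULL << PMD_SHIFT) - 1);

		/* PFN_PHYS(pte_pfn(*pte)) | (hva & ~page_level_mask(level)) */
		uint64_t paddr = (pfn << PAGE_SHIFT) | (hva & ~level_mask);
		uint64_t pfn4k = paddr >> PAGE_SHIFT;	/* PHYS_PFN(paddr) */

		/* dump_rmpentry() would then scan ALIGN_DOWN(pfn4k, 512) .. +512
		 * (PTRS_PER_PMD entries) if this PFN's entry is unassigned. */
		printf("paddr=0x%llx pfn4k=0x%llx scan_start=0x%llx\n",
		       (unsigned long long)paddr, (unsigned long long)pfn4k,
		       (unsigned long long)(pfn4k & ~511ULL));
		return 0;
	}
]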