x86/mm/pat: Make set_memory_np() L1TF safe
Author: Andi Kleen <ak@linux.intel.com>
Tue, 7 Aug 2018 22:09:39 +0000 (15:09 -0700)
Committer: Thomas Gleixner <tglx@linutronix.de>
Wed, 8 Aug 2018 07:23:44 +0000 (09:23 +0200)
set_memory_np() is used to mark kernel mappings not present, but it has
its own open-coded mechanism which does not have the L1TF protection of
inverting the address bits.

Replace the open coded PTE manipulation with the L1TF protecting low level
PTE routines.

Passes the CPA self test.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/mm/pageattr.c

index 3bded76e8d5c5676bd972b0217e96fb84753ff7f..7bb6f65c79de73f16165ae0d2723b77f911ba69d 100644 (file)
@@ -1014,8 +1014,8 @@ static long populate_pmd(struct cpa_data *cpa,
 
                pmd = pmd_offset(pud, start);
 
-               set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
-                                  massage_pgprot(pmd_pgprot)));
+               set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
+                                       canon_pgprot(pmd_pgprot))));
 
                start     += PMD_SIZE;
                cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
@@ -1087,8 +1087,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
         * Map everything starting from the Gb boundary, possibly with 1G pages
         */
        while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
-               set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
-                                  massage_pgprot(pud_pgprot)));
+               set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
+                                  canon_pgprot(pud_pgprot))));
 
                start     += PUD_SIZE;
                cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;