x86/mm/64: Flush global TLB on boot and AP bringup
author Joerg Roedel <jroedel@suse.de>
Thu, 2 Dec 2021 15:32:24 +0000 (16:32 +0100)
committer Borislav Petkov <bp@suse.de>
Mon, 6 Dec 2021 08:38:48 +0000 (09:38 +0100)
The AP bringup code uses the trampoline_pgd page-table, which
establishes global mappings in the user range of the address space.
Flush the global TLB entries after the identity mappings are removed so
that no stale entries remain in the TLB.
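
For background: a CR3 write flushes only non-global TLB entries, so the
global entries created through the trampoline_pgd survive the page-table
switch; toggling CR4.PGE is the architectural way to flush them, and
that is what the patch wraps in __native_tlb_flush_global(). Below is a
minimal standalone sketch of the mechanism (illustration only, assuming
ring-0 execution with interrupts disabled; read_cr4()/write_cr4() are
local helpers defined here, not the kernel's):

	#define X86_CR4_PGE	(1UL << 7)	/* CR4 global-page enable bit */

	static inline unsigned long read_cr4(void)
	{
		unsigned long cr4;

		asm volatile("mov %%cr4, %0" : "=r" (cr4));
		return cr4;
	}

	static inline void write_cr4(unsigned long cr4)
	{
		asm volatile("mov %0, %%cr4" : : "r" (cr4) : "memory");
	}

	static inline void flush_tlb_global(void)
	{
		unsigned long cr4 = read_cr4();

		/* Clearing PGE invalidates all TLB entries, global included. */
		write_cr4(cr4 ^ X86_CR4_PGE);
		/* Restoring the original value re-enables global pages. */
		write_cr4(cr4);
	}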

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20211202153226.22946-3-joro@8bytes.org
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S
arch/x86/mm/tlb.c

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index b587a9ee9cb25876d1cf6862563802e94b5cc816..98fa0a1140742ab4bd88b862a9775b389aaa7d6d 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -261,4 +261,9 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 #endif /* !MODULE */
 
+static inline void __native_tlb_flush_global(unsigned long cr4)
+{
+       native_write_cr4(cr4 ^ X86_CR4_PGE);
+       native_write_cr4(cr4);
+}
 #endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index fc5371a7e9d199b85992f1634ad073396ae07ac1..75acb6027a87a222c9694b352f09f9c4fc93da43 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -483,6 +483,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
        /* Kill off the identity-map trampoline */
        reset_early_page_tables();
 
+       __native_tlb_flush_global(native_read_cr4());
+
        clear_bss();
 
        clear_page(init_top_pgt);
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d8b3ebd2bb85fe1ff903455de990074eb96e78e9..9c63fc5988cdac29fe77807750bc1ec86063d8cb 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -166,9 +166,26 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
        call    sev_verify_cbit
        popq    %rsi
 
-       /* Switch to new page-table */
+       /*
+        * Switch to new page-table
+        *
+        * For the boot CPU this switches to early_top_pgt which still has the
+        * identity mappings present. The secondary CPUs will switch to the
+        * init_top_pgt here, away from the trampoline_pgd and unmap the
+        * identity mapped ranges.
+        */
        movq    %rax, %cr3
 
+       /*
+        * Do a global TLB flush after the CR3 switch to make sure the TLB
+        * entries from the identity mapping are flushed.
+        */
+       movq    %cr4, %rcx
+       movq    %rcx, %rax
+       xorq    $X86_CR4_PGE, %rcx
+       movq    %rcx, %cr4
+       movq    %rax, %cr4
+
        /* Ensure I am executing from virtual addresses */
        movq    $1f, %rax
        ANNOTATE_RETPOLINE_SAFE
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 59ba2968af1b30349e0ca2427851e7334525dd76..1e6513f95133a1e55292adb6662a70d86fe701e4 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1148,7 +1148,7 @@ void flush_tlb_one_user(unsigned long addr)
  */
 STATIC_NOPV void native_flush_tlb_global(void)
 {
-       unsigned long cr4, flags;
+       unsigned long flags;
 
        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
@@ -1168,11 +1168,7 @@ STATIC_NOPV void native_flush_tlb_global(void)
         */
        raw_local_irq_save(flags);
 
-       cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       /* toggle PGE */
-       native_write_cr4(cr4 ^ X86_CR4_PGE);
-       /* write old PGE again and flush TLBs */
-       native_write_cr4(cr4);
+       __native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));
 
        raw_local_irq_restore(flags);
 }