x86/mtrr: Move cache control code to cacheinfo.c
author	Juergen Gross <jgross@suse.com>
	Wed, 2 Nov 2022 07:47:03 +0000 (08:47 +0100)
committer	Borislav Petkov <bp@suse.de>
	Thu, 10 Nov 2022 12:12:44 +0000 (13:12 +0100)
Prepare to make PAT and MTRR support independent of each other by
moving some code needed by both out of the MTRR-specific sources.

  [ bp: Massage commit message. ]

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-7-jgross@suse.com
arch/x86/kernel/cpu/cacheinfo.c
arch/x86/kernel/cpu/mtrr/generic.c

diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 32fb04920171ee486e729f7d51d10d6e1d4b1922..0cbacece87dbc0eeec2b8ae9af9ddbde28c3390e 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -20,6 +20,8 @@
 #include <asm/cacheinfo.h>
 #include <asm/amd_nb.h>
 #include <asm/smp.h>
+#include <asm/mtrr.h>
+#include <asm/tlbflush.h>
 
 #include "cpu.h"
 
@@ -1043,3 +1045,78 @@ int populate_cache_leaves(unsigned int cpu)
 
        return 0;
 }
+
+/*
+ * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
+ *
+ * Since we are disabling the cache, don't allow any interrupts;
+ * they would run extremely slowly and would only increase the pain.
+ *
+ * The caller must ensure that local interrupts are disabled and
+ * are reenabled after cache_enable() has been called.
+ */
+static unsigned long saved_cr4;
+static DEFINE_RAW_SPINLOCK(cache_disable_lock);
+
+void cache_disable(void) __acquires(cache_disable_lock)
+{
+       unsigned long cr0;
+
+       /*
+        * Note that this is not ideal since the cache is only
+        * flushed/disabled for this CPU while the MTRRs are changed,
+        * but changing this requires more invasive changes to the way
+        * the kernel boots.
+        */
+
+       raw_spin_lock(&cache_disable_lock);
+
+       /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+       cr0 = read_cr0() | X86_CR0_CD;
+       write_cr0(cr0);
+
+       /*
+        * Cache flushing is the most time-consuming step when programming
+        * the MTRRs. Fortunately, as per the Intel Software Developer's
+        * Manual, we can skip it if the processor supports cache
+        * self-snooping.
+        */
+       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+               wbinvd();
+
+       /* Save value of CR4 and clear Page Global Enable (bit 7) */
+       if (cpu_feature_enabled(X86_FEATURE_PGE)) {
+               saved_cr4 = __read_cr4();
+               __write_cr4(saved_cr4 & ~X86_CR4_PGE);
+       }
+
+       /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+       flush_tlb_local();
+
+       if (cpu_feature_enabled(X86_FEATURE_MTRR))
+               mtrr_disable();
+
+       /* Again, only flush caches if we have to. */
+       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+               wbinvd();
+}
+
+void cache_enable(void) __releases(cache_disable_lock)
+{
+       /* Flush TLBs (no need to flush caches - they are disabled) */
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+       flush_tlb_local();
+
+       if (cpu_feature_enabled(X86_FEATURE_MTRR))
+               mtrr_enable();
+
+       /* Enable caches */
+       write_cr0(read_cr0() & ~X86_CR0_CD);
+
+       /* Restore value of CR4 */
+       if (cpu_feature_enabled(X86_FEATURE_PGE))
+               __write_cr4(saved_cr4);
+
+       raw_spin_unlock(&cache_disable_lock);
+}
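
For context, cache_disable() and cache_enable() are meant to bracket
reprogramming of the MTRRs and, later in this series, the PAT MSR, with
the caller masking local interrupts as the comment in cache_disable()
requires. A minimal caller sketch, modeled on generic_set_all() in
mtrr/generic.c; the name set_cache_modes() and its body are illustrative
only and not part of this commit, and it assumes the declarations of
cache_disable()/cache_enable() live in <asm/cacheinfo.h>:

	/* Illustrative sketch only, not part of this commit. */
	static void set_cache_modes(void)
	{
		unsigned long flags;

		/* The caller, not cache_disable(), masks local interrupts. */
		local_irq_save(flags);
		cache_disable();

		/* ... reprogram the MTRRs and/or the PAT MSR here ... */

		cache_enable();
		local_irq_restore(flags);
	}
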
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0db0770e75f6d9935d6c5204b4b2bd1820ca2d1d..396cb1eb1171ab26123f8cd3475ac150d875f312 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -731,80 +731,6 @@ void mtrr_enable(void)
        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 }
 
-/*
- * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
- *
- * Since we are disabling the cache don't allow any interrupts,
- * they would run extremely slow and would only increase the pain.
- *
- * The caller must ensure that local interrupts are disabled and
- * are reenabled after cache_enable() has been called.
- */
-static unsigned long saved_cr4;
-static DEFINE_RAW_SPINLOCK(cache_disable_lock);
-
-void cache_disable(void) __acquires(cache_disable_lock)
-{
-       unsigned long cr0;
-
-       /*
-        * Note that this is not ideal
-        * since the cache is only flushed/disabled for this CPU while the
-        * MTRRs are changed, but changing this requires more invasive
-        * changes to the way the kernel boots
-        */
-
-       raw_spin_lock(&cache_disable_lock);
-
-       /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
-       cr0 = read_cr0() | X86_CR0_CD;
-       write_cr0(cr0);
-
-       /*
-        * Cache flushing is the most time-consuming step when programming
-        * the MTRRs. Fortunately, as per the Intel Software Development
-        * Manual, we can skip it if the processor supports cache self-
-        * snooping.
-        */
-       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
-               wbinvd();
-
-       /* Save value of CR4 and clear Page Global Enable (bit 7) */
-       if (boot_cpu_has(X86_FEATURE_PGE)) {
-               saved_cr4 = __read_cr4();
-               __write_cr4(saved_cr4 & ~X86_CR4_PGE);
-       }
-
-       /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-       flush_tlb_local();
-
-       if (cpu_feature_enabled(X86_FEATURE_MTRR))
-               mtrr_disable();
-
-       /* Again, only flush caches if we have to. */
-       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
-               wbinvd();
-}
-
-void cache_enable(void) __releases(cache_disable_lock)
-{
-       /* Flush TLBs (no need to flush caches - they are disabled) */
-       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-       flush_tlb_local();
-
-       if (cpu_feature_enabled(X86_FEATURE_MTRR))
-               mtrr_enable();
-
-       /* Enable caches */
-       write_cr0(read_cr0() & ~X86_CR0_CD);
-
-       /* Restore value of CR4 */
-       if (boot_cpu_has(X86_FEATURE_PGE))
-               __write_cr4(saved_cr4);
-       raw_spin_unlock(&cache_disable_lock);
-}
-
 static void generic_set_all(void)
 {
        unsigned long mask, count;