x86/mtrr: Disentangle MTRR init from PAT init
author	Juergen Gross <jgross@suse.com>
Wed, 2 Nov 2022 07:47:04 +0000 (08:47 +0100)
committer	Borislav Petkov <bp@suse.de>
Thu, 10 Nov 2022 12:12:44 +0000 (13:12 +0100)
Add a main cache_cpu_init() init routine which initializes MTRR and/or
PAT support depending on what has been detected on the system.

Leave the MTRR-specific initialization in an MTRR-specific init function,
where the smp_changes_mask update now happens with caches disabled.

Previously, this global mask update was done with caches enabled, probably
because atomic operations while running uncached might have been quite
expensive.

But since only systems with a broken BIOS should ever need to set any bit
in smp_changes_mask, hurting those machines with a penalty of a few
microseconds during boot shouldn't be a real issue.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-8-jgross@suse.com
arch/x86/include/asm/cacheinfo.h
arch/x86/include/asm/mtrr.h
arch/x86/kernel/cpu/cacheinfo.c
arch/x86/kernel/cpu/mtrr/generic.c

index 6159874b4183ba9dbdf78a79332334388a96d375..978bac70fd4942365bf68e70036f68c6da0edcb8 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -12,5 +12,6 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
 
 void cache_disable(void);
 void cache_enable(void);
+void cache_cpu_init(void);
 
 #endif /* _ASM_X86_CACHEINFO_H */
index 12a16caed395ae0a15b52becbe03b12bc36f9d52..986249a2b9b666d4849a65ad3c7413f960bf3843 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -50,6 +50,7 @@ extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
 void mtrr_disable(void);
 void mtrr_enable(void);
+void mtrr_generic_set_state(void);
 #  else
 static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
 {
@@ -91,6 +92,7 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 #define mtrr_bp_restore() do {} while (0)
 #define mtrr_disable() do {} while (0)
 #define mtrr_enable() do {} while (0)
+#define mtrr_generic_set_state() do {} while (0)
 #  endif
 
 #ifdef CONFIG_COMPAT
index 0cbacece87dbc0eeec2b8ae9af9ddbde28c3390e..31684bfa6f84d1f5cff2cb523ad005a1154e25a6 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -1120,3 +1120,20 @@ void cache_enable(void) __releases(cache_disable_lock)
 
        raw_spin_unlock(&cache_disable_lock);
 }
+
+void cache_cpu_init(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       cache_disable();
+
+       if (memory_caching_control & CACHE_MTRR)
+               mtrr_generic_set_state();
+
+       if (memory_caching_control & CACHE_PAT)
+               pat_init();
+
+       cache_enable();
+       local_irq_restore(flags);
+}
index 396cb1eb1171ab26123f8cd3475ac150d875f312..d409c38ee748d898cf8990cb700388733c5f66c5 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -731,30 +731,19 @@ void mtrr_enable(void)
        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 }
 
-static void generic_set_all(void)
+void mtrr_generic_set_state(void)
 {
        unsigned long mask, count;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       cache_disable();
 
        /* Actually set the state */
        mask = set_mtrr_state();
 
-       /* also set PAT */
-       pat_init();
-
-       cache_enable();
-       local_irq_restore(flags);
-
        /* Use the atomic bitops to update the global mask */
        for (count = 0; count < sizeof(mask) * 8; ++count) {
                if (mask & 0x01)
                        set_bit(count, &smp_changes_mask);
                mask >>= 1;
        }
-
 }
 
 /**
@@ -854,7 +843,7 @@ int positive_have_wrcomb(void)
  * Generic structure...
  */
 const struct mtrr_ops generic_mtrr_ops = {
-       .set_all                = generic_set_all,
+       .set_all                = cache_cpu_init,
        .get                    = generic_get_mtrr,
        .get_free_region        = generic_get_free_region,
        .set                    = generic_set_mtrr,