riscv: Avoid TLB flush loops when affected by SiFive CIP-1200
author     Samuel Holland <samuel.holland@sifive.com>
Wed, 27 Mar 2024 04:49:49 +0000 (21:49 -0700)
committer  Palmer Dabbelt <palmer@rivosinc.com>
Mon, 29 Apr 2024 17:49:31 +0000 (10:49 -0700)
Implementations affected by SiFive erratum CIP-1200 have a bug that
forces the kernel to always use the global variant of the sfence.vma
instruction. When affected by this erratum, do not attempt to flush a
range of addresses page by page: each iteration of that loop would
actually flush the whole TLB. Instead, minimize the overall number of
sfence.vma instructions by setting the flush-all threshold to zero, so
that a range flush falls back to a single global flush.
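
For reference, here is a minimal user-space sketch (not kernel code;
the sfence_count() helper and the 16-page example are purely
illustrative) of why dropping the threshold to zero reduces the number
of sfence.vma instructions on affected parts:

    #include <stdio.h>

    /* Mirrors the kernel's tlb_flush_all_threshold; the erratum probe sets it to 0. */
    static unsigned long tlb_flush_all_threshold = 64;

    /* How many sfence.vma instructions a range flush of nr_pages would issue. */
    static unsigned long sfence_count(unsigned long nr_pages)
    {
            if (nr_pages > tlb_flush_all_threshold)
                    return 1;        /* single global sfence.vma */
            return nr_pages;         /* one sfence.vma per page, each global on CIP-1200 */
    }

    int main(void)
    {
            printf("threshold 64: %lu sfence.vma for 16 pages\n", sfence_count(16));
            tlb_flush_all_threshold = 0;        /* what the erratum probe now does */
            printf("threshold  0: %lu sfence.vma for 16 pages\n", sfence_count(16));
            return 0;
    }

On affected hardware every per-page sfence.vma is effectively global,
so the 16 loop iterations in the first case buy nothing over the single
global flush in the second.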

Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Reviewed-by: Yunhui Cui <cuiyunhui@bytedance.com>
Link: https://lore.kernel.org/r/20240327045035.368512-9-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
arch/riscv/errata/sifive/errata.c
arch/riscv/include/asm/tlbflush.h
arch/riscv/mm/tlbflush.c

index 3d9a32d791f73c4ee0199c9c00c695ff922d4d75..716cfedad3a2a442547df98b26a58f937e57caef 100644
@@ -42,6 +42,11 @@ static bool errata_cip_1200_check_func(unsigned long  arch_id, unsigned long imp
                return false;
        if ((impid & 0xffffff) > 0x200630 || impid == 0x1200626)
                return false;
+
+#ifdef CONFIG_MMU
+       tlb_flush_all_threshold = 0;
+#endif
+
        return true;
 }
 
index 463b615d7728c8ddd77b6ea70b32a0db47b198ba..8e329721375b31b9815e6dae3a84cc96f76031e8 100644
@@ -66,6 +66,8 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
                               unsigned long uaddr);
 void arch_flush_tlb_batched_pending(struct mm_struct *mm);
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+
+extern unsigned long tlb_flush_all_threshold;
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()                  do { } while (0)
 #endif /* CONFIG_MMU */
index ad7bdcfcc2199228c7b363d10ce23e342a6517b9..18af7b5053af232a645b0836887daf6ea9af5035 100644
@@ -11,7 +11,7 @@
  * Flush entire TLB if number of entries to be flushed is greater
  * than the threshold below.
  */
-static unsigned long tlb_flush_all_threshold __read_mostly = 64;
+unsigned long tlb_flush_all_threshold __read_mostly = 64;
 
 static void local_flush_tlb_range_threshold_asid(unsigned long start,
                                                 unsigned long size,
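
For context, a self-contained sketch of how the now-exported
tlb_flush_all_threshold is consumed by the range-flush helper shown
above (abridged and stubbed for illustration; in the kernel the flush
helpers emit sfence.vma, and details may differ between versions):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    unsigned long tlb_flush_all_threshold = 64;     /* 0 once CIP-1200 is detected */

    /* Stand-ins for the kernel helpers that issue sfence.vma. */
    static void local_flush_tlb_all_asid(unsigned long asid)
    {
            printf("global flush, asid %lu\n", asid);
    }

    static void local_flush_tlb_page_asid(unsigned long addr, unsigned long asid)
    {
            printf("page flush at %#lx, asid %lu\n", addr, asid);
    }

    static void local_flush_tlb_range_threshold_asid(unsigned long start,
                                                     unsigned long size,
                                                     unsigned long stride,
                                                     unsigned long asid)
    {
            unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
            unsigned long i;

            /* With the threshold at 0, any non-empty range takes the global path. */
            if (nr_ptes_in_range > tlb_flush_all_threshold) {
                    local_flush_tlb_all_asid(asid);
                    return;
            }

            for (i = 0; i < nr_ptes_in_range; i++) {
                    local_flush_tlb_page_asid(start, asid);
                    start += stride;
            }
    }

    int main(void)
    {
            tlb_flush_all_threshold = 0;    /* as set by errata_cip_1200_check_func() */
            local_flush_tlb_range_threshold_asid(0x10000, 4 * 4096, 4096, 1);
            return 0;
    }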