riscv: mm: Use a fixed layout for the MM context ID
author Samuel Holland <samuel.holland@sifive.com>
Wed, 27 Mar 2024 04:49:51 +0000 (21:49 -0700)
committer Palmer Dabbelt <palmer@rivosinc.com>
Mon, 29 Apr 2024 17:49:33 +0000 (10:49 -0700)
Currently, the size of the ASID field in the MM context ID depends at
runtime on the number of hardware-supported ASID bits, so extracting
either field from the context ID requires reading a global variable
(asid_mask). Instead, allocate the maximum possible number of bits
(SATP_ASID_BITS) to the ASID field, so the layout of the context ID is
known at compile time.
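
As an illustration (an aside, not part of the patch), a minimal
user-space sketch of the resulting fixed layout, assuming the RV64
value SATP_ASID_BITS == 16; the example context value and the
standalone constant definitions below are hypothetical stand-ins for
the kernel's:

  /* Standalone sketch: fixed MM context ID layout with the ASID in the
   * low SATP_ASID_BITS bits and the version in the remaining high bits.
   */
  #include <stdio.h>

  #define SATP_ASID_BITS  16UL                          /* assumed RV64 value */
  #define SATP_ASID_MASK  ((1UL << SATP_ASID_BITS) - 1)

  #define cntx2asid(cntx)        ((cntx) & SATP_ASID_MASK)
  #define cntx2version(cntx)     ((cntx) & ~SATP_ASID_MASK)

  int main(void)
  {
          /* Hypothetical context ID: version 3, ASID 0x42 */
          unsigned long cntx = (3UL << SATP_ASID_BITS) | 0x42;

          printf("asid    = 0x%lx\n", cntx2asid(cntx));    /* 0x42    */
          printf("version = 0x%lx\n", cntx2version(cntx)); /* 0x30000 */
          return 0;
  }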

Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Link: https://lore.kernel.org/r/20240327045035.368512-11-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
arch/riscv/include/asm/mmu.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/mm/context.c

diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index a550fbf770be2763bc58328ae58fea318b273947..dc0273f7905f871d4746621170d707374a63d392 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -26,8 +26,8 @@ typedef struct {
 #endif
 } mm_context_t;
 
-#define cntx2asid(cntx)                ((cntx) & asid_mask)
-#define cntx2version(cntx)     ((cntx) & ~asid_mask)
+#define cntx2asid(cntx)                ((cntx) & SATP_ASID_MASK)
+#define cntx2version(cntx)     ((cntx) & ~SATP_ASID_MASK)
 
 void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
                               phys_addr_t sz, pgprot_t prot);
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 8e329721375b31b9815e6dae3a84cc96f76031e8..72e559934952939aa410ef7cdda4ef7ad2696a26 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -15,8 +15,6 @@
 #define FLUSH_TLB_NO_ASID       ((unsigned long)-1)
 
 #ifdef CONFIG_MMU
-extern unsigned long asid_mask;
-
 static inline void local_flush_tlb_all(void)
 {
        __asm__ __volatile__ ("sfence.vma" : : : "memory");
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index b562b3c444875a287130edcdf37bd81c2adaf057..5315af06cd4d00c890cb2e465a278f2289644ef4 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -22,7 +22,6 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 
 static unsigned long asid_bits;
 static unsigned long num_asids;
-unsigned long asid_mask;
 
 static atomic_long_t current_version;
 
@@ -128,7 +127,7 @@ static unsigned long __new_context(struct mm_struct *mm)
                goto set_asid;
 
        /* We're out of ASIDs, so increment current_version */
-       ver = atomic_long_add_return_relaxed(num_asids, &current_version);
+       ver = atomic_long_add_return_relaxed(BIT(SATP_ASID_BITS), &current_version);
 
        /* Flush everything  */
        __flush_context();
@@ -247,7 +246,6 @@ static int __init asids_init(void)
        /* Pre-compute ASID details */
        if (asid_bits) {
                num_asids = 1 << asid_bits;
-               asid_mask = num_asids - 1;
        }
 
        /*
@@ -255,7 +253,7 @@ static int __init asids_init(void)
         * at-least twice more than CPUs
         */
        if (num_asids > (2 * num_possible_cpus())) {
-               atomic_long_set(&current_version, num_asids);
+               atomic_long_set(&current_version, BIT(SATP_ASID_BITS));
 
                context_asid_map = bitmap_zalloc(num_asids, GFP_KERNEL);
                if (!context_asid_map)
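
One more worked detail (an aside, not part of the patch): because the
ASID field of current_version is always zero, adding
BIT(SATP_ASID_BITS) carries directly into the version field, which is
why the bumps above are plain additions. A quick check of that
arithmetic under the same assumed SATP_ASID_BITS == 16:

  /* Sketch: incrementing the version field by adding BIT(SATP_ASID_BITS)
   * leaves the ASID bits clear. BIT() is redefined here for a
   * standalone build. */
  #include <assert.h>

  #define SATP_ASID_BITS  16UL
  #define BIT(n)          (1UL << (n))

  int main(void)
  {
          unsigned long current_version = 2UL << SATP_ASID_BITS;  /* version 2, ASID 0 */

          current_version += BIT(SATP_ASID_BITS);

          assert(current_version == (3UL << SATP_ASID_BITS));     /* version 3, ASID 0 */
          return 0;
  }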