csky: Fix TLB maintenance synchronization problem
author     Guo Ren <guoren@linux.alibaba.com>
           Thu, 24 Dec 2020 05:59:57 +0000 (05:59 +0000)
committer  Guo Ren <guoren@linux.alibaba.com>
           Tue, 12 Jan 2021 01:52:41 +0000 (09:52 +0800)
The TLB invalidation instructions on csky CPUs do not include a
barrier operation, so we need to prevent a previous PTW (page table
walk) response from arriving after the TLB invalidation instruction
has completed. Of course, changing the ASID also needs to take care
of the same issue.

CPU0                    CPU1
===============         ===============
set_pte
sync_is()        ->     all harts see the previous set_pte
tlbi.vas         ->     all harts invalidate the TLB entry & flush the pipeline
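
The fix therefore brackets each broadcast invalidation with two
barriers: sync.is (sync_is() in the kernel) before the tlbi
instruction, so every hart observes the preceding PTE store, and
sync.i after it, to flush the pipeline and discard any stale
translation. A minimal sketch of the pattern, modelled on the
flush_tlb_one() hunk below (example_flush_one is an illustrative
name, not part of the patch):

    static inline void example_flush_one(unsigned long addr)
    {
            sync_is();                      /* prior PTE stores visible to all harts */
            asm volatile(
                    "tlbi.vaas %0   \n"     /* invalidate addr on all harts */
                    "sync.i         \n"     /* flush pipeline, drop stale translations */
                    :
                    : "r" (addr)
                    : "memory");
    }

For ranges, flush_tlb_range() and flush_tlb_kernel_range() hoist the
sync_is() before the loop and issue a single trailing sync.i, paying
for the barriers once per batch rather than once per entry.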

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
arch/csky/abiv1/inc/abi/ckmmu.h
arch/csky/abiv2/inc/abi/ckmmu.h
arch/csky/include/asm/mmu_context.h
arch/csky/mm/init.c
arch/csky/mm/tlb.c

diff --git a/arch/csky/abiv1/inc/abi/ckmmu.h b/arch/csky/abiv1/inc/abi/ckmmu.h
index cceb3afb4c91679058aece518d29f8b77a7f75c7..b4650de43078950737bc9e54f76b658925822676 100644
@@ -89,9 +89,10 @@ static inline void tlb_invalid_indexed(void)
        cpwcr("cpcr8", 0x02000000);
 }
 
-static inline void setup_pgd(pgd_t *pgd)
+static inline void setup_pgd(pgd_t *pgd, int asid)
 {
        cpwcr("cpcr29", __pa(pgd) | BIT(0));
+       write_mmu_entryhi(asid);
 }
 
 static inline pgd_t *get_pgd(void)
diff --git a/arch/csky/abiv2/inc/abi/ckmmu.h b/arch/csky/abiv2/inc/abi/ckmmu.h
index c39b13810550f1172e04d5342828e8e6cf5e0e56..530d2c7edc85600b348cd1049632d1e34c3b71ff 100644
@@ -78,8 +78,13 @@ static inline void tlb_read(void)
 static inline void tlb_invalid_all(void)
 {
 #ifdef CONFIG_CPU_HAS_TLBI
-       asm volatile("tlbi.alls\n":::"memory");
        sync_is();
+       asm volatile(
+               "tlbi.alls      \n"
+               "sync.i         \n"
+               :
+               :
+               : "memory");
 #else
        mtcr("cr<8, 15>", 0x04000000);
 #endif
@@ -88,8 +93,13 @@ static inline void tlb_invalid_all(void)
 static inline void local_tlb_invalid_all(void)
 {
 #ifdef CONFIG_CPU_HAS_TLBI
-       asm volatile("tlbi.all\n":::"memory");
        sync_is();
+       asm volatile(
+               "tlbi.all       \n"
+               "sync.i         \n"
+               :
+               :
+               : "memory");
 #else
        tlb_invalid_all();
 #endif
@@ -100,12 +110,27 @@ static inline void tlb_invalid_indexed(void)
        mtcr("cr<8, 15>", 0x02000000);
 }
 
-static inline void setup_pgd(pgd_t *pgd)
+#define NOP32 ".long 0x4820c400\n"
+
+static inline void setup_pgd(pgd_t *pgd, int asid)
 {
 #ifdef CONFIG_CPU_HAS_TLBI
-       mtcr("cr<28, 15>", __pa(pgd) | BIT(0));
+       sync_is();
+#else
+       mb();
+#endif
+       asm volatile(
+#ifdef CONFIG_CPU_HAS_TLBI
+               "mtcr %1, cr<28, 15>    \n"
 #endif
-       mtcr("cr<29, 15>", __pa(pgd) | BIT(0));
+               "mtcr %1, cr<29, 15>    \n"
+               "mtcr %0, cr< 4, 15>    \n"
+               ".rept 64               \n"
+               NOP32
+               ".endr                  \n"
+               :
+               :"r"(asid), "r"(__pa(pgd) | BIT(0))
+               :"memory");
 }
 
 static inline pgd_t *get_pgd(void)
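
For reference, an annotated reading of the new abiv2 setup_pgd() (the
comments are editorial interpretation, not part of the patch): the pgd
base and the new ASID are written in one asm block, so there is no
window in which the page table walker could pair the new pgd with the
old ASID, and the 64 padded 32-bit NOPs presumably carry the pipeline
past any instruction still fetched under the old ASID.

    static inline void setup_pgd(pgd_t *pgd, int asid)
    {
    #ifdef CONFIG_CPU_HAS_TLBI
            sync_is();                      /* prior pgd/PTE stores visible to all harts */
    #else
            mb();                           /* fallback barrier without TLBI support */
    #endif
            asm volatile(
    #ifdef CONFIG_CPU_HAS_TLBI
                    "mtcr %1, cr<28, 15>    \n"     /* pgd base (TLBI-capable cores) */
    #endif
                    "mtcr %1, cr<29, 15>    \n"     /* pgd base */
                    "mtcr %0, cr< 4, 15>    \n"     /* new ASID, cf. abiv1 write_mmu_entryhi() */
                    ".rept 64               \n"
                    NOP32                           /* 0x4820c400: 32-bit nop */
                    ".endr                  \n"
                    :
                    : "r"(asid), "r"(__pa(pgd) | BIT(0))
                    : "memory");
    }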
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
index 3767dbffd02f4f2fc3b7bb38a7c111fb4c7d1b05..594167bbdc63f60cd75850a7cd6eae8dba2a6cac 100644
@@ -30,8 +30,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
        if (prev != next)
                check_and_switch_context(next, cpu);
 
-       setup_pgd(next->pgd);
-       write_mmu_entryhi(next->context.asid.counter);
+       setup_pgd(next->pgd, next->context.asid.counter);
 
        flush_icache_deferred(next);
 }
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index 8170d7ce116ba73eb15a20b6978900f3e394c058..bc05a3be9d57b9d2f2396607c0c710b4c964f2e5 100644
@@ -164,7 +164,7 @@ void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn)
        /* Setup page mask to 4k */
        write_mmu_pagemask(0);
 
-       setup_pgd(swapper_pg_dir);
+       setup_pgd(swapper_pg_dir, 0);
 }
 
 void __init fixrange_init(unsigned long start, unsigned long end,
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
index ed151238111228ca6d1c851a895c9413e5be1dd1..9234c5e5ceafd5e573dde6c6047913aa9fa2b14e 100644
@@ -24,7 +24,13 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
 #ifdef CONFIG_CPU_HAS_TLBI
-       asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
+       sync_is();
+       asm volatile(
+               "tlbi.asids %0  \n"
+               "sync.i         \n"
+               :
+               : "r" (cpu_asid(mm))
+               : "memory");
 #else
        tlb_invalid_all();
 #endif
@@ -53,11 +59,17 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        end   &= TLB_ENTRY_SIZE_MASK;
 
 #ifdef CONFIG_CPU_HAS_TLBI
+       sync_is();
        while (start < end) {
-               asm volatile("tlbi.vas %0"::"r"(start | newpid));
+               asm volatile(
+                       "tlbi.vas %0    \n"
+                       :
+                       : "r" (start | newpid)
+                       : "memory");
+
                start += 2*PAGE_SIZE;
        }
-       sync_is();
+       asm volatile("sync.i\n");
 #else
        {
        unsigned long flags, oldpid;
@@ -87,11 +99,17 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
        end   &= TLB_ENTRY_SIZE_MASK;
 
 #ifdef CONFIG_CPU_HAS_TLBI
+       sync_is();
        while (start < end) {
-               asm volatile("tlbi.vaas %0"::"r"(start));
+               asm volatile(
+                       "tlbi.vaas %0   \n"
+                       :
+                       : "r" (start)
+                       : "memory");
+
                start += 2*PAGE_SIZE;
        }
-       sync_is();
+       asm volatile("sync.i\n");
 #else
        {
        unsigned long flags, oldpid;
@@ -121,8 +139,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
        addr &= TLB_ENTRY_SIZE_MASK;
 
 #ifdef CONFIG_CPU_HAS_TLBI
-       asm volatile("tlbi.vas %0"::"r"(addr | newpid));
        sync_is();
+       asm volatile(
+               "tlbi.vas %0    \n"
+               "sync.i         \n"
+               :
+               : "r" (addr | newpid)
+               : "memory");
 #else
        {
        int oldpid, idx;
@@ -147,8 +170,13 @@ void flush_tlb_one(unsigned long addr)
        addr &= TLB_ENTRY_SIZE_MASK;
 
 #ifdef CONFIG_CPU_HAS_TLBI
-       asm volatile("tlbi.vaas %0"::"r"(addr));
        sync_is();
+       asm volatile(
+               "tlbi.vaas %0   \n"
+               "sync.i         \n"
+               :
+               : "r" (addr)
+               : "memory");
 #else
        {
        int oldpid, idx;