arm64: __flush_dcache_area to take end parameter instead of size
authorFuad Tabba <tabba@google.com>
Mon, 24 May 2021 08:29:55 +0000 (09:29 +0100)
committerWill Deacon <will@kernel.org>
Tue, 25 May 2021 18:27:49 +0000 (19:27 +0100)
To be consistent with other functions with similar names and
functionality in cacheflush.h, cache.S, and cachetlb.rst, change
__flush_dcache_area to specify the range in terms of start and
end addresses, as opposed to start and size.

No functional change intended.

Reported-by: Will Deacon <will@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20210524083001.2586635-13-tabba@google.com
Signed-off-by: Will Deacon <will@kernel.org>
14 files changed:
arch/arm64/include/asm/arch_gicv3.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/idreg-override.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/machine_kexec.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kvm/hyp/nvhe/cache.S
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/mm/cache.S

index 934b9be582d21deb1f7d7d98d036918642e80180..ed1cc9d8e6df70e69f12269fa1cdfd961769bd22 100644 (file)
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
 #define gic_read_lpir(c)               readq_relaxed(c)
 #define gic_write_lpir(v, c)           writeq_relaxed(v, c)
 
-#define gic_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+#define gic_flush_dcache_to_poc(a,l)   \
+       __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))
 
 #define gits_read_baser(c)             readq_relaxed(c)
 #define gits_write_baser(v, c)         writeq_relaxed(v, c)
index 1572347068170eb05f91369751ae397a4b8f34c7..695f88864784f7e04b03672c7279a5ffb360808e 100644 (file)
  *             - start  - virtual start address
  *             - end    - virtual end address
  *
- *     __flush_dcache_area(kaddr, size)
+ *     __flush_dcache_area(start, end)
  *
  *             Ensure that the data held in page is written back.
- *             - kaddr  - page address
- *             - size   - region size
+ *             - start  - virtual start address
+ *             - end    - virtual end address
  */
 extern void __flush_icache_range(unsigned long start, unsigned long end);
 extern void invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __flush_dcache_area(void *addr, size_t len);
+extern void __flush_dcache_area(unsigned long start, unsigned long end);
 extern void __inval_dcache_area(unsigned long start, unsigned long end);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pop(void *addr, size_t len);
index 3578aba9c60805ad538ad99a352a09bc1ea2b340..0ae2397076fd7724c6031a16713beaccbdf87bda 100644 (file)
@@ -137,7 +137,7 @@ void efi_virtmap_unload(void);
 
 static inline void efi_capsule_flush_cache_range(void *addr, int size)
 {
-       __flush_dcache_area(addr, size);
+       __flush_dcache_area((unsigned long)addr, (unsigned long)addr + size);
 }
 
 #endif /* _ASM_EFI_H */
index 25ed956f9af1530bf57c3343107d6f7ad54b3574..33293d5855af42072d3c8be3c6d2ba780f150908 100644 (file)
@@ -180,7 +180,8 @@ static inline void *__kvm_vector_slot2addr(void *base,
 
 struct kvm;
 
-#define kvm_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+#define kvm_flush_dcache_to_poc(a,l)   \
+       __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
index b1cef371df2b211ced3f07f84e0dac92220c4097..b40ddce7150733dbb05d0443f44018303e1c423e 100644 (file)
@@ -240,8 +240,6 @@ static int create_safe_exec_page(void *src_start, size_t length,
        return 0;
 }
 
-#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
-
 #ifdef CONFIG_ARM64_MTE
 
 static DEFINE_XARRAY(mte_pages);
@@ -383,13 +381,18 @@ int swsusp_arch_suspend(void)
                ret = swsusp_save();
        } else {
                /* Clean kernel core startup/idle code to PoC*/
-               dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
-               dcache_clean_range(__idmap_text_start, __idmap_text_end);
+               __flush_dcache_area((unsigned long)__mmuoff_data_start,
+                                   (unsigned long)__mmuoff_data_end);
+               __flush_dcache_area((unsigned long)__idmap_text_start,
+                                   (unsigned long)__idmap_text_end);
 
                /* Clean kvm setup code to PoC? */
                if (el2_reset_needed()) {
-                       dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
-                       dcache_clean_range(__hyp_text_start, __hyp_text_end);
+                       __flush_dcache_area(
+                               (unsigned long)__hyp_idmap_text_start,
+                               (unsigned long)__hyp_idmap_text_end);
+                       __flush_dcache_area((unsigned long)__hyp_text_start,
+                                           (unsigned long)__hyp_text_end);
                }
 
                swsusp_mte_restore_tags();
@@ -474,7 +477,8 @@ int swsusp_arch_resume(void)
         * The hibernate exit text contains a set of el2 vectors, that will
         * be executed at el2 with the mmu off in order to reload hyp-stub.
         */
-       __flush_dcache_area(hibernate_exit, exit_size);
+       __flush_dcache_area((unsigned long)hibernate_exit,
+                           (unsigned long)hibernate_exit + exit_size);
 
        /*
         * KASLR will cause the el2 vectors to be in a different location in
index e628c8ce1ffe2f6976fd20233f75632d73ce42c4..3dd515baf526883b82be5ff268f50dea6be01e0e 100644 (file)
@@ -237,7 +237,8 @@ asmlinkage void __init init_feature_override(void)
 
        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                if (regs[i]->override)
-                       __flush_dcache_area(regs[i]->override,
+                       __flush_dcache_area((unsigned long)regs[i]->override,
+                                           (unsigned long)regs[i]->override +
                                            sizeof(*regs[i]->override));
        }
 }
index 341342b207f63286db6db7c48b72fe7629c12b9e..49cccd03cb3704c78c66feec1b9ce00c9e839dba 100644 (file)
@@ -72,7 +72,9 @@ u64 __init kaslr_early_init(void)
         * we end up running with module randomization disabled.
         */
        module_alloc_base = (u64)_etext - MODULES_VSIZE;
-       __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+       __flush_dcache_area((unsigned long)&module_alloc_base,
+                           (unsigned long)&module_alloc_base +
+                                   sizeof(module_alloc_base));
 
        /*
         * Try to map the FDT early. If this fails, we simply bail,
@@ -170,8 +172,12 @@ u64 __init kaslr_early_init(void)
        module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
        module_alloc_base &= PAGE_MASK;
 
-       __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
-       __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+       __flush_dcache_area((unsigned long)&module_alloc_base,
+                           (unsigned long)&module_alloc_base +
+                                   sizeof(module_alloc_base));
+       __flush_dcache_area((unsigned long)&memstart_offset_seed,
+                           (unsigned long)&memstart_offset_seed +
+                                   sizeof(memstart_offset_seed));
 
        return offset;
 }
index a03944fd0cd4eaab649d09ea53972ca5fbf0f367..3e79110c8f3a8dbef8574606177b4fe5cbfb8299 100644 (file)
@@ -72,7 +72,9 @@ int machine_kexec_post_load(struct kimage *kimage)
         * For execution with the MMU off, reloc_code needs to be cleaned to the
         * PoC and invalidated from the I-cache.
         */
-       __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
+       __flush_dcache_area((unsigned long)reloc_code,
+                           (unsigned long)reloc_code +
+                                   arm64_relocate_new_kernel_size);
        invalidate_icache_range((uintptr_t)reloc_code,
                                (uintptr_t)reloc_code +
                                        arm64_relocate_new_kernel_size);
@@ -106,16 +108,18 @@ static void kexec_list_flush(struct kimage *kimage)
 
        for (entry = &kimage->head; ; entry++) {
                unsigned int flag;
-               void *addr;
+               unsigned long addr;
 
                /* flush the list entries. */
-               __flush_dcache_area(entry, sizeof(kimage_entry_t));
+               __flush_dcache_area((unsigned long)entry,
+                                   (unsigned long)entry +
+                                           sizeof(kimage_entry_t));
 
                flag = *entry & IND_FLAGS;
                if (flag == IND_DONE)
                        break;
 
-               addr = phys_to_virt(*entry & PAGE_MASK);
+               addr = (unsigned long)phys_to_virt(*entry & PAGE_MASK);
 
                switch (flag) {
                case IND_INDIRECTION:
@@ -124,7 +128,7 @@ static void kexec_list_flush(struct kimage *kimage)
                        break;
                case IND_SOURCE:
                        /* flush the source pages. */
-                       __flush_dcache_area(addr, PAGE_SIZE);
+                       __flush_dcache_area(addr, addr + PAGE_SIZE);
                        break;
                case IND_DESTINATION:
                        break;
@@ -151,8 +155,10 @@ static void kexec_segment_flush(const struct kimage *kimage)
                        kimage->segment[i].memsz,
                        kimage->segment[i].memsz /  PAGE_SIZE);
 
-               __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
-                       kimage->segment[i].memsz);
+               __flush_dcache_area(
+                       (unsigned long)phys_to_virt(kimage->segment[i].mem),
+                       (unsigned long)phys_to_virt(kimage->segment[i].mem) +
+                               kimage->segment[i].memsz);
        }
 }
 
index dcd7041b2b077f0ece865d5edfb4b1b67e04199c..5fcdee331087465acb27bd7206b0c67ef1c62ebc 100644 (file)
@@ -122,7 +122,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        secondary_data.task = idle;
        secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
        update_cpu_boot_status(CPU_MMU_OFF);
-       __flush_dcache_area(&secondary_data, sizeof(secondary_data));
+       __flush_dcache_area((unsigned long)&secondary_data,
+                           (unsigned long)&secondary_data +
+                                   sizeof(secondary_data));
 
        /* Now bring the CPU into our world */
        ret = boot_secondary(cpu, idle);
@@ -143,7 +145,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        pr_crit("CPU%u: failed to come online\n", cpu);
        secondary_data.task = NULL;
        secondary_data.stack = NULL;
-       __flush_dcache_area(&secondary_data, sizeof(secondary_data));
+       __flush_dcache_area((unsigned long)&secondary_data,
+                           (unsigned long)&secondary_data +
+                                   sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
        if (status == CPU_MMU_OFF)
                status = READ_ONCE(__early_cpu_boot_status);
index c45a835128057ac6f904e9770138005c08efdfbb..58d804582a35f756d3f54604599637b48fc7034b 100644 (file)
@@ -36,7 +36,7 @@ static void write_pen_release(u64 val)
        unsigned long size = sizeof(secondary_holding_pen_release);
 
        secondary_holding_pen_release = val;
-       __flush_dcache_area(start, size);
+       __flush_dcache_area((unsigned long)start, (unsigned long)start + size);
 }
 
 
@@ -90,8 +90,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
         * the boot protocol.
         */
        writeq_relaxed(pa_holding_pen, release_addr);
-       __flush_dcache_area((__force void *)release_addr,
-                           sizeof(*release_addr));
+       __flush_dcache_area((__force unsigned long)release_addr,
+                           (__force unsigned long)release_addr +
+                                   sizeof(*release_addr));
 
        /*
         * Send an event to wake up the secondary CPU.
index 3bcfa3cac46fe22b0cefe4b798f578567f6b988c..36cef69154281cfd71d1c17fbe0538fbaa8c1a12 100644 (file)
@@ -8,7 +8,6 @@
 #include <asm/alternative.h>
 
 SYM_FUNC_START_PI(__flush_dcache_area)
-       add     x1, x0, x1
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
 SYM_FUNC_END_PI(__flush_dcache_area)
index 7488f53b0aa2fe05e4c11bd32060488a690ff386..5dffe928f256314fa8646c4d63be037b5854175b 100644 (file)
@@ -134,7 +134,8 @@ static void update_nvhe_init_params(void)
        for (i = 0; i < hyp_nr_cpus; i++) {
                params = per_cpu_ptr(&kvm_init_params, i);
                params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
-               __flush_dcache_area(params, sizeof(*params));
+               __flush_dcache_area((unsigned long)params,
+                                   (unsigned long)params + sizeof(*params));
        }
 }
 
index c37c1dc4feafa6c9c50d87b8dc857fea4abae3bd..10d2f04013d44ef8da4413f3ff2e6c1db3d86503 100644 (file)
@@ -839,8 +839,11 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
        stage2_put_pte(ptep, mmu, addr, level, mm_ops);
 
        if (need_flush) {
-               __flush_dcache_area(kvm_pte_follow(pte, mm_ops),
-                                   kvm_granule_size(level));
+               kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
+
+               __flush_dcache_area((unsigned long)pte_follow,
+                                   (unsigned long)pte_follow +
+                                           kvm_granule_size(level));
        }
 
        if (childp)
@@ -988,11 +991,15 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
        struct kvm_pgtable *pgt = arg;
        struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
        kvm_pte_t pte = *ptep;
+       kvm_pte_t *pte_follow;
 
        if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
                return 0;
 
-       __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
+       pte_follow = kvm_pte_follow(pte, mm_ops);
+       __flush_dcache_area((unsigned long)pte_follow,
+                           (unsigned long)pte_follow +
+                                   kvm_granule_size(level));
        return 0;
 }
 
index fff883f691f2f03065f76284175a53770ba227be..b2880aeba7ca5e598b0e1847c7805c04ed91d4b5 100644 (file)
@@ -99,16 +99,15 @@ alternative_else_nop_endif
 SYM_FUNC_END(invalidate_icache_range)
 
 /*
- *     __flush_dcache_area(kaddr, size)
+ *     __flush_dcache_area(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned and invalidated to the PoC.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
  */
 SYM_FUNC_START_PI(__flush_dcache_area)
-       add     x1, x0, x1
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
 SYM_FUNC_END_PI(__flush_dcache_area)