#define gic_read_lpir(c) readq_relaxed(c)
#define gic_write_lpir(v, c) writeq_relaxed(v, c)
-#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+#define gic_flush_dcache_to_poc(a,l) \
+ __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))
#define gits_read_baser(c) readq_relaxed(c)
#define gits_write_baser(v, c) writeq_relaxed(v, c)
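For reference, a worked expansion of the updated wrapper (prop and sz are
illustrative names, not from the patch): call sites still pass a base pointer
and a length, and the macro now derives the exclusive end address itself:

	/* gic_flush_dcache_to_poc(prop, sz) now expands to: */
	__flush_dcache_area((unsigned long)(prop), (unsigned long)(prop) + (sz));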
* - start - virtual start address
* - end - virtual end address
*
- * __flush_dcache_area(kaddr, size)
+ * __flush_dcache_area(start, end)
*
- * Ensure that the data held in page is written back.
- * - kaddr - page address
- * - size - region size
+ * Ensure that the data in the range [start, end) is written back.
+ * - start - virtual start address
+ * - end - virtual end address
*/
extern void __flush_icache_range(unsigned long start, unsigned long end);
extern void invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __flush_dcache_area(void *addr, size_t len);
+extern void __flush_dcache_area(unsigned long start, unsigned long end);
extern void __inval_dcache_area(unsigned long start, unsigned long end);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
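A minimal caller sketch under the new prototype (flush_buf_to_poc, buf and
len are illustrative names, not kernel API): callers that previously passed a
base address and a size now pass an inclusive start and an exclusive end:

	static inline void flush_buf_to_poc(void *buf, size_t len)
	{
		unsigned long start = (unsigned long)buf;

		/* clean and invalidate [start, start + len) to the PoC */
		__flush_dcache_area(start, start + len);
	}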
static inline void efi_capsule_flush_cache_range(void *addr, int size)
{
- __flush_dcache_area(addr, size);
+ __flush_dcache_area((unsigned long)addr, (unsigned long)addr + size);
}
#endif /* _ASM_EFI_H */
struct kvm;
-#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+#define kvm_flush_dcache_to_poc(a,l) \
+ __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
-#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
-
#ifdef CONFIG_ARM64_MTE
static DEFINE_XARRAY(mte_pages);
ret = swsusp_save();
} else {
/* Clean kernel core startup/idle code to PoC */
- dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
- dcache_clean_range(__idmap_text_start, __idmap_text_end);
+ __flush_dcache_area((unsigned long)__mmuoff_data_start,
+ (unsigned long)__mmuoff_data_end);
+ __flush_dcache_area((unsigned long)__idmap_text_start,
+ (unsigned long)__idmap_text_end);
/* Clean kvm setup code to PoC? */
if (el2_reset_needed()) {
- dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
- dcache_clean_range(__hyp_text_start, __hyp_text_end);
+ __flush_dcache_area(
+ (unsigned long)__hyp_idmap_text_start,
+ (unsigned long)__hyp_idmap_text_end);
+ __flush_dcache_area((unsigned long)__hyp_text_start,
+ (unsigned long)__hyp_text_end);
}
swsusp_mte_restore_tags();
* The hibernate exit text contains a set of el2 vectors that will
* be executed at el2 with the mmu off in order to reload the hyp-stub.
*/
- __flush_dcache_area(hibernate_exit, exit_size);
+ __flush_dcache_area((unsigned long)hibernate_exit,
+ (unsigned long)hibernate_exit + exit_size);
/*
* KASLR will cause the el2 vectors to be in a different location in
for (i = 0; i < ARRAY_SIZE(regs); i++) {
if (regs[i]->override)
- __flush_dcache_area(regs[i]->override,
+ __flush_dcache_area((unsigned long)regs[i]->override,
+ (unsigned long)regs[i]->override +
sizeof(*regs[i]->override));
}
}
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
- __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+ __flush_dcache_area((unsigned long)&module_alloc_base,
+ (unsigned long)&module_alloc_base +
+ sizeof(module_alloc_base));
/*
* Try to map the FDT early. If this fails, we simply bail,
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK;
- __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
- __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+ __flush_dcache_area((unsigned long)&module_alloc_base,
+ (unsigned long)&module_alloc_base +
+ sizeof(module_alloc_base));
+ __flush_dcache_area((unsigned long)&memstart_offset_seed,
+ (unsigned long)&memstart_offset_seed +
+ sizeof(memstart_offset_seed));
return offset;
}
* For execution with the MMU off, reloc_code needs to be cleaned to the
* PoC and invalidated from the I-cache.
*/
- __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
+ __flush_dcache_area((unsigned long)reloc_code,
+ (unsigned long)reloc_code +
+ arm64_relocate_new_kernel_size);
invalidate_icache_range((uintptr_t)reloc_code,
(uintptr_t)reloc_code +
arm64_relocate_new_kernel_size);
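The pairing above, a D-cache clean to the PoC followed by an I-cache
invalidate, is the general recipe for code that will run with the MMU off; a
self-contained sketch (flush_code_for_mmu_off and its parameters are
illustrative names, not kernel API):

	/* Illustrative helper: make [code, code + size) safe to execute
	 * with the MMU, and hence data caching, disabled. */
	static void flush_code_for_mmu_off(void *code, size_t size)
	{
		unsigned long start = (unsigned long)code;
		unsigned long end = start + size;

		__flush_dcache_area(start, end);	/* push dirty lines to PoC */
		invalidate_icache_range(start, end);	/* drop stale instructions */
	}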
for (entry = &kimage->head; ; entry++) {
unsigned int flag;
- void *addr;
+ unsigned long addr;
/* flush the list entries. */
- __flush_dcache_area(entry, sizeof(kimage_entry_t));
+ __flush_dcache_area((unsigned long)entry,
+ (unsigned long)entry +
+ sizeof(kimage_entry_t));
flag = *entry & IND_FLAGS;
if (flag == IND_DONE)
break;
- addr = phys_to_virt(*entry & PAGE_MASK);
+ addr = (unsigned long)phys_to_virt(*entry & PAGE_MASK);
switch (flag) {
case IND_INDIRECTION:
break;
case IND_SOURCE:
/* flush the source pages. */
- __flush_dcache_area(addr, PAGE_SIZE);
+ __flush_dcache_area(addr, addr + PAGE_SIZE);
break;
case IND_DESTINATION:
break;
kimage->segment[i].memsz,
kimage->segment[i].memsz / PAGE_SIZE);
- __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
- kimage->segment[i].memsz);
+ __flush_dcache_area(
+ (unsigned long)phys_to_virt(kimage->segment[i].mem),
+ (unsigned long)phys_to_virt(kimage->segment[i].mem) +
+ kimage->segment[i].memsz);
}
}
secondary_data.task = idle;
secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
update_cpu_boot_status(CPU_MMU_OFF);
- __flush_dcache_area(&secondary_data, sizeof(secondary_data));
+ __flush_dcache_area((unsigned long)&secondary_data,
+ (unsigned long)&secondary_data +
+ sizeof(secondary_data));
/* Now bring the CPU into our world */
ret = boot_secondary(cpu, idle);
pr_crit("CPU%u: failed to come online\n", cpu);
secondary_data.task = NULL;
secondary_data.stack = NULL;
- __flush_dcache_area(&secondary_data, sizeof(secondary_data));
+ __flush_dcache_area((unsigned long)&secondary_data,
+ (unsigned long)&secondary_data +
+ sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
if (status == CPU_MMU_OFF)
status = READ_ONCE(__early_cpu_boot_status);
unsigned long size = sizeof(secondary_holding_pen_release);
secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
+ __flush_dcache_area((unsigned long)start, (unsigned long)start + size);
}
* the boot protocol.
*/
writeq_relaxed(pa_holding_pen, release_addr);
- __flush_dcache_area((__force void *)release_addr,
- sizeof(*release_addr));
+ __flush_dcache_area((__force unsigned long)release_addr,
+ (__force unsigned long)release_addr +
+ sizeof(*release_addr));
/*
* Send an event to wake up the secondary CPU.
#include <asm/alternative.h>
SYM_FUNC_START_PI(__flush_dcache_area)
- add x1, x0, x1
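+	/* x0 = start (inclusive), x1 = end (exclusive), virtual addresses */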
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__flush_dcache_area)
for (i = 0; i < hyp_nr_cpus; i++) {
params = per_cpu_ptr(&kvm_init_params, i);
params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
- __flush_dcache_area(params, sizeof(*params));
+ __flush_dcache_area((unsigned long)params,
+ (unsigned long)params + sizeof(*params));
}
}
stage2_put_pte(ptep, mmu, addr, level, mm_ops);
if (need_flush) {
- __flush_dcache_area(kvm_pte_follow(pte, mm_ops),
- kvm_granule_size(level));
+ kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
+
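+	/* Clean+invalidate the unmapped granule via its kernel alias */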
+ __flush_dcache_area((unsigned long)pte_follow,
+ (unsigned long)pte_follow +
+ kvm_granule_size(level));
}
if (childp)
struct kvm_pgtable *pgt = arg;
struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
kvm_pte_t pte = *ptep;
+ kvm_pte_t *pte_follow;
if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
return 0;
- __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
+ pte_follow = kvm_pte_follow(pte, mm_ops);
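+	/* CMOs act on the kernel's cacheable alias of the guest page */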
+ __flush_dcache_area((unsigned long)pte_follow,
+ (unsigned long)pte_follow +
+ kvm_granule_size(level));
return 0;
}
SYM_FUNC_END(invalidate_icache_range)
/*
- * __flush_dcache_area(kaddr, size)
+ * __flush_dcache_area(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned and invalidated to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
SYM_FUNC_START_PI(__flush_dcache_area)
- add x1, x0, x1
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__flush_dcache_area)
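For readers unfamiliar with dcache_by_line_op, a C-level sketch of what the
civac variant does with the start/end pair, assuming the line size is derived
from CTR_EL0.DminLine; this illustrates the algorithm and is not the kernel
implementation:

	static inline void clean_inval_poc(unsigned long start, unsigned long end)
	{
		unsigned long ctr, line;

		/* CTR_EL0.DminLine (bits [19:16]) is log2 of the smallest
		 * D-cache line size in 4-byte words. */
		asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
		line = 4UL << ((ctr >> 16) & 0xf);

		/* dc civac: clean+invalidate by VA to the PoC, line by line */
		for (start &= ~(line - 1); start < end; start += line)
			asm volatile("dc civac, %0" : : "r" (start) : "memory");
		asm volatile("dsb sy" : : : "memory");
	}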