ARM: 9387/2: mm: Rewrite cacheflush vtables in CFI safe C
author: Linus Walleij <linus.walleij@linaro.org>
Tue, 23 Apr 2024 07:43:14 +0000 (08:43 +0100)
committer: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Mon, 29 Apr 2024 13:14:18 +0000 (14:14 +0100)
Instead of defining all cache flush operations with an assembly
macro in proc-macros.S, provide an explicit struct cpu_cache_fns
for each CPU cache type in mm/cache.c.

As a side effect from rewriting the vtables in C, we can
avoid the aliasing for the "louis" cache callback, instead we
can just assign the NN_flush_kern_cache_all() function to the
louis callback in the C vtable.

As the louis cache callback is called explicitly (not through the
vtable) if we only have one type of cache support compiled in, we
need an ifdef quirk for this in the !MULTI_CACHE case.

Feroceon and XScale have some dma mapping quirks; in this case we
can just define two structs and assign all but one callback to the
main implementation. Since each of them invokes define_cache_functions
twice, they require MULTI_CACHE by definition, so the compiled-in
shortcut is not used on these variants.

Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
27 files changed:
arch/arm/include/asm/glue-cache.h
arch/arm/mm/Makefile
arch/arm/mm/cache-b15-rac.c
arch/arm/mm/cache-fa.S
arch/arm/mm/cache-nop.S
arch/arm/mm/cache-v4.S
arch/arm/mm/cache-v4wb.S
arch/arm/mm/cache-v4wt.S
arch/arm/mm/cache-v6.S
arch/arm/mm/cache-v7.S
arch/arm/mm/cache-v7m.S
arch/arm/mm/cache.c [new file with mode: 0644]
arch/arm/mm/proc-arm1020.S
arch/arm/mm/proc-arm1020e.S
arch/arm/mm/proc-arm1022.S
arch/arm/mm/proc-arm1026.S
arch/arm/mm/proc-arm920.S
arch/arm/mm/proc-arm922.S
arch/arm/mm/proc-arm925.S
arch/arm/mm/proc-arm926.S
arch/arm/mm/proc-arm940.S
arch/arm/mm/proc-arm946.S
arch/arm/mm/proc-feroceon.S
arch/arm/mm/proc-macros.S
arch/arm/mm/proc-mohawk.S
arch/arm/mm/proc-xsc3.S
arch/arm/mm/proc-xscale.S

index 724f8dac1e5b1d9a32404f8e806667dd56bbfcfa..4186fbf7341fc239567b387ea2dbac762cce396b 100644 (file)
 # define MULTI_CACHE 1
 #endif
 
+#ifdef CONFIG_CPU_CACHE_NOP
+#  define MULTI_CACHE 1
+#endif
+
 #if defined(CONFIG_CPU_V7M)
 #  define MULTI_CACHE 1
 #endif
 #error Unknown cache maintenance model
 #endif
 
-#ifndef __ASSEMBLY__
-static inline void nop_flush_icache_all(void) { }
-static inline void nop_flush_kern_cache_all(void) { }
-static inline void nop_flush_kern_cache_louis(void) { }
-static inline void nop_flush_user_cache_all(void) { }
-static inline void nop_flush_user_cache_range(unsigned long a,
-               unsigned long b, unsigned int c) { }
-
-static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
-static inline int nop_coherent_user_range(unsigned long a,
-               unsigned long b) { return 0; }
-static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
-
-static inline void nop_dma_flush_range(const void *a, const void *b) { }
-
-static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
-static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
-#endif
-
 #ifndef MULTI_CACHE
 #define __cpuc_flush_icache_all                __glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all          __glue(_CACHE,_flush_kern_cache_all)
+/* This function only has a dedicated assembly callback on the v7 cache */
+#ifdef CONFIG_CPU_CACHE_V7
 #define __cpuc_flush_kern_louis                __glue(_CACHE,_flush_kern_cache_louis)
+#else
+#define __cpuc_flush_kern_louis                __glue(_CACHE,_flush_kern_cache_all)
+#endif
 #define __cpuc_flush_user_all          __glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range                __glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range     __glue(_CACHE,_coherent_kern_range)
index cc8255fdf56e2b007a4ca67b507dd55b0aeec9c6..17665381be9623ffe8e9104de79a0ec194cbae2c 100644 (file)
@@ -45,6 +45,7 @@ obj-$(CONFIG_CPU_CACHE_V7)    += cache-v7.o
 obj-$(CONFIG_CPU_CACHE_FA)     += cache-fa.o
 obj-$(CONFIG_CPU_CACHE_NOP)    += cache-nop.o
 obj-$(CONFIG_CPU_CACHE_V7M)    += cache-v7m.o
+obj-y                          += cache.o
 
 obj-$(CONFIG_CPU_COPY_V4WT)    += copypage-v4wt.o
 obj-$(CONFIG_CPU_COPY_V4WB)    += copypage-v4wb.o
index 9c1172f26885fc6bc07985773609b1bd06a162a3..6f63b90f9e1ac7ab3084f8d35136fd22acde818e 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2015-2016 Broadcom
  */
 
+#include <linux/cfi_types.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
index ecd08bf440cb89c7133e60690634e665fb99b810..db454033b76ff3c3fa7b3409859f7e7d0e9fb352 100644 (file)
@@ -243,11 +243,3 @@ SYM_FUNC_END(fa_dma_map_area)
 SYM_TYPED_FUNC_START(fa_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(fa_dma_unmap_area)
-
-       .globl  fa_flush_kern_cache_louis
-       .equ    fa_flush_kern_cache_louis, fa_flush_kern_cache_all
-
-       __INITDATA
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions fa
index cd191aa9031359abe49e97a294a0533cbe174413..f68dde2014ee0315a3b88afc4af885ec67dffe33 100644 (file)
@@ -18,9 +18,6 @@ SYM_TYPED_FUNC_START(nop_flush_kern_cache_all)
        ret     lr
 SYM_FUNC_END(nop_flush_kern_cache_all)
 
-       .globl nop_flush_kern_cache_louis
-       .equ nop_flush_kern_cache_louis, nop_flush_icache_all
-
 SYM_TYPED_FUNC_START(nop_flush_user_cache_all)
        ret     lr
 SYM_FUNC_END(nop_flush_user_cache_all)
@@ -50,11 +47,6 @@ SYM_TYPED_FUNC_START(nop_dma_map_area)
        ret     lr
 SYM_FUNC_END(nop_dma_map_area)
 
-       __INITDATA
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions nop
-
 SYM_TYPED_FUNC_START(nop_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(nop_dma_unmap_area)
index f7b7e498d3b61f30c3a683ca105a144389da95cd..0df97a610026b6d2e9a02262668b77d189e59f94 100644 (file)
@@ -144,11 +144,3 @@ SYM_FUNC_END(v4_dma_unmap_area)
 SYM_TYPED_FUNC_START(v4_dma_map_area)
        ret     lr
 SYM_FUNC_END(v4_dma_map_area)
-
-       .globl  v4_flush_kern_cache_louis
-       .equ    v4_flush_kern_cache_louis, v4_flush_kern_cache_all
-
-       __INITDATA
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions v4
index 00488108a87369d23dde835e28039f25d492cc33..1912f559968cd9e7040f64bcd76055a1ec549be7 100644 (file)
@@ -253,11 +253,3 @@ SYM_FUNC_END(v4wb_dma_map_area)
 SYM_TYPED_FUNC_START(v4wb_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(v4wb_dma_unmap_area)
-
-       .globl  v4wb_flush_kern_cache_louis
-       .equ    v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
-
-       __INITDATA
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions v4wb
index 573f65271cbb9bc456c7d60d60540c61b29a163c..43b4275ab680274ff40417e48cb312a71a50fc0d 100644 (file)
@@ -200,11 +200,3 @@ SYM_FUNC_END(v4wt_dma_unmap_area)
 SYM_TYPED_FUNC_START(v4wt_dma_map_area)
        ret     lr
 SYM_FUNC_END(v4wt_dma_map_area)
-
-       .globl  v4wt_flush_kern_cache_louis
-       .equ    v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
-
-       __INITDATA
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions v4wt
index 5c7549a49db5929a318e6a430e6737df04989cc3..86affd60d6d4f057619b6766b41120414d7ad0cd 100644 (file)
@@ -298,11 +298,3 @@ SYM_TYPED_FUNC_START(v6_dma_unmap_area)
        bne     v6_dma_inv_range
        ret     lr
 SYM_FUNC_END(v6_dma_unmap_area)
-
-       .globl  v6_flush_kern_cache_louis
-       .equ    v6_flush_kern_cache_louis, v6_flush_kern_cache_all
-
-       __INITDATA
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions v6
index 5908dd54de47e6fdbb31610fe96482f98268c203..170b9ac72331f21c594785777b95e8bb5d657b0e 100644 (file)
@@ -456,28 +456,3 @@ SYM_TYPED_FUNC_START(v7_dma_unmap_area)
        bne     v7_dma_inv_range
        ret     lr
 SYM_FUNC_END(v7_dma_unmap_area)
-
-       __INITDATA
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions v7
-
-       /* The Broadcom Brahma-B15 read-ahead cache requires some modifications
-        * to the v7_cache_fns, we only override the ones we need
-        */
-#ifndef CONFIG_CACHE_B15_RAC
-       globl_equ       b15_flush_kern_cache_all,       v7_flush_kern_cache_all
-#endif
-       globl_equ       b15_flush_icache_all,           v7_flush_icache_all
-       globl_equ       b15_flush_kern_cache_louis,     v7_flush_kern_cache_louis
-       globl_equ       b15_flush_user_cache_all,       v7_flush_user_cache_all
-       globl_equ       b15_flush_user_cache_range,     v7_flush_user_cache_range
-       globl_equ       b15_coherent_kern_range,        v7_coherent_kern_range
-       globl_equ       b15_coherent_user_range,        v7_coherent_user_range
-       globl_equ       b15_flush_kern_dcache_area,     v7_flush_kern_dcache_area
-
-       globl_equ       b15_dma_map_area,               v7_dma_map_area
-       globl_equ       b15_dma_unmap_area,             v7_dma_unmap_area
-       globl_equ       b15_dma_flush_range,            v7_dma_flush_range
-
-       define_cache_functions b15
index 5a62b9a224e1b97ef65c87f9e5e6e1ee062e853d..4e670697eabc850857143d684ba21c7e09c2e57b 100644 (file)
@@ -447,11 +447,3 @@ SYM_TYPED_FUNC_START(v7m_dma_unmap_area)
        bne     v7m_dma_inv_range
        ret     lr
 SYM_FUNC_END(v7m_dma_unmap_area)
-
-       .globl  v7m_flush_kern_cache_louis
-       .equ    v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
-
-       __INITDATA
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions v7m
diff --git a/arch/arm/mm/cache.c b/arch/arm/mm/cache.c
new file mode 100644 (file)
index 0000000..e6fbc59
--- /dev/null
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file defines C prototypes for the low-level cache assembly functions
+ * and populates a vtable for each selected ARM CPU cache type.
+ */
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_CPU_CACHE_V4
+void v4_flush_icache_all(void);
+void v4_flush_kern_cache_all(void);
+void v4_flush_user_cache_all(void);
+void v4_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v4_coherent_kern_range(unsigned long, unsigned long);
+int v4_coherent_user_range(unsigned long, unsigned long);
+void v4_flush_kern_dcache_area(void *, size_t);
+void v4_dma_map_area(const void *, size_t, int);
+void v4_dma_unmap_area(const void *, size_t, int);
+void v4_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v4_cache_fns __initconst = {
+       .flush_icache_all = v4_flush_icache_all,
+       .flush_kern_all = v4_flush_kern_cache_all,
+       .flush_kern_louis = v4_flush_kern_cache_all,
+       .flush_user_all = v4_flush_user_cache_all,
+       .flush_user_range = v4_flush_user_cache_range,
+       .coherent_kern_range = v4_coherent_kern_range,
+       .coherent_user_range = v4_coherent_user_range,
+       .flush_kern_dcache_area = v4_flush_kern_dcache_area,
+       .dma_map_area = v4_dma_map_area,
+       .dma_unmap_area = v4_dma_unmap_area,
+       .dma_flush_range = v4_dma_flush_range,
+};
+#endif
+
+/* V4 write-back cache "V4WB" */
+#ifdef CONFIG_CPU_CACHE_V4WB
+void v4wb_flush_icache_all(void);
+void v4wb_flush_kern_cache_all(void);
+void v4wb_flush_user_cache_all(void);
+void v4wb_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v4wb_coherent_kern_range(unsigned long, unsigned long);
+int v4wb_coherent_user_range(unsigned long, unsigned long);
+void v4wb_flush_kern_dcache_area(void *, size_t);
+void v4wb_dma_map_area(const void *, size_t, int);
+void v4wb_dma_unmap_area(const void *, size_t, int);
+void v4wb_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v4wb_cache_fns __initconst = {
+       .flush_icache_all = v4wb_flush_icache_all,
+       .flush_kern_all = v4wb_flush_kern_cache_all,
+       .flush_kern_louis = v4wb_flush_kern_cache_all,
+       .flush_user_all = v4wb_flush_user_cache_all,
+       .flush_user_range = v4wb_flush_user_cache_range,
+       .coherent_kern_range = v4wb_coherent_kern_range,
+       .coherent_user_range = v4wb_coherent_user_range,
+       .flush_kern_dcache_area = v4wb_flush_kern_dcache_area,
+       .dma_map_area = v4wb_dma_map_area,
+       .dma_unmap_area = v4wb_dma_unmap_area,
+       .dma_flush_range = v4wb_dma_flush_range,
+};
+#endif
+
+/* V4 write-through cache "V4WT" */
+#ifdef CONFIG_CPU_CACHE_V4WT
+void v4wt_flush_icache_all(void);
+void v4wt_flush_kern_cache_all(void);
+void v4wt_flush_user_cache_all(void);
+void v4wt_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v4wt_coherent_kern_range(unsigned long, unsigned long);
+int v4wt_coherent_user_range(unsigned long, unsigned long);
+void v4wt_flush_kern_dcache_area(void *, size_t);
+void v4wt_dma_map_area(const void *, size_t, int);
+void v4wt_dma_unmap_area(const void *, size_t, int);
+void v4wt_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v4wt_cache_fns __initconst = {
+       .flush_icache_all = v4wt_flush_icache_all,
+       .flush_kern_all = v4wt_flush_kern_cache_all,
+       .flush_kern_louis = v4wt_flush_kern_cache_all,
+       .flush_user_all = v4wt_flush_user_cache_all,
+       .flush_user_range = v4wt_flush_user_cache_range,
+       .coherent_kern_range = v4wt_coherent_kern_range,
+       .coherent_user_range = v4wt_coherent_user_range,
+       .flush_kern_dcache_area = v4wt_flush_kern_dcache_area,
+       .dma_map_area = v4wt_dma_map_area,
+       .dma_unmap_area = v4wt_dma_unmap_area,
+       .dma_flush_range = v4wt_dma_flush_range,
+};
+#endif
+
+/* Faraday FA526 cache */
+#ifdef CONFIG_CPU_CACHE_FA
+void fa_flush_icache_all(void);
+void fa_flush_kern_cache_all(void);
+void fa_flush_user_cache_all(void);
+void fa_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void fa_coherent_kern_range(unsigned long, unsigned long);
+int fa_coherent_user_range(unsigned long, unsigned long);
+void fa_flush_kern_dcache_area(void *, size_t);
+void fa_dma_map_area(const void *, size_t, int);
+void fa_dma_unmap_area(const void *, size_t, int);
+void fa_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns fa_cache_fns __initconst = {
+       .flush_icache_all = fa_flush_icache_all,
+       .flush_kern_all = fa_flush_kern_cache_all,
+       .flush_kern_louis = fa_flush_kern_cache_all,
+       .flush_user_all = fa_flush_user_cache_all,
+       .flush_user_range = fa_flush_user_cache_range,
+       .coherent_kern_range = fa_coherent_kern_range,
+       .coherent_user_range = fa_coherent_user_range,
+       .flush_kern_dcache_area = fa_flush_kern_dcache_area,
+       .dma_map_area = fa_dma_map_area,
+       .dma_unmap_area = fa_dma_unmap_area,
+       .dma_flush_range = fa_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_CACHE_V6
+void v6_flush_icache_all(void);
+void v6_flush_kern_cache_all(void);
+void v6_flush_user_cache_all(void);
+void v6_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v6_coherent_kern_range(unsigned long, unsigned long);
+int v6_coherent_user_range(unsigned long, unsigned long);
+void v6_flush_kern_dcache_area(void *, size_t);
+void v6_dma_map_area(const void *, size_t, int);
+void v6_dma_unmap_area(const void *, size_t, int);
+void v6_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v6_cache_fns __initconst = {
+       .flush_icache_all = v6_flush_icache_all,
+       .flush_kern_all = v6_flush_kern_cache_all,
+       .flush_kern_louis = v6_flush_kern_cache_all,
+       .flush_user_all = v6_flush_user_cache_all,
+       .flush_user_range = v6_flush_user_cache_range,
+       .coherent_kern_range = v6_coherent_kern_range,
+       .coherent_user_range = v6_coherent_user_range,
+       .flush_kern_dcache_area = v6_flush_kern_dcache_area,
+       .dma_map_area = v6_dma_map_area,
+       .dma_unmap_area = v6_dma_unmap_area,
+       .dma_flush_range = v6_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_CACHE_V7
+void v7_flush_icache_all(void);
+void v7_flush_kern_cache_all(void);
+void v7_flush_kern_cache_louis(void);
+void v7_flush_user_cache_all(void);
+void v7_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v7_coherent_kern_range(unsigned long, unsigned long);
+int v7_coherent_user_range(unsigned long, unsigned long);
+void v7_flush_kern_dcache_area(void *, size_t);
+void v7_dma_map_area(const void *, size_t, int);
+void v7_dma_unmap_area(const void *, size_t, int);
+void v7_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v7_cache_fns __initconst = {
+       .flush_icache_all = v7_flush_icache_all,
+       .flush_kern_all = v7_flush_kern_cache_all,
+       .flush_kern_louis = v7_flush_kern_cache_louis,
+       .flush_user_all = v7_flush_user_cache_all,
+       .flush_user_range = v7_flush_user_cache_range,
+       .coherent_kern_range = v7_coherent_kern_range,
+       .coherent_user_range = v7_coherent_user_range,
+       .flush_kern_dcache_area = v7_flush_kern_dcache_area,
+       .dma_map_area = v7_dma_map_area,
+       .dma_unmap_area = v7_dma_unmap_area,
+       .dma_flush_range = v7_dma_flush_range,
+};
+
+/* Special quirky cache flush function for Broadcom B15 v7 caches */
+void b15_flush_kern_cache_all(void);
+
+struct cpu_cache_fns b15_cache_fns __initconst = {
+       .flush_icache_all = v7_flush_icache_all,
+#ifdef CONFIG_CACHE_B15_RAC
+       .flush_kern_all = b15_flush_kern_cache_all,
+#else
+       .flush_kern_all = v7_flush_kern_cache_all,
+#endif
+       .flush_kern_louis = v7_flush_kern_cache_louis,
+       .flush_user_all = v7_flush_user_cache_all,
+       .flush_user_range = v7_flush_user_cache_range,
+       .coherent_kern_range = v7_coherent_kern_range,
+       .coherent_user_range = v7_coherent_user_range,
+       .flush_kern_dcache_area = v7_flush_kern_dcache_area,
+       .dma_map_area = v7_dma_map_area,
+       .dma_unmap_area = v7_dma_unmap_area,
+       .dma_flush_range = v7_dma_flush_range,
+};
+#endif
+
+/* The NOP cache is just a set of dummy stubs that by definition does nothing */
+#ifdef CONFIG_CPU_CACHE_NOP
+void nop_flush_icache_all(void);
+void nop_flush_kern_cache_all(void);
+void nop_flush_user_cache_all(void);
+void nop_flush_user_cache_range(unsigned long start, unsigned long end, unsigned int flags);
+void nop_coherent_kern_range(unsigned long start, unsigned long end);
+int nop_coherent_user_range(unsigned long, unsigned long);
+void nop_flush_kern_dcache_area(void *kaddr, size_t size);
+void nop_dma_map_area(const void *start, size_t size, int flags);
+void nop_dma_unmap_area(const void *start, size_t size, int flags);
+void nop_dma_flush_range(const void *start, const void *end);
+
+struct cpu_cache_fns nop_cache_fns __initconst = {
+       .flush_icache_all = nop_flush_icache_all,
+       .flush_kern_all = nop_flush_kern_cache_all,
+       .flush_kern_louis = nop_flush_kern_cache_all,
+       .flush_user_all = nop_flush_user_cache_all,
+       .flush_user_range = nop_flush_user_cache_range,
+       .coherent_kern_range = nop_coherent_kern_range,
+       .coherent_user_range = nop_coherent_user_range,
+       .flush_kern_dcache_area = nop_flush_kern_dcache_area,
+       .dma_map_area = nop_dma_map_area,
+       .dma_unmap_area = nop_dma_unmap_area,
+       .dma_flush_range = nop_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_CACHE_V7M
+void v7m_flush_icache_all(void);
+void v7m_flush_kern_cache_all(void);
+void v7m_flush_user_cache_all(void);
+void v7m_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v7m_coherent_kern_range(unsigned long, unsigned long);
+int v7m_coherent_user_range(unsigned long, unsigned long);
+void v7m_flush_kern_dcache_area(void *, size_t);
+void v7m_dma_map_area(const void *, size_t, int);
+void v7m_dma_unmap_area(const void *, size_t, int);
+void v7m_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v7m_cache_fns __initconst = {
+       .flush_icache_all = v7m_flush_icache_all,
+       .flush_kern_all = v7m_flush_kern_cache_all,
+       .flush_kern_louis = v7m_flush_kern_cache_all,
+       .flush_user_all = v7m_flush_user_cache_all,
+       .flush_user_range = v7m_flush_user_cache_range,
+       .coherent_kern_range = v7m_coherent_kern_range,
+       .coherent_user_range = v7m_coherent_user_range,
+       .flush_kern_dcache_area = v7m_flush_kern_dcache_area,
+       .dma_map_area = v7m_dma_map_area,
+       .dma_unmap_area = v7m_dma_unmap_area,
+       .dma_flush_range = v7m_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1020
+void arm1020_flush_icache_all(void);
+void arm1020_flush_kern_cache_all(void);
+void arm1020_flush_user_cache_all(void);
+void arm1020_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1020_coherent_kern_range(unsigned long, unsigned long);
+int arm1020_coherent_user_range(unsigned long, unsigned long);
+void arm1020_flush_kern_dcache_area(void *, size_t);
+void arm1020_dma_map_area(const void *, size_t, int);
+void arm1020_dma_unmap_area(const void *, size_t, int);
+void arm1020_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1020_cache_fns __initconst = {
+       .flush_icache_all = arm1020_flush_icache_all,
+       .flush_kern_all = arm1020_flush_kern_cache_all,
+       .flush_kern_louis = arm1020_flush_kern_cache_all,
+       .flush_user_all = arm1020_flush_user_cache_all,
+       .flush_user_range = arm1020_flush_user_cache_range,
+       .coherent_kern_range = arm1020_coherent_kern_range,
+       .coherent_user_range = arm1020_coherent_user_range,
+       .flush_kern_dcache_area = arm1020_flush_kern_dcache_area,
+       .dma_map_area = arm1020_dma_map_area,
+       .dma_unmap_area = arm1020_dma_unmap_area,
+       .dma_flush_range = arm1020_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1020E
+void arm1020e_flush_icache_all(void);
+void arm1020e_flush_kern_cache_all(void);
+void arm1020e_flush_user_cache_all(void);
+void arm1020e_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1020e_coherent_kern_range(unsigned long, unsigned long);
+int arm1020e_coherent_user_range(unsigned long, unsigned long);
+void arm1020e_flush_kern_dcache_area(void *, size_t);
+void arm1020e_dma_map_area(const void *, size_t, int);
+void arm1020e_dma_unmap_area(const void *, size_t, int);
+void arm1020e_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1020e_cache_fns __initconst = {
+       .flush_icache_all = arm1020e_flush_icache_all,
+       .flush_kern_all = arm1020e_flush_kern_cache_all,
+       .flush_kern_louis = arm1020e_flush_kern_cache_all,
+       .flush_user_all = arm1020e_flush_user_cache_all,
+       .flush_user_range = arm1020e_flush_user_cache_range,
+       .coherent_kern_range = arm1020e_coherent_kern_range,
+       .coherent_user_range = arm1020e_coherent_user_range,
+       .flush_kern_dcache_area = arm1020e_flush_kern_dcache_area,
+       .dma_map_area = arm1020e_dma_map_area,
+       .dma_unmap_area = arm1020e_dma_unmap_area,
+       .dma_flush_range = arm1020e_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1022
+void arm1022_flush_icache_all(void);
+void arm1022_flush_kern_cache_all(void);
+void arm1022_flush_user_cache_all(void);
+void arm1022_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1022_coherent_kern_range(unsigned long, unsigned long);
+int arm1022_coherent_user_range(unsigned long, unsigned long);
+void arm1022_flush_kern_dcache_area(void *, size_t);
+void arm1022_dma_map_area(const void *, size_t, int);
+void arm1022_dma_unmap_area(const void *, size_t, int);
+void arm1022_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1022_cache_fns __initconst = {
+       .flush_icache_all = arm1022_flush_icache_all,
+       .flush_kern_all = arm1022_flush_kern_cache_all,
+       .flush_kern_louis = arm1022_flush_kern_cache_all,
+       .flush_user_all = arm1022_flush_user_cache_all,
+       .flush_user_range = arm1022_flush_user_cache_range,
+       .coherent_kern_range = arm1022_coherent_kern_range,
+       .coherent_user_range = arm1022_coherent_user_range,
+       .flush_kern_dcache_area = arm1022_flush_kern_dcache_area,
+       .dma_map_area = arm1022_dma_map_area,
+       .dma_unmap_area = arm1022_dma_unmap_area,
+       .dma_flush_range = arm1022_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1026
+void arm1026_flush_icache_all(void);
+void arm1026_flush_kern_cache_all(void);
+void arm1026_flush_user_cache_all(void);
+void arm1026_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1026_coherent_kern_range(unsigned long, unsigned long);
+int arm1026_coherent_user_range(unsigned long, unsigned long);
+void arm1026_flush_kern_dcache_area(void *, size_t);
+void arm1026_dma_map_area(const void *, size_t, int);
+void arm1026_dma_unmap_area(const void *, size_t, int);
+void arm1026_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1026_cache_fns __initconst = {
+       .flush_icache_all = arm1026_flush_icache_all,
+       .flush_kern_all = arm1026_flush_kern_cache_all,
+       .flush_kern_louis = arm1026_flush_kern_cache_all,
+       .flush_user_all = arm1026_flush_user_cache_all,
+       .flush_user_range = arm1026_flush_user_cache_range,
+       .coherent_kern_range = arm1026_coherent_kern_range,
+       .coherent_user_range = arm1026_coherent_user_range,
+       .flush_kern_dcache_area = arm1026_flush_kern_dcache_area,
+       .dma_map_area = arm1026_dma_map_area,
+       .dma_unmap_area = arm1026_dma_unmap_area,
+       .dma_flush_range = arm1026_dma_flush_range,
+};
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+void arm920_flush_icache_all(void);
+void arm920_flush_kern_cache_all(void);
+void arm920_flush_user_cache_all(void);
+void arm920_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm920_coherent_kern_range(unsigned long, unsigned long);
+int arm920_coherent_user_range(unsigned long, unsigned long);
+void arm920_flush_kern_dcache_area(void *, size_t);
+void arm920_dma_map_area(const void *, size_t, int);
+void arm920_dma_unmap_area(const void *, size_t, int);
+void arm920_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm920_cache_fns __initconst = {
+       .flush_icache_all = arm920_flush_icache_all,
+       .flush_kern_all = arm920_flush_kern_cache_all,
+       .flush_kern_louis = arm920_flush_kern_cache_all,
+       .flush_user_all = arm920_flush_user_cache_all,
+       .flush_user_range = arm920_flush_user_cache_range,
+       .coherent_kern_range = arm920_coherent_kern_range,
+       .coherent_user_range = arm920_coherent_user_range,
+       .flush_kern_dcache_area = arm920_flush_kern_dcache_area,
+       .dma_map_area = arm920_dma_map_area,
+       .dma_unmap_area = arm920_dma_unmap_area,
+       .dma_flush_range = arm920_dma_flush_range,
+};
+#endif
+
+#if defined(CONFIG_CPU_ARM922T) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+void arm922_flush_icache_all(void);
+void arm922_flush_kern_cache_all(void);
+void arm922_flush_user_cache_all(void);
+void arm922_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm922_coherent_kern_range(unsigned long, unsigned long);
+int arm922_coherent_user_range(unsigned long, unsigned long);
+void arm922_flush_kern_dcache_area(void *, size_t);
+void arm922_dma_map_area(const void *, size_t, int);
+void arm922_dma_unmap_area(const void *, size_t, int);
+void arm922_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm922_cache_fns __initconst = {
+       .flush_icache_all = arm922_flush_icache_all,
+       .flush_kern_all = arm922_flush_kern_cache_all,
+       .flush_kern_louis = arm922_flush_kern_cache_all,
+       .flush_user_all = arm922_flush_user_cache_all,
+       .flush_user_range = arm922_flush_user_cache_range,
+       .coherent_kern_range = arm922_coherent_kern_range,
+       .coherent_user_range = arm922_coherent_user_range,
+       .flush_kern_dcache_area = arm922_flush_kern_dcache_area,
+       .dma_map_area = arm922_dma_map_area,
+       .dma_unmap_area = arm922_dma_unmap_area,
+       .dma_flush_range = arm922_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM925T
+void arm925_flush_icache_all(void);
+void arm925_flush_kern_cache_all(void);
+void arm925_flush_user_cache_all(void);
+void arm925_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm925_coherent_kern_range(unsigned long, unsigned long);
+int arm925_coherent_user_range(unsigned long, unsigned long);
+void arm925_flush_kern_dcache_area(void *, size_t);
+void arm925_dma_map_area(const void *, size_t, int);
+void arm925_dma_unmap_area(const void *, size_t, int);
+void arm925_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm925_cache_fns __initconst = {
+       .flush_icache_all = arm925_flush_icache_all,
+       .flush_kern_all = arm925_flush_kern_cache_all,
+       .flush_kern_louis = arm925_flush_kern_cache_all,
+       .flush_user_all = arm925_flush_user_cache_all,
+       .flush_user_range = arm925_flush_user_cache_range,
+       .coherent_kern_range = arm925_coherent_kern_range,
+       .coherent_user_range = arm925_coherent_user_range,
+       .flush_kern_dcache_area = arm925_flush_kern_dcache_area,
+       .dma_map_area = arm925_dma_map_area,
+       .dma_unmap_area = arm925_dma_unmap_area,
+       .dma_flush_range = arm925_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM926T
+void arm926_flush_icache_all(void);
+void arm926_flush_kern_cache_all(void);
+void arm926_flush_user_cache_all(void);
+void arm926_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm926_coherent_kern_range(unsigned long, unsigned long);
+int arm926_coherent_user_range(unsigned long, unsigned long);
+void arm926_flush_kern_dcache_area(void *, size_t);
+void arm926_dma_map_area(const void *, size_t, int);
+void arm926_dma_unmap_area(const void *, size_t, int);
+void arm926_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm926_cache_fns __initconst = {
+       .flush_icache_all = arm926_flush_icache_all,
+       .flush_kern_all = arm926_flush_kern_cache_all,
+       .flush_kern_louis = arm926_flush_kern_cache_all,
+       .flush_user_all = arm926_flush_user_cache_all,
+       .flush_user_range = arm926_flush_user_cache_range,
+       .coherent_kern_range = arm926_coherent_kern_range,
+       .coherent_user_range = arm926_coherent_user_range,
+       .flush_kern_dcache_area = arm926_flush_kern_dcache_area,
+       .dma_map_area = arm926_dma_map_area,
+       .dma_unmap_area = arm926_dma_unmap_area,
+       .dma_flush_range = arm926_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM940T
+void arm940_flush_icache_all(void);
+void arm940_flush_kern_cache_all(void);
+void arm940_flush_user_cache_all(void);
+void arm940_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm940_coherent_kern_range(unsigned long, unsigned long);
+int arm940_coherent_user_range(unsigned long, unsigned long);
+void arm940_flush_kern_dcache_area(void *, size_t);
+void arm940_dma_map_area(const void *, size_t, int);
+void arm940_dma_unmap_area(const void *, size_t, int);
+void arm940_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm940_cache_fns __initconst = {
+       .flush_icache_all = arm940_flush_icache_all,
+       .flush_kern_all = arm940_flush_kern_cache_all,
+       .flush_kern_louis = arm940_flush_kern_cache_all,
+       .flush_user_all = arm940_flush_user_cache_all,
+       .flush_user_range = arm940_flush_user_cache_range,
+       .coherent_kern_range = arm940_coherent_kern_range,
+       .coherent_user_range = arm940_coherent_user_range,
+       .flush_kern_dcache_area = arm940_flush_kern_dcache_area,
+       .dma_map_area = arm940_dma_map_area,
+       .dma_unmap_area = arm940_dma_unmap_area,
+       .dma_flush_range = arm940_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM946E
+void arm946_flush_icache_all(void);
+void arm946_flush_kern_cache_all(void);
+void arm946_flush_user_cache_all(void);
+void arm946_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm946_coherent_kern_range(unsigned long, unsigned long);
+int arm946_coherent_user_range(unsigned long, unsigned long);
+void arm946_flush_kern_dcache_area(void *, size_t);
+void arm946_dma_map_area(const void *, size_t, int);
+void arm946_dma_unmap_area(const void *, size_t, int);
+void arm946_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm946_cache_fns __initconst = {
+       .flush_icache_all = arm946_flush_icache_all,
+       .flush_kern_all = arm946_flush_kern_cache_all,
+       .flush_kern_louis = arm946_flush_kern_cache_all,
+       .flush_user_all = arm946_flush_user_cache_all,
+       .flush_user_range = arm946_flush_user_cache_range,
+       .coherent_kern_range = arm946_coherent_kern_range,
+       .coherent_user_range = arm946_coherent_user_range,
+       .flush_kern_dcache_area = arm946_flush_kern_dcache_area,
+       .dma_map_area = arm946_dma_map_area,
+       .dma_unmap_area = arm946_dma_unmap_area,
+       .dma_flush_range = arm946_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+void xscale_flush_icache_all(void);
+void xscale_flush_kern_cache_all(void);
+void xscale_flush_user_cache_all(void);
+void xscale_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void xscale_coherent_kern_range(unsigned long, unsigned long);
+int xscale_coherent_user_range(unsigned long, unsigned long);
+void xscale_flush_kern_dcache_area(void *, size_t);
+void xscale_dma_map_area(const void *, size_t, int);
+void xscale_dma_unmap_area(const void *, size_t, int);
+void xscale_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns xscale_cache_fns __initconst = {
+       .flush_icache_all = xscale_flush_icache_all,
+       .flush_kern_all = xscale_flush_kern_cache_all,
+       .flush_kern_louis = xscale_flush_kern_cache_all,
+       .flush_user_all = xscale_flush_user_cache_all,
+       .flush_user_range = xscale_flush_user_cache_range,
+       .coherent_kern_range = xscale_coherent_kern_range,
+       .coherent_user_range = xscale_coherent_user_range,
+       .flush_kern_dcache_area = xscale_flush_kern_dcache_area,
+       .dma_map_area = xscale_dma_map_area,
+       .dma_unmap_area = xscale_dma_unmap_area,
+       .dma_flush_range = xscale_dma_flush_range,
+};
+
+/* The 80200 A0 and A1 need a special quirk for dma_map_area() */
+void xscale_80200_A0_A1_dma_map_area(const void *, size_t, int);
+
+struct cpu_cache_fns xscale_80200_A0_A1_cache_fns __initconst = {
+       .flush_icache_all = xscale_flush_icache_all,
+       .flush_kern_all = xscale_flush_kern_cache_all,
+       .flush_kern_louis = xscale_flush_kern_cache_all,
+       .flush_user_all = xscale_flush_user_cache_all,
+       .flush_user_range = xscale_flush_user_cache_range,
+       .coherent_kern_range = xscale_coherent_kern_range,
+       .coherent_user_range = xscale_coherent_user_range,
+       .flush_kern_dcache_area = xscale_flush_kern_dcache_area,
+       .dma_map_area = xscale_80200_A0_A1_dma_map_area,
+       .dma_unmap_area = xscale_dma_unmap_area,
+       .dma_flush_range = xscale_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_XSC3
+void xsc3_flush_icache_all(void);
+void xsc3_flush_kern_cache_all(void);
+void xsc3_flush_user_cache_all(void);
+void xsc3_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void xsc3_coherent_kern_range(unsigned long, unsigned long);
+int xsc3_coherent_user_range(unsigned long, unsigned long);
+void xsc3_flush_kern_dcache_area(void *, size_t);
+void xsc3_dma_map_area(const void *, size_t, int);
+void xsc3_dma_unmap_area(const void *, size_t, int);
+void xsc3_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns xsc3_cache_fns __initconst = {
+       .flush_icache_all = xsc3_flush_icache_all,
+       .flush_kern_all = xsc3_flush_kern_cache_all,
+       .flush_kern_louis = xsc3_flush_kern_cache_all,
+       .flush_user_all = xsc3_flush_user_cache_all,
+       .flush_user_range = xsc3_flush_user_cache_range,
+       .coherent_kern_range = xsc3_coherent_kern_range,
+       .coherent_user_range = xsc3_coherent_user_range,
+       .flush_kern_dcache_area = xsc3_flush_kern_dcache_area,
+       .dma_map_area = xsc3_dma_map_area,
+       .dma_unmap_area = xsc3_dma_unmap_area,
+       .dma_flush_range = xsc3_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_MOHAWK
+void mohawk_flush_icache_all(void);
+void mohawk_flush_kern_cache_all(void);
+void mohawk_flush_user_cache_all(void);
+void mohawk_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void mohawk_coherent_kern_range(unsigned long, unsigned long);
+int mohawk_coherent_user_range(unsigned long, unsigned long);
+void mohawk_flush_kern_dcache_area(void *, size_t);
+void mohawk_dma_map_area(const void *, size_t, int);
+void mohawk_dma_unmap_area(const void *, size_t, int);
+void mohawk_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns mohawk_cache_fns __initconst = {
+       .flush_icache_all = mohawk_flush_icache_all,
+       .flush_kern_all = mohawk_flush_kern_cache_all,
+       .flush_kern_louis = mohawk_flush_kern_cache_all,
+       .flush_user_all = mohawk_flush_user_cache_all,
+       .flush_user_range = mohawk_flush_user_cache_range,
+       .coherent_kern_range = mohawk_coherent_kern_range,
+       .coherent_user_range = mohawk_coherent_user_range,
+       .flush_kern_dcache_area = mohawk_flush_kern_dcache_area,
+       .dma_map_area = mohawk_dma_map_area,
+       .dma_unmap_area = mohawk_dma_unmap_area,
+       .dma_flush_range = mohawk_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_FEROCEON
+void feroceon_flush_icache_all(void);
+void feroceon_flush_kern_cache_all(void);
+void feroceon_flush_user_cache_all(void);
+void feroceon_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void feroceon_coherent_kern_range(unsigned long, unsigned long);
+int feroceon_coherent_user_range(unsigned long, unsigned long);
+void feroceon_flush_kern_dcache_area(void *, size_t);
+void feroceon_dma_map_area(const void *, size_t, int);
+void feroceon_dma_unmap_area(const void *, size_t, int);
+void feroceon_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns feroceon_cache_fns __initconst = {
+       .flush_icache_all = feroceon_flush_icache_all,
+       .flush_kern_all = feroceon_flush_kern_cache_all,
+       .flush_kern_louis = feroceon_flush_kern_cache_all,
+       .flush_user_all = feroceon_flush_user_cache_all,
+       .flush_user_range = feroceon_flush_user_cache_range,
+       .coherent_kern_range = feroceon_coherent_kern_range,
+       .coherent_user_range = feroceon_coherent_user_range,
+       .flush_kern_dcache_area = feroceon_flush_kern_dcache_area,
+       .dma_map_area = feroceon_dma_map_area,
+       .dma_unmap_area = feroceon_dma_unmap_area,
+       .dma_flush_range = feroceon_dma_flush_range,
+};
+
+void feroceon_range_flush_kern_dcache_area(void *, size_t);
+void feroceon_range_dma_map_area(const void *, size_t, int);
+void feroceon_range_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns feroceon_range_cache_fns __initconst = {
+       .flush_icache_all = feroceon_flush_icache_all,
+       .flush_kern_all = feroceon_flush_kern_cache_all,
+       .flush_kern_louis = feroceon_flush_kern_cache_all,
+       .flush_user_all = feroceon_flush_user_cache_all,
+       .flush_user_range = feroceon_flush_user_cache_range,
+       .coherent_kern_range = feroceon_coherent_kern_range,
+       .coherent_user_range = feroceon_coherent_user_range,
+       .flush_kern_dcache_area = feroceon_range_flush_kern_dcache_area,
+       .dma_map_area = feroceon_range_dma_map_area,
+       .dma_unmap_area = feroceon_dma_unmap_area,
+       .dma_flush_range = feroceon_range_dma_flush_range,
+};
+#endif
index 2c873e65a5b93236c6717abf48b44a4ba9c08605..d0a57ff7846a69f6cee96eb9f9ec0fc4437a1e5d 100644 (file)
@@ -359,12 +359,6 @@ SYM_TYPED_FUNC_START(arm1020_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(arm1020_dma_unmap_area)
 
-       .globl  arm1020_flush_kern_cache_louis
-       .equ    arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions arm1020
-
        .align  5
 ENTRY(cpu_arm1020_dcache_clean_area)
 #ifndef CONFIG_CPU_DCACHE_DISABLE
index d8217737a60b3878c25f544be6925f5957bb960e..f636f42fde33fafbc81da6b3efd6bbae75a2403d 100644 (file)
@@ -346,12 +346,6 @@ SYM_TYPED_FUNC_START(arm1020e_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(arm1020e_dma_unmap_area)
 
-       .globl  arm1020e_flush_kern_cache_louis
-       .equ    arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions arm1020e
-
        .align  5
 ENTRY(cpu_arm1020e_dcache_clean_area)
 #ifndef CONFIG_CPU_DCACHE_DISABLE
index 5348cebe7e7157463af92b19fb9c0fa14bb0c341..b5f40858458d2a2703423b899ddcd89d7f4f059f 100644 (file)
@@ -345,12 +345,6 @@ SYM_TYPED_FUNC_START(arm1022_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(arm1022_dma_unmap_area)
 
-       .globl  arm1022_flush_kern_cache_louis
-       .equ    arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions arm1022
-
        .align  5
 ENTRY(cpu_arm1022_dcache_clean_area)
 #ifndef CONFIG_CPU_DCACHE_DISABLE
index 466f861d750c25e5ec2cf88260f1d878f08d91b8..505faa86ea09b4558de34b9dd83aaaec40a61fdd 100644 (file)
@@ -340,12 +340,6 @@ SYM_TYPED_FUNC_START(arm1026_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(arm1026_dma_unmap_area)
 
-       .globl  arm1026_flush_kern_cache_louis
-       .equ    arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions arm1026
-
        .align  5
 ENTRY(cpu_arm1026_dcache_clean_area)
 #ifndef CONFIG_CPU_DCACHE_DISABLE
index 6a6e26850a54e264a553bec07bb284f1e357bae3..c25e9dd0b3dd7696b59bfcf988058a47fa8db69b 100644 (file)
@@ -311,11 +311,6 @@ SYM_TYPED_FUNC_START(arm920_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(arm920_dma_unmap_area)
 
-       .globl  arm920_flush_kern_cache_louis
-       .equ    arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions arm920
 #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */
 
 
index 56230e547762f9bae1ff09f4b89087dc62a165b0..bd9bdcd68c1216c95e649e6f95573c873e22b804 100644 (file)
@@ -313,12 +313,6 @@ SYM_TYPED_FUNC_START(arm922_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(arm922_dma_unmap_area)
 
-       .globl  arm922_flush_kern_cache_louis
-       .equ    arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions arm922
-
 #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */
 
 ENTRY(cpu_arm922_dcache_clean_area)
index aeb4ee16dcc98dc13af2ff5c69d44e3efe7abc38..23477a509e48798dbcbbb75f6c02dede6b226a50 100644 (file)
@@ -368,12 +368,6 @@ SYM_TYPED_FUNC_START(arm925_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(arm925_dma_unmap_area)
 
-       .globl  arm925_flush_kern_cache_louis
-       .equ    arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions arm925
-
 ENTRY(cpu_arm925_dcache_clean_area)
 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
index 221aada5d2887418b93958ef4a4ee5af82bd2b63..335d18c850fc9aa06a3561c8f217599ba82689a6 100644 (file)
@@ -331,12 +331,6 @@ SYM_TYPED_FUNC_START(arm926_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(arm926_dma_unmap_area)
 
-       .globl  arm926_flush_kern_cache_louis
-       .equ    arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions arm926
-
 ENTRY(cpu_arm926_dcache_clean_area)
 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
index 23646bf801e16aa3d2e1b39b9e856b3a62cdf728..a429b06aa21206ba4849d5f1cc8a4052b8b0e10b 100644 (file)
@@ -269,12 +269,6 @@ SYM_TYPED_FUNC_START(arm940_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(arm940_dma_unmap_area)
 
-       .globl  arm940_flush_kern_cache_louis
-       .equ    arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions arm940
-
        .type   __arm940_setup, #function
 __arm940_setup:
        mov     r0, #0
index 763a5a9bc73560fa302f88823a41e72c97ef3816..a9d92380a5ac5a041b094ba86e6ee04b13cdfe07 100644 (file)
@@ -312,12 +312,6 @@ SYM_TYPED_FUNC_START(arm946_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(arm946_dma_unmap_area)
 
-       .globl  arm946_flush_kern_cache_louis
-       .equ    arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions arm946
-
 ENTRY(cpu_arm946_dcache_clean_area)
 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
index d31fe58bf475f82f1e30391b270e980490958742..73f355cf2f892a0c8b26649455406f13235a046a 100644 (file)
@@ -414,33 +414,6 @@ SYM_TYPED_FUNC_START(feroceon_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(feroceon_dma_unmap_area)
 
-       .globl  feroceon_flush_kern_cache_louis
-       .equ    feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions feroceon
-
-.macro range_alias basename
-       .globl feroceon_range_\basename
-       .type feroceon_range_\basename , %function
-       .equ feroceon_range_\basename , feroceon_\basename
-.endm
-
-/*
- * Most of the cache functions are unchanged for this case.
- * Export suitable alias symbols for the unchanged functions:
- */
-       range_alias flush_icache_all
-       range_alias flush_user_cache_all
-       range_alias flush_kern_cache_all
-       range_alias flush_kern_cache_louis
-       range_alias flush_user_cache_range
-       range_alias coherent_kern_range
-       range_alias coherent_user_range
-       range_alias dma_unmap_area
-
-       define_cache_functions feroceon_range
-
        .align  5
 ENTRY(cpu_feroceon_dcache_clean_area)
 #if defined(CONFIG_CACHE_FEROCEON_L2) && \
index c0acfeac3e84550a9e5cef1edfbab61dee7770d9..e388c4cc0c4448bdeca91c97501e869aa275389c 100644 (file)
@@ -320,24 +320,6 @@ ENTRY(\name\()_processor_functions)
 #endif
 .endm
 
-.macro define_cache_functions name:req
-       .align 2
-       .type   \name\()_cache_fns, #object
-ENTRY(\name\()_cache_fns)
-       .long   \name\()_flush_icache_all
-       .long   \name\()_flush_kern_cache_all
-       .long   \name\()_flush_kern_cache_louis
-       .long   \name\()_flush_user_cache_all
-       .long   \name\()_flush_user_cache_range
-       .long   \name\()_coherent_kern_range
-       .long   \name\()_coherent_user_range
-       .long   \name\()_flush_kern_dcache_area
-       .long   \name\()_dma_map_area
-       .long   \name\()_dma_unmap_area
-       .long   \name\()_dma_flush_range
-       .size   \name\()_cache_fns, . - \name\()_cache_fns
-.endm
-
 .macro globl_equ x, y
        .globl  \x
        .equ    \x, \y
index 949e2f254779d1aa23f599f9eb2427b1e450c444..5361bba617515ce53b74014a2651932f411bb0de 100644 (file)
@@ -296,12 +296,6 @@ SYM_TYPED_FUNC_START(mohawk_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(mohawk_dma_unmap_area)
 
-       .globl  mohawk_flush_kern_cache_louis
-       .equ    mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions mohawk
-
 ENTRY(cpu_mohawk_dcache_clean_area)
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHE_DLINESIZE
index ac3d99b49cc0363cf0f4a83d2c3d113aeca8edb5..8a7b8adf74846c3fd01a9f7155c1c17b93fca453 100644 (file)
@@ -341,12 +341,6 @@ SYM_TYPED_FUNC_START(xsc3_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(xsc3_dma_unmap_area)
 
-       .globl  xsc3_flush_kern_cache_louis
-       .equ    xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions xsc3
-
 ENTRY(cpu_xsc3_dcache_clean_area)
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean L1 D line
        add     r0, r0, #CACHELINESIZE
index 05d9ed952983164e6fca6c60b971e7c3eb39d3b7..bbf1e94ba554d44a1b073d9e479b2a9c5d7e1eec 100644 (file)
@@ -391,6 +391,20 @@ SYM_TYPED_FUNC_START(xscale_dma_map_area)
        b       xscale_dma_flush_range
 SYM_FUNC_END(xscale_dma_map_area)
 
+/*
+ * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
+ * clear the dirty bits, which means that if we invalidate a dirty line,
+ * the dirty data can still be written back to external memory later on.
+ *
+ * The recommended workaround is to always do a clean D-cache line before
+ * doing an invalidate D-cache line, so on the affected processors,
+ * dma_inv_range() is implemented as dma_flush_range().
+ *
+ * See erratum #25 of "Intel 80200 Processor Specification Update",
+ * revision January 22, 2003, available at:
+ *     http://www.intel.com/design/iio/specupdt/273415.htm
+ */
+
 /*
  *     dma_map_area(start, size, dir)
  *     - start - kernel virtual start address
@@ -414,49 +428,6 @@ SYM_TYPED_FUNC_START(xscale_dma_unmap_area)
        ret     lr
 SYM_FUNC_END(xscale_dma_unmap_area)
 
-       .globl  xscale_flush_kern_cache_louis
-       .equ    xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions xscale
-
-/*
- * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
- * clear the dirty bits, which means that if we invalidate a dirty line,
- * the dirty data can still be written back to external memory later on.
- *
- * The recommended workaround is to always do a clean D-cache line before
- * doing an invalidate D-cache line, so on the affected processors,
- * dma_inv_range() is implemented as dma_flush_range().
- *
- * See erratum #25 of "Intel 80200 Processor Specification Update",
- * revision January 22, 2003, available at:
- *     http://www.intel.com/design/iio/specupdt/273415.htm
- */
-.macro a0_alias basename
-       .globl xscale_80200_A0_A1_\basename
-       .type xscale_80200_A0_A1_\basename , %function
-       .equ xscale_80200_A0_A1_\basename , xscale_\basename
-.endm
-
-/*
- * Most of the cache functions are unchanged for these processor revisions.
- * Export suitable alias symbols for the unchanged functions:
- */
-       a0_alias flush_icache_all
-       a0_alias flush_user_cache_all
-       a0_alias flush_kern_cache_all
-       a0_alias flush_kern_cache_louis
-       a0_alias flush_user_cache_range
-       a0_alias coherent_kern_range
-       a0_alias coherent_user_range
-       a0_alias flush_kern_dcache_area
-       a0_alias dma_flush_range
-       a0_alias dma_unmap_area
-
-       @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-       define_cache_functions xscale_80200_A0_A1
-
 ENTRY(cpu_xscale_dcache_clean_area)
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHELINESIZE