Complete switching the __iowriteXX_copy() routines over to use #define and
arch-provided inline/macro functions instead of weak symbols.
S390 has an implementation that simply calls another memcpy function.
Inline it so that callers don't have to make two jumps.
Link: https://lore.kernel.org/r/3-v3-1893cd8b9369+1925-mlx5_arm_wc_jgg@nvidia.com
Acked-by: Niklas Schnelle <schnelle@linux.ibm.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
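
For context, the override idiom this series relies on is: the arch header
supplies a static inline and then #defines the symbol name to itself, and
the generic code only builds its out-of-line fallback when that macro is
absent. Below is a minimal userspace sketch of the idiom, assuming plain
memcpy() as a stand-in for zpci_memcpy_toio(), dropping the __iomem
annotation, and adding a purely illustrative main() harness:

  #include <stddef.h>
  #include <string.h>
  #include <stdio.h>

  /* "arch header": supply an inline, then #define the name to itself */
  static inline void __iowrite64_copy(void *to, const void *from, size_t count)
  {
      memcpy(to, from, count * 8);  /* one call, no second jump */
  }
  #define __iowrite64_copy __iowrite64_copy

  /* "generic code": the fallback is built only when no arch override
   * exists, replacing the old weak-symbol mechanism
   */
  #ifndef __iowrite64_copy
  void __iowrite64_copy(void *to, const void *from, size_t count)
  {
      /* generic 64-bit copy loop would live here */
  }
  #endif

  int main(void)
  {
      unsigned long long src[2] = { 1, 2 }, dst[2] = { 0, 0 };

      __iowrite64_copy(dst, src, 2);
      printf("%llu %llu\n", dst[0], dst[1]);
      return 0;
  }

Unlike a weak symbol, which remains an out-of-line call resolved by the
linker, the #define lets the compiler see the arch body and inline it at
every call site.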
}
#define __iowrite32_copy __iowrite32_copy
+static inline void __iowrite64_copy(void __iomem *to, const void *from,
+				    size_t count)
+{
+	zpci_memcpy_toio(to, from, count * 8);
+}
+#define __iowrite64_copy __iowrite64_copy
+
#endif /* CONFIG_PCI */
#include <asm-generic/io.h>
return 0;
}
-/* combine single writes by using store-block insn */
-void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
-{
-	zpci_memcpy_toio(to, from, count * 8);
-}
-
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
unsigned long prot)
{
#endif
void __ioread32_copy(void *to, const void __iomem *from, size_t count);
+
+#ifndef __iowrite64_copy
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
+#endif
#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end,
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
-void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
-					    const void *from,
-					    size_t count)
+#ifndef __iowrite64_copy
+void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
#ifdef CONFIG_64BIT
u64 __iomem *dst = to;
__iowrite32_copy(to, from, count * 2);
#endif
}
-
EXPORT_SYMBOL_GPL(__iowrite64_copy);
+#endif