Various architectures have almost identical implementations of the
__memcpy_toio(), __memcpy_fromio() and __memset_io() functions, so
consolidate them into a common lib/io_copy.c and introduce a
CONFIG_GENERIC_IO_COPY option that architectures can select to build it.
Reviewed-by: Yann Sionneau <ysionneau@kalrayinc.com>
Signed-off-by: Julian Vetter <jvetter@kalrayinc.com>
---
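For reference, a rough sketch of how an architecture could opt in later
(arm64 is used purely as an illustration here; this patch does not
convert any architecture). The arch would select the new symbol from
its Kconfig:

    config ARM64
        select GENERIC_IO_COPY

and then drop its own __memcpy_fromio()/__memcpy_toio()/__memset_io()
definitions (for arm64 that would be arch/arm64/kernel/io.c), while
keeping the prototypes and the memcpy_fromio()/memcpy_toio()/memset_io()
wrapper macros in its asm/io.h, so the lib/io_copy.c versions are picked
up instead.
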
lib/Kconfig | 3 ++
lib/Makefile | 1 +
 lib/io_copy.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 123 insertions(+)
create mode 100644 lib/io_copy.c
diff --git a/lib/Kconfig b/lib/Kconfig
index b38849af6f13..741550bc3856 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -70,6 +70,9 @@ source "lib/math/Kconfig"
config NO_GENERIC_PCI_IOPORT_MAP
bool
+config GENERIC_IO_COPY
+ bool
+
config GENERIC_IOMAP
bool
select GENERIC_PCI_IOMAP
diff --git a/lib/Makefile b/lib/Makefile
index 322bb127b4dc..4f56ad5f9ed6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -130,6 +130,7 @@ CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
obj-y += math/ crypto/
+obj-$(CONFIG_GENERIC_IO_COPY) += io_copy.o
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
diff --git a/lib/io_copy.c b/lib/io_copy.c
new file mode 100644
index 000000000000..bfdb84e87607
--- /dev/null
+++ b/lib/io_copy.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * derived from arch/arm/kernel/io.c
+ *
+ * Copyright (C) 2024 Kalray Inc.
+ * Author(s): Julian Vetter
+ */
+
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#define NATIVE_STORE_SIZE (BITS_PER_LONG / 8)
+
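+/*
+ * Copy data from IO memory space to "real" memory space.
+ */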
+void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)from, NATIVE_STORE_SIZE)) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+
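+ /* The memory side may be unaligned, hence put_unaligned() */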
+ while (count >= NATIVE_STORE_SIZE) {
+#ifdef CONFIG_64BIT
+ put_unaligned(__raw_readq(from), (u64 *)to);
+#else
+ put_unaligned(__raw_readl(from), (u32 *)to);
+#endif
+ from += NATIVE_STORE_SIZE;
+ to += NATIVE_STORE_SIZE;
+ count -= NATIVE_STORE_SIZE;
+ }
+
+ while (count) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_fromio);
+
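+/*
+ * Copy data from "real" memory space to IO memory space.
+ */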
+void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)to, NATIVE_STORE_SIZE)) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+
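+ /* The memory side may be unaligned, hence get_unaligned() */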
+ while (count >= NATIVE_STORE_SIZE) {
+#ifdef CONFIG_64BIT
+ __raw_writeq(get_unaligned((const u64 *)from), to);
+#else
+ __raw_writel(get_unaligned((const u32 *)from), to);
+#endif
+ from += NATIVE_STORE_SIZE;
+ to += NATIVE_STORE_SIZE;
+ count -= NATIVE_STORE_SIZE;
+ }
+
+ while (count) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_toio);
+
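+/*
+ * "memset" on IO memory space.
+ */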
+void __memset_io(volatile void __iomem *dst, int c, size_t count)
+{
+ unsigned long qc = (u8)c;
+
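+ /* Replicate the fill byte across a native word */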
+ qc |= qc << 8;
+ qc |= qc << 16;
+
+ if (IS_ENABLED(CONFIG_64BIT))
+ qc |= (qc << 16) << 16; /* avoid a >= word-size shift on 32-bit */
+
+ while (count && !IS_ALIGNED((unsigned long)dst, NATIVE_STORE_SIZE)) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+
+ while (count >= NATIVE_STORE_SIZE) {
+#ifdef CONFIG_64BIT
+ __raw_writeq(qc, dst);
+#else
+ __raw_writel(qc, dst);
+#endif
+ dst += NATIVE_STORE_SIZE;
+ count -= NATIVE_STORE_SIZE;
+ }
+
+ while (count) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memset_io);
--
2.34.1