Both Cadence GEM Ethernet controllers on EyeQ5 are hardwired through CM3
IO Coherency Units (IOCU). For DMA coherent accesses, BIT(36) must be
set in DMA addresses.
Implement that in platform-specific dma_map_ops which get attached to
both instances of `mobileye,eyeq5-gem` through a notifier block.
Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
---
MAINTAINERS | 2 +-
arch/mips/mobileye/Kconfig | 1 +
arch/mips/mobileye/Makefile | 2 +
arch/mips/mobileye/eyeq5-iocu-dma.c | 160 ++++++++++++++++++++++++++++++++++++
4 files changed, 164 insertions(+), 1 deletion(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index bb9df569a3fff41ab40d7da5843f1e8564b47bf2..7ee68d7f8e8d0632846f59579412458e301bd8fb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16789,7 +16789,7 @@ F: Documentation/devicetree/bindings/mips/mobileye.yaml
F: Documentation/devicetree/bindings/soc/mobileye/
F: arch/mips/boot/dts/mobileye/
F: arch/mips/configs/eyeq5_defconfig
-F: arch/mips/mobileye/board-epm5.its.S
+F: arch/mips/mobileye/
F: drivers/clk/clk-eyeq.c
F: drivers/pinctrl/pinctrl-eyeq5.c
F: drivers/reset/reset-eyeq.c
diff --git a/arch/mips/mobileye/Kconfig b/arch/mips/mobileye/Kconfig
index f9abb2d6e1787dbc5a173db48606ed5a02088e41..b9040f3a9b3ddc7f5addcd8e5f110cb9c775b6b1 100644
--- a/arch/mips/mobileye/Kconfig
+++ b/arch/mips/mobileye/Kconfig
@@ -9,6 +9,7 @@ choice
config MACH_EYEQ5
bool "Mobileye EyeQ5 SoC"
+ select ARCH_HAS_DMA_OPS
config MACH_EYEQ6H
bool "Mobileye EyeQ6H SoC"
diff --git a/arch/mips/mobileye/Makefile b/arch/mips/mobileye/Makefile
index 315c06b689cfbb83f9f205d1140ecf5058e2aa02..50fc7d0ae167c3fb3dc8585bcd45583c6cc3f2d2 100644
--- a/arch/mips/mobileye/Makefile
+++ b/arch/mips/mobileye/Makefile
@@ -1 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-or-later
+
+obj-$(CONFIG_MACH_EYEQ5) += eyeq5-iocu-dma.o
diff --git a/arch/mips/mobileye/eyeq5-iocu-dma.c b/arch/mips/mobileye/eyeq5-iocu-dma.c
new file mode 100644
index 0000000000000000000000000000000000000000..71d1c35f911636db141c4467dccc405af69835ec
--- /dev/null
+++ b/arch/mips/mobileye/eyeq5-iocu-dma.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-map-ops.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/gfp_types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/notifier.h>
+#include <linux/pfn.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+static void *eyeq5_iocu_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+{
+ void *p = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+
+ *dma_handle |= BIT_ULL(36);
+ return p;
+}
+
+static void eyeq5_iocu_free(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ unsigned long attrs)
+{
+ dma_handle &= ~BIT_ULL(36);
+ dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+}
+
+static int eyeq5_iocu_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long user_count = vma_pages(vma);
+ int ret;
+
+ vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
+ return -ENXIO;
+
+ return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ user_count << PAGE_SHIFT, vma->vm_page_prot);
+}
+
+static int eyeq5_iocu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ struct page *page = virt_to_page(cpu_addr);
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (!ret)
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return ret;
+}
+
+static dma_addr_t eyeq5_iocu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t phys = page_to_phys(page) + offset;
+
+ /* BIT(36) toggles routing through IOCU for DMA operations. */
+ return phys_to_dma(dev, phys) | BIT_ULL(36);
+}
+
+static void eyeq5_iocu_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+}
+
+static int eyeq5_iocu_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i) {
+ sg->dma_address = eyeq5_iocu_map_page(dev, sg_page(sg),
+ sg->offset, sg->length,
+ dir, attrs);
+ if (sg->dma_address == DMA_MAPPING_ERROR)
+ return 0; /* No cleanup because ->unmap_page() is a no-op. */
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nents;
+}
+
+static void eyeq5_iocu_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ /* We know page ->unmap_page() is a no-op. */
+}
+
+const struct dma_map_ops eyeq5_iocu_ops = {
+ .alloc = eyeq5_iocu_alloc,
+ .free = eyeq5_iocu_free,
+ .alloc_pages_op = dma_direct_alloc_pages,
+ .free_pages = dma_direct_free_pages,
+ .mmap = eyeq5_iocu_mmap,
+ .get_sgtable = eyeq5_iocu_get_sgtable,
+ .map_page = eyeq5_iocu_map_page,
+ .unmap_page = eyeq5_iocu_unmap_page,
+ .map_sg = eyeq5_iocu_map_sg,
+ .unmap_sg = eyeq5_iocu_unmap_sg,
+ .get_required_mask = dma_direct_get_required_mask,
+};
+EXPORT_SYMBOL(eyeq5_iocu_ops);
+
+static int eyeq5_iocu_notifier(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct device *dev = data;
+
+ /*
+ * IOCU routing is hardwired; we must use our above custom
+ * routines for cache-coherent DMA on ethernet interfaces.
+ */
+ if (event == BUS_NOTIFY_ADD_DEVICE &&
+ device_is_compatible(dev, "mobileye,eyeq5-gem")) {
+ set_dma_ops(dev, &eyeq5_iocu_ops);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block eyeq5_iocu_nb = {
+ .notifier_call = eyeq5_iocu_notifier,
+};
+
+static int __init eyeq5_iocu_init(void)
+{
+ return bus_register_notifier(&platform_bus_type, &eyeq5_iocu_nb);
+}
+postcore_initcall(eyeq5_iocu_init);
--
2.50.0
在2025年6月27日周五 上午10:09,Théo Lebrun写道: > Both Cadence GEM Ethernet controllers on EyeQ5 are hardwired through CM3 > IO Coherency Units (IOCU). For DMA coherent accesses, BIT(36) must be > set in DMA addresses. Hi Théo, Just quick question, it seems like this special driver is only applying a fixed offset (1 << 36) to the DMA physical address, can we achieve that with dma-ranges property in DeviceTree? I belive: ``` dma-coherent; # Bus addr # Phys # Size dma-ranges = <0x10 0x00000000 0x0 0x0 0x10 0>; ``` Will do the job. Thanks Jiaxun > > Implement that in platform-specific dma_map_ops which get attached to > both instances of `cdns,eyeq5-gem` through a notifier block. > > Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com> > --- > MAINTAINERS | 2 +- > arch/mips/mobileye/Kconfig | 1 + > arch/mips/mobileye/Makefile | 2 + > arch/mips/mobileye/eyeq5-iocu-dma.c | 160 ++++++++++++++++++++++++++++++++++++ > 4 files changed, 164 insertions(+), 1 deletion(-) > > diff --git a/MAINTAINERS b/MAINTAINERS > index > bb9df569a3fff41ab40d7da5843f1e8564b47bf2..7ee68d7f8e8d0632846f59579412458e301bd8fb > 100644 > --- a/MAINTAINERS > +++ b/MAINTAINERS > @@ -16789,7 +16789,7 @@ > F: Documentation/devicetree/bindings/mips/mobileye.yaml > F: Documentation/devicetree/bindings/soc/mobileye/ > F: arch/mips/boot/dts/mobileye/ > F: arch/mips/configs/eyeq5_defconfig > -F: arch/mips/mobileye/board-epm5.its.S > +F: arch/mips/mobileye/ > F: drivers/clk/clk-eyeq.c > F: drivers/pinctrl/pinctrl-eyeq5.c > F: drivers/reset/reset-eyeq.c > diff --git a/arch/mips/mobileye/Kconfig b/arch/mips/mobileye/Kconfig > index > f9abb2d6e1787dbc5a173db48606ed5a02088e41..b9040f3a9b3ddc7f5addcd8e5f110cb9c775b6b1 > 100644 > --- a/arch/mips/mobileye/Kconfig > +++ b/arch/mips/mobileye/Kconfig > @@ -9,6 +9,7 @@ choice > > config MACH_EYEQ5 > bool "Mobileye EyeQ5 SoC" > + select ARCH_HAS_DMA_OPS > > config MACH_EYEQ6H > bool "Mobileye EyeQ6H SoC" > diff --git a/arch/mips/mobileye/Makefile b/arch/mips/mobileye/Makefile > 
index > 315c06b689cfbb83f9f205d1140ecf5058e2aa02..50fc7d0ae167c3fb3dc8585bcd45583c6cc3f2d2 > 100644 > --- a/arch/mips/mobileye/Makefile > +++ b/arch/mips/mobileye/Makefile > @@ -1 +1,3 @@ > # SPDX-License-Identifier: GPL-2.0-or-later > + > +obj-$(CONFIG_MACH_EYEQ5) += eyeq5-iocu-dma.o > diff --git a/arch/mips/mobileye/eyeq5-iocu-dma.c > b/arch/mips/mobileye/eyeq5-iocu-dma.c > new file mode 100644 > index > 0000000000000000000000000000000000000000..71d1c35f911636db141c4467dccc405af69835ec > --- /dev/null > +++ b/arch/mips/mobileye/eyeq5-iocu-dma.c > @@ -0,0 +1,160 @@ > +// SPDX-License-Identifier: GPL-2.0 > + > +#include <linux/bits.h> > +#include <linux/device.h> > +#include <linux/device/bus.h> > +#include <linux/dma-direct.h> > +#include <linux/dma-direction.h> > +#include <linux/dma-map-ops.h> > +#include <linux/dma-mapping.h> > +#include <linux/errno.h> > +#include <linux/export.h> > +#include <linux/gfp_types.h> > +#include <linux/init.h> > +#include <linux/mm.h> > +#include <linux/mm_types.h> > +#include <linux/notifier.h> > +#include <linux/pfn.h> > +#include <linux/platform_device.h> > +#include <linux/property.h> > +#include <linux/scatterlist.h> > +#include <linux/types.h> > + > +static void *eyeq5_iocu_alloc(struct device *dev, size_t size, > + dma_addr_t *dma_handle, gfp_t gfp, > + unsigned long attrs) > +{ > + void *p = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); > + > + *dma_handle |= BIT_ULL(36); > + return p; > +} > + > +static void eyeq5_iocu_free(struct device *dev, size_t size, > + void *vaddr, dma_addr_t dma_handle, > + unsigned long attrs) > +{ > + dma_handle &= ~BIT_ULL(36); > + dma_direct_free(dev, size, vaddr, dma_handle, attrs); > +} > + > +static int eyeq5_iocu_mmap(struct device *dev, struct vm_area_struct > *vma, > + void *cpu_addr, dma_addr_t dma_addr, size_t size, > + unsigned long attrs) > +{ > + unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr)); > + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; > + unsigned 
long user_count = vma_pages(vma); > + int ret; > + > + vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); > + > + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) > + return ret; > + > + if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff) > + return -ENXIO; > + > + return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, > + user_count << PAGE_SHIFT, vma->vm_page_prot); > +} > + > +static int eyeq5_iocu_get_sgtable(struct device *dev, struct sg_table > *sgt, > + void *cpu_addr, dma_addr_t dma_addr, size_t size, > + unsigned long attrs) > +{ > + struct page *page = virt_to_page(cpu_addr); > + int ret; > + > + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); > + if (!ret) > + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); > + return ret; > +} > + > +static dma_addr_t eyeq5_iocu_map_page(struct device *dev, struct page > *page, > + unsigned long offset, size_t size, > + enum dma_data_direction dir, > + unsigned long attrs) > +{ > + phys_addr_t phys = page_to_phys(page) + offset; > + > + /* BIT(36) toggles routing through IOCU for DMA operations. */ > + return phys_to_dma(dev, phys) | BIT_ULL(36); > +} > + > +static void eyeq5_iocu_unmap_page(struct device *dev, dma_addr_t > dma_handle, > + size_t size, enum dma_data_direction dir, > + unsigned long attrs) > +{ > +} > + > +static int eyeq5_iocu_map_sg(struct device *dev, struct scatterlist > *sgl, > + int nents, enum dma_data_direction dir, > + unsigned long attrs) > +{ > + struct scatterlist *sg; > + int i; > + > + for_each_sg(sgl, sg, nents, i) { > + sg->dma_address = eyeq5_iocu_map_page(dev, sg_page(sg), > + sg->offset, sg->length, > + dir, attrs); > + if (sg->dma_address == DMA_MAPPING_ERROR) > + return 0; /* No cleanup because ->unmap_page() is a no-op. 
*/ > + sg_dma_len(sg) = sg->length; > + } > + > + return nents; > +} > + > +static void eyeq5_iocu_unmap_sg(struct device *dev, struct scatterlist > *sgl, > + int nents, enum dma_data_direction dir, > + unsigned long attrs) > +{ > + /* We know page ->unmap_page() is a no-op. */ > +} > + > +const struct dma_map_ops eyeq5_iocu_ops = { > + .alloc = eyeq5_iocu_alloc, > + .free = eyeq5_iocu_free, > + .alloc_pages_op = dma_direct_alloc_pages, > + .free_pages = dma_direct_free_pages, > + .mmap = eyeq5_iocu_mmap, > + .get_sgtable = eyeq5_iocu_get_sgtable, > + .map_page = eyeq5_iocu_map_page, > + .unmap_page = eyeq5_iocu_unmap_page, > + .map_sg = eyeq5_iocu_map_sg, > + .unmap_sg = eyeq5_iocu_unmap_sg, > + .get_required_mask = dma_direct_get_required_mask, > +}; > +EXPORT_SYMBOL(eyeq5_iocu_ops); > + > +static int eyeq5_iocu_notifier(struct notifier_block *nb, > + unsigned long event, > + void *data) > +{ > + struct device *dev = data; > + > + /* > + * IOCU routing is hardwired; we must use our above custom > + * routines for cache-coherent DMA on ethernet interfaces. > + */ > + if (event == BUS_NOTIFY_ADD_DEVICE && > + device_is_compatible(dev, "mobileye,eyeq5-gem")) { > + set_dma_ops(dev, &eyeq5_iocu_ops); > + return NOTIFY_OK; > + } > + > + return NOTIFY_DONE; > +} > + > +static struct notifier_block eyeq5_iocu_nb = { > + .notifier_call = eyeq5_iocu_notifier, > +}; > + > +static int __init eyeq5_iocu_init(void) > +{ > + return bus_register_notifier(&platform_bus_type, &eyeq5_iocu_nb); > +} > +postcore_initcall(eyeq5_iocu_init); > > -- > 2.50.0 -- - Jiaxun
Hello Jiaxun, On Mon Jun 30, 2025 at 3:35 PM CEST, Jiaxun Yang wrote: > 在2025年6月27日周五 上午10:09,Théo Lebrun写道: >> Both Cadence GEM Ethernet controllers on EyeQ5 are hardwired through CM3 >> IO Coherency Units (IOCU). For DMA coherent accesses, BIT(36) must be >> set in DMA addresses. > > Just quick question, it seems like this special driver is only applying a > fixed offset (1 << 36) to the DMA physical address, can we achieve that with dma-ranges > property in DeviceTree? > > I belive: > ``` > dma-coherent; > # Bus addr # Phys # Size > dma-ranges = <0x10 0x00000000 0x0 0x0 0x10 0>; > ``` > > Will do the job. This is perfect! Can confirm it works just fine. When you are stuck in an issue for too long you don't think about broadening your viewpoint. Thanks, -- Théo Lebrun, Bootlin Embedded Linux and Kernel engineering https://bootlin.com
On Fri, Jun 27, 2025 at 11:09:02AM +0200, Théo Lebrun wrote: > Both Cadence GEM Ethernet controllers on EyeQ5 are hardwired through CM3 > IO Coherency Units (IOCU). For DMA coherent accesses, BIT(36) must be > set in DMA addresses. > > Implement that in platform-specific dma_map_ops which get attached to > both instances of `cdns,eyeq5-gem` through a notifier block. > > Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com> ... > diff --git a/arch/mips/mobileye/eyeq5-iocu-dma.c b/arch/mips/mobileye/eyeq5-iocu-dma.c ... > +const struct dma_map_ops eyeq5_iocu_ops = { > + .alloc = eyeq5_iocu_alloc, > + .free = eyeq5_iocu_free, > + .alloc_pages_op = dma_direct_alloc_pages, > + .free_pages = dma_direct_free_pages, > + .mmap = eyeq5_iocu_mmap, > + .get_sgtable = eyeq5_iocu_get_sgtable, > + .map_page = eyeq5_iocu_map_page, > + .unmap_page = eyeq5_iocu_unmap_page, > + .map_sg = eyeq5_iocu_map_sg, > + .unmap_sg = eyeq5_iocu_unmap_sg, > + .get_required_mask = dma_direct_get_required_mask, > +}; > +EXPORT_SYMBOL(eyeq5_iocu_ops); Hi Théo, Does eyeq5_iocu_ops need to be exported? If so it should probably be declared in a header file somewhere. But I if not probably the EXPORT_SYMBOL line should be dropped, and the structure made static. Flagged by Sparse. > + > +static int eyeq5_iocu_notifier(struct notifier_block *nb, > + unsigned long event, > + void *data) > +{ > + struct device *dev = data; > + > + /* > + * IOCU routing is hardwired; we must use our above custom > + * routines for cache-coherent DMA on ethernet interfaces. > + */ > + if (event == BUS_NOTIFY_ADD_DEVICE && > + device_is_compatible(dev, "mobileye,eyeq5-gem")) { > + set_dma_ops(dev, &eyeq5_iocu_ops); > + return NOTIFY_OK; > + } > + > + return NOTIFY_DONE; > +} ...
© 2016 - 2025 Red Hat, Inc.