From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
memremap within dma_init_coherent_memory will map the given phys_addr
into the vmalloc area if the PA is not found while iterating iomem_resources,
which conflicts with the rejection of vmalloc addresses in dma_map_single_attrs.
IMO, it is fine to let all valid virtual addresses be usable for DMA, as the
caller will keep the corresponding RAM safe for the transfer.
Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
include/linux/dma-mapping.h | 12 +++++++-----
kernel/dma/debug.c | 4 ----
2 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f0ccca16a0ac..7a7b87289d55 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -328,12 +328,14 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	/* DMA must never operate on areas that might be remapped. */
-	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
-			  "rejecting DMA map of vmalloc memory\n"))
-		return DMA_MAPPING_ERROR;
+	struct page *page;
+
 	debug_dma_map_single(dev, ptr, size);
-	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
+	if (is_vmalloc_addr(ptr))
+		page = vmalloc_to_page(ptr);
+	else
+		page = virt_to_page(ptr);
+	return dma_map_page_attrs(dev, page, offset_in_page(ptr),
 			size, dir, attrs);
 }
 
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 06366acd27b0..51e1fe9a70aa 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -1198,10 +1198,6 @@ void debug_dma_map_single(struct device *dev, const void *addr,
 	if (!virt_addr_valid(addr))
 		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
 			   addr, len);
-
-	if (is_vmalloc_addr(addr))
-		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
-			   addr, len);
 }
 EXPORT_SYMBOL(debug_dma_map_single);
 
--
2.25.1
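
As an illustration of the call pattern this patch intends to permit, a rough
sketch follows; "dev" is a hypothetical struct device pointer, and whether
this pattern is acceptable at all is the subject of the review replies below:

	/*
	 * Sketch only: a page-sized vmalloc buffer passed straight to
	 * dma_map_single(), which this patch would translate via
	 * vmalloc_to_page() instead of rejecting outright.
	 */
	void *buf = vmalloc(PAGE_SIZE);
	dma_addr_t addr = dma_map_single(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);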
On Mon, Nov 27, 2023 at 11:09:30AM +0800, zhaoyang.huang wrote:
> From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
>
> memremap within dma_init_coherent_memory will map the given phys_addr
> into vmalloc area if the pa is not found during iterating iomem_resources,
> which conflict the rejection of vmalloc area in dma_map_single_attrs.

I can't parse this sentence.

> IMO, it is find to let all valid virtual address be valid for DMA as the
> user will keep corresponding RAM safe for transfer.

No, vmalloc addresses can't be passed to map_single.  You need to pass
the page to dma_map_page, and explicitly manage cache consistency
using the invalidate_kernel_vmap_range and flush_kernel_vmap_range
helpers.
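
A minimal sketch of the approach Christoph describes might look like the
following, assuming the mapped range does not cross a page boundary;
sketch_map_vmalloc_page() is a hypothetical helper for illustration, not an
existing kernel API:

	/*
	 * Minimal sketch, assuming vaddr..vaddr+size stays within one page.
	 * Translate the vmalloc alias to its backing struct page and map
	 * that page; the vmap alias is maintained by hand.
	 * Needs <linux/dma-mapping.h>, <linux/vmalloc.h>, <linux/highmem.h>.
	 */
	static dma_addr_t sketch_map_vmalloc_page(struct device *dev,
			void *vaddr, size_t size, enum dma_data_direction dir)
	{
		struct page *page = vmalloc_to_page(vaddr);

		/* Write back CPU stores made through the vmap alias first. */
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			flush_kernel_vmap_range(vaddr, size);

		return dma_map_page(dev, page, offset_in_page(vaddr),
				    size, dir);
	}

	/*
	 * After a DMA_FROM_DEVICE transfer completes, and before reading
	 * the buffer through the vmalloc alias again:
	 *
	 *	invalidate_kernel_vmap_range(vaddr, size);
	 */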
On Mon, Nov 27, 2023 at 3:14 PM Christoph Hellwig <hch@lst.de> wrote:
>
> On Mon, Nov 27, 2023 at 11:09:30AM +0800, zhaoyang.huang wrote:
> > From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> >
> > memremap within dma_init_coherent_memory will map the given phys_addr
> > into vmalloc area if the pa is not found during iterating iomem_resources,
> > which conflict the rejection of vmalloc area in dma_map_single_attrs.
>
> I can't parse this sentence.
Sorry for the confusion; please see the call chain below for more context.

dma_init_coherent_memory
    memremap
        addr = ioremap_wt(offset, size);

What I mean is that addr is a vmalloc address which is mapped implicitly
by the DMA framework, without the driver being aware of it.

> > IMO, it is find to let all valid virtual address be valid for DMA as the
> > user will keep corresponding RAM safe for transfer.
>
> No, vmalloc address can't be passed to map_single.  You need to pass
> the page to dma_map_page, and explicitly mange cache consistency
> using the invalidate_kernel_vmap_range and flush_kernel_vmap_range
> helpers.
Please correct me if I am wrong. According to my understanding, cache
consistency could be handled inside dma_map_page via either
dma_direct_map_page (swiotlb/arch_sync_dma_for_device) or ops->map_page.
The original rationale for rejecting vmalloc was that the PA is not safe,
as the mapping could go away at any time. What I am suggesting is to let
this kind of VA be accepted:

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
>
On 2023-11-27 8:56 am, Zhaoyang Huang wrote:
> On Mon, Nov 27, 2023 at 3:14 PM Christoph Hellwig <hch@lst.de> wrote:
>>
>> On Mon, Nov 27, 2023 at 11:09:30AM +0800, zhaoyang.huang wrote:
>>> From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
>>>
>>> memremap within dma_init_coherent_memory will map the given phys_addr
>>> into vmalloc area if the pa is not found during iterating iomem_resources,
>>> which conflict the rejection of vmalloc area in dma_map_single_attrs.
>>
>> I can't parse this sentence.
> Sorry for the confusion, please find below codes for more information.
> dma_init_coherent_memory
>     memremap
>         addr = ioremap_wt(offset, size);
> What I mean is addr is a vmalloc address, which is implicitly mapped
> by dma's framework and not be aware of to the driver.
>>
>>> IMO, it is find to let all valid virtual address be valid for DMA as the
>>> user will keep corresponding RAM safe for transfer.
>>
>> No, vmalloc address can't be passed to map_single.  You need to pass
>> the page to dma_map_page, and explicitly mange cache consistency
>> using the invalidate_kernel_vmap_range and flush_kernel_vmap_range
>> helpers.
> Please correct me if I am wrong. According to my understanding, cache
> consistency could be solved inside dma_map_page via either
> dma_direct_map_page(swio/arch_sync_dma_for_device) or ops->map_page.
> The original thought of rejecting vmalloc is that this pa is not safe
> as this mapping could go in any time. What I am suggesting is to let
> this kind of va be enrolled.

No, the point is that dma_map_single() uses virt_to_page(), and
virt_to_page() is definitely not valid for vmalloc addresses. At worst
it may blow up in itself with an out-of-bounds dereference; at best
it's going to return a completely bogus page pointer which may then
make dma_map_page() fall over.

Thanks,
Robin.

>
> static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
> 		size_t size, enum dma_data_direction dir, unsigned long attrs)
> {
> 	/* DMA must never operate on areas that might be remapped. */
> 	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
> 			  "rejecting DMA map of vmalloc memory\n"))
> 		return DMA_MAPPING_ERROR;
>
>>
>
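
For illustration, a rough model of the failure Robin describes follows; the
real implementation is architecture-specific, and this simplified
FLATMEM-style version is only a sketch:

	/*
	 * Rough model (not the real arch code):
	 *
	 *	virt_to_page(va)  ~=  mem_map + (__pa(va) >> PAGE_SHIFT)
	 *
	 * __pa() merely subtracts the linear-map offset, so it only yields
	 * a meaningful PFN for linear-map addresses.  Feed it a vmalloc
	 * address and the resulting "page" pointer indexes mem_map out of
	 * bounds.  A vmalloc address needs a page-table walk instead:
	 */
	struct page *page = vmalloc_to_page(vaddr);	/* walks page tables */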
On Mon, Nov 27, 2023 at 04:56:45PM +0800, Zhaoyang Huang wrote:
> Sorry for the confusion, please find below codes for more information.
> dma_init_coherent_memory
>     memremap
>         addr = ioremap_wt(offset, size);
> What I mean is addr is a vmalloc address, which is implicitly mapped
> by dma's framework and not be aware of to the driver.

Yes.  And it is only returned from dma_alloc_coherent, which should
never be passed to dma_map_<anything>.

> Please correct me if I am wrong. According to my understanding, cache
> consistency could be solved inside dma_map_page via either
> dma_direct_map_page(swio/arch_sync_dma_for_device) or ops->map_page.
> The original thought of rejecting vmalloc is that this pa is not safe
> as this mapping could go in any time. What I am suggesting is to let
> this kind of va be enrolled.

But that only works for the direct mapping.  It does not work for the
additional aliases created by vmap/ioremap/memremap.  Now that only
matters if the cache is virtually indexed, which is rather unusual
these days.
This patch arose from a real problem where a driver failed when calling
dma_map_single(dev, ptr): ptr is a vmalloc VA which dma_init_coherent_memory
mapped over the reserved memory.

On Mon, Nov 27, 2023 at 11:09 AM zhaoyang.huang
<zhaoyang.huang@unisoc.com> wrote:
>
> From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
>
> memremap within dma_init_coherent_memory will map the given phys_addr
> into vmalloc area if the pa is not found during iterating iomem_resources,
> which conflict the rejection of vmalloc area in dma_map_single_attrs.
> IMO, it is find to let all valid virtual address be valid for DMA as the
> user will keep corresponding RAM safe for transfer.
>
> Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> ---
>  include/linux/dma-mapping.h | 12 +++++++-----
>  kernel/dma/debug.c          |  4 ----
>  2 files changed, 7 insertions(+), 9 deletions(-)
>
> diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
> index f0ccca16a0ac..7a7b87289d55 100644
> --- a/include/linux/dma-mapping.h
> +++ b/include/linux/dma-mapping.h
> @@ -328,12 +328,14 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
>  static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
>  		size_t size, enum dma_data_direction dir, unsigned long attrs)
>  {
> -	/* DMA must never operate on areas that might be remapped. */
> -	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
> -			  "rejecting DMA map of vmalloc memory\n"))
> -		return DMA_MAPPING_ERROR;
> +	struct page *page;
> +
>  	debug_dma_map_single(dev, ptr, size);
> -	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
> +	if (is_vmalloc_addr(ptr))
> +		page = vmalloc_to_page(ptr);
> +	else
> +		page = virt_to_page(ptr);
> +	return dma_map_page_attrs(dev, page, offset_in_page(ptr),
>  			size, dir, attrs);
>  }
>
> diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
> index 06366acd27b0..51e1fe9a70aa 100644
> --- a/kernel/dma/debug.c
> +++ b/kernel/dma/debug.c
> @@ -1198,10 +1198,6 @@ void debug_dma_map_single(struct device *dev, const void *addr,
>  	if (!virt_addr_valid(addr))
>  		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
>  			   addr, len);
> -
> -	if (is_vmalloc_addr(addr))
> -		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
> -			   addr, len);
>  }
>  EXPORT_SYMBOL(debug_dma_map_single);
>
> --
> 2.25.1
>
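
To make the reported failure mode concrete, a minimal sketch of the pattern
might look like this, assuming a device whose coherent pool was registered
through dma_init_coherent_memory(); sketch_repro() and its contents are
illustrative, not the actual driver code:

	/*
	 * Hypothetical reproduction: dev's coherent pool was set up via
	 * dma_init_coherent_memory(), so the allocation below may return a
	 * memremap()ed (vmalloc-range) address.
	 */
	static int sketch_repro(struct device *dev)
	{
		dma_addr_t handle, addr;
		void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle,
					       GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* is_vmalloc_addr(buf) may be true here ... */
		addr = dma_map_single(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);
		/*
		 * ... so this trips the "rejecting DMA map of vmalloc
		 * memory" warning and yields DMA_MAPPING_ERROR.  Per the
		 * review above, coherent allocations must not be passed
		 * to dma_map_*() at all.
		 */
		return dma_mapping_error(dev, addr);
	}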