From: Roman Skakun <Roman_Skakun@epam.com>
This commit fixes an incorrect conversion from a cpu_addr to a page
address for the case where the virtual address was allocated from the
vmalloc range.  virt_to_page() cannot translate such an address and
returns a bogus page pointer.  Detect these cases and obtain the page
address with vmalloc_to_page() instead.
Signed-off-by: Roman Skakun <roman_skakun@epam.com>
Reviewed-by: Andrii Anisov <andrii_anisov@epam.com>
---
Hi, Christoph!
Here is the updated patch, reworked according to your and Stefano's
suggestions.
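
For background on why the distinction matters: on most architectures
virt_to_page() is pure address arithmetic that assumes the linear
(lowmem) mapping, so it silently returns a bogus struct page for a
vmalloc address, while vmalloc_to_page() walks the page tables to find
the real backing page.  A minimal sketch, illustration only and not
part of this patch (the demo function name is made up):

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Illustration only -- not part of the patch. */
	static void vmalloc_page_lookup_demo(void)
	{
		void *buf = vmalloc(PAGE_SIZE);	/* lands in the vmalloc range */
		struct page *page;

		if (!buf)
			return;

		/*
		 * virt_to_page(buf) would be wrong here: it is linear-map
		 * address arithmetic, not a page-table walk, so it yields
		 * a bogus struct page for any vmalloc address.
		 */
		page = vmalloc_to_page(buf);	/* page-table walk: the real page */

		pr_info("vmalloc addr %p backed by page %p\n", buf, page);
		vfree(buf);
	}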
 drivers/xen/swiotlb-xen.c   |  7 +------
 include/linux/dma-map-ops.h |  2 ++
 kernel/dma/ops_helpers.c    | 16 ++++++++++++++--
 3 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 92ee6eea30cd..c2f612a10a95 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -337,7 +337,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	int order = get_order(size);
 	phys_addr_t phys;
 	u64 dma_mask = DMA_BIT_MASK(32);
-	struct page *page;
+	struct page *page = cpu_addr_to_page(vaddr);
 
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = hwdev->coherent_dma_mask;
@@ -349,11 +349,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	/* Convert the size to actually allocated. */
 	size = 1UL << (order + XEN_PAGE_SHIFT);
 
-	if (is_vmalloc_addr(vaddr))
-		page = vmalloc_to_page(vaddr);
-	else
-		page = virt_to_page(vaddr);
-
 	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
 		     range_straddles_page_boundary(phys, size)) &&
 	    TestClearPageXenRemapped(page))
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index a5f89fc4d6df..ce0edb0bb603 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -226,6 +226,8 @@ struct page *dma_alloc_from_pool(struct device *dev, size_t size,
 		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
 bool dma_free_from_pool(struct device *dev, void *start, size_t size);
 
+struct page *cpu_addr_to_page(void *cpu_addr);
+
 #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
 #include <asm/dma-coherence.h>
 #elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
index 910ae69cae77..472e861750d3 100644
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -5,6 +5,17 @@
  */
 #include <linux/dma-map-ops.h>
 
+/*
+ * This helper converts a kernel virtual address to its struct page.
+ */
+struct page *cpu_addr_to_page(void *cpu_addr)
+{
+	if (is_vmalloc_addr(cpu_addr))
+		return vmalloc_to_page(cpu_addr);
+	else
+		return virt_to_page(cpu_addr);
+}
+
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
@@ -12,7 +23,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
-	struct page *page = virt_to_page(cpu_addr);
+	struct page *page = cpu_addr_to_page(cpu_addr);
 	int ret;
 
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +43,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
+	struct page *page = cpu_addr_to_page(cpu_addr);
 	int ret = -ENXIO;
 
 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +55,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		return -ENXIO;
 
 	return remap_pfn_range(vma, vma->vm_start,
-			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+			page_to_pfn(page) + vma->vm_pgoff,
 			user_count << PAGE_SHIFT, vma->vm_page_prot);
 #else
 	return -ENXIO;
--
2.27.0
Technically this looks good.  But given that exposing a helper that
does either vmalloc_to_page or virt_to_page is one of the never ending
MM discussions I don't want to get into that discussion and just keep
it local in the DMA code.

Are you fine with me applying this version?

---
From 40ac971eab89330d6153e7721e88acd2d98833f9 Mon Sep 17 00:00:00 2001
From: Roman Skakun <Roman_Skakun@epam.com>
Date: Fri, 16 Jul 2021 11:39:34 +0300
Subject: dma-mapping: handle vmalloc addresses in
 dma_common_{mmap,get_sgtable}

xen-swiotlb can use vmalloc backed addresses for dma coherent allocations
and uses the common helpers.  Properly handle them to unbreak Xen on
ARM platforms.

Fixes: 1b65c4e5a9af ("swiotlb-xen: use xen_alloc/free_coherent_pages")
Signed-off-by: Roman Skakun <roman_skakun@epam.com>
Reviewed-by: Andrii Anisov <andrii_anisov@epam.com>
[hch: split the patch, renamed the helpers]
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/ops_helpers.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
index 910ae69cae77..af4a6ef48ce0 100644
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -5,6 +5,13 @@
  */
 #include <linux/dma-map-ops.h>
 
+static struct page *dma_common_vaddr_to_page(void *cpu_addr)
+{
+	if (is_vmalloc_addr(cpu_addr))
+		return vmalloc_to_page(cpu_addr);
+	return virt_to_page(cpu_addr);
+}
+
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
@@ -12,7 +19,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
-	struct page *page = virt_to_page(cpu_addr);
+	struct page *page = dma_common_vaddr_to_page(cpu_addr);
 	int ret;
 
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
+	struct page *page = dma_common_vaddr_to_page(cpu_addr);
 	int ret = -ENXIO;
 
 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		return -ENXIO;
 
 	return remap_pfn_range(vma, vma->vm_start,
-			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+			page_to_pfn(page) + vma->vm_pgoff,
 			user_count << PAGE_SHIFT, vma->vm_page_prot);
 #else
 	return -ENXIO;
--
2.30.2
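
(For readers following the thread: a sketch of how a driver typically
reaches the path this patch fixes.  The fragment is hypothetical --
mydrv_mmap is made up; only dma_alloc_coherent(), dma_mmap_coherent()
and dma_free_coherent() are real APIs.)

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	/*
	 * Hypothetical driver fragment, illustration only.  On Xen/ARM the
	 * coherent buffer may be vmalloc-backed, which is exactly the case
	 * dma_common_mmap() now handles via dma_common_vaddr_to_page().
	 */
	static int mydrv_mmap(struct device *dev, struct vm_area_struct *vma,
			      size_t size)
	{
		dma_addr_t dma_handle;
		void *cpu_addr;
		int ret;

		cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
		if (!cpu_addr)
			return -ENOMEM;

		/* Ends up in dma_common_mmap() -> remap_pfn_range(). */
		ret = dma_mmap_coherent(dev, vma, cpu_addr, dma_handle, size);
		if (ret)
			dma_free_coherent(dev, size, cpu_addr, dma_handle);
		return ret;
	}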
> Technically this looks good.  But given that exposing a helper
> that does either vmalloc_to_page or virt_to_page is one of the
> never ending MM discussions I don't want to get into that discussion
> and just keep it local in the DMA code.
>
> Are you fine with me applying this version?

Looks good to me, thanks!
But Stefano asked me about using the new helper in
xen_swiotlb_free_coherent(), so I created a patch along those lines.

We can merge this patch and create a new one for
xen_swiotlb_free_coherent() later.

On Fri, 16 Jul 2021 at 12:35, Christoph Hellwig <hch@lst.de> wrote:
> [full patch quoted above, trimmed]

--
Best Regards, Roman.
On Fri, 16 Jul 2021, Roman Skakun wrote:
> > Technically this looks good.  But given that exposing a helper
> > that does either vmalloc_to_page or virt_to_page is one of the
> > never ending MM discussions I don't want to get into that discussion
> > and just keep it local in the DMA code.
> >
> > Are you fine with me applying this version?
>
> Looks good to me, thanks!
> But Stefano asked me about using the new helper in
> xen_swiotlb_free_coherent(), so I created a patch along those lines.
>
> We can merge this patch and create a new one for
> xen_swiotlb_free_coherent() later.

Yeah, no worries, I didn't know that exposing dma_common_vaddr_to_page
was problematic.

This patch is fine by me.

> On Fri, 16 Jul 2021 at 12:35, Christoph Hellwig <hch@lst.de> wrote:
> > [full patch quoted above, trimmed]
> We can merge this patch and create a new one for
> xen_swiotlb_free_coherent() later.
> Yeah, no worries, I didn't know that exposing dma_common_vaddr_to_page
> was problematic.
>
> This patch is fine by me.

Good, I agree too.  Waiting for Christoph.

On Fri, 16 Jul 2021 at 18:29, Stefano Stabellini <sstabellini@kernel.org> wrote:
> [earlier thread and patch quoted above, trimmed]

--
Best Regards, Roman.
On Sat, Jul 17, 2021 at 11:39:21AM +0300, Roman Skakun wrote:
> > We can merge this patch and create a new one for
> > xen_swiotlb_free_coherent() later.
> > Yeah, no worries, I didn't know that exposing dma_common_vaddr_to_page
> > was problematic.
> >
> > This patch is fine by me.
>
> Good, I agree too.  Waiting for Christoph.

Fine with me.  I've queued up the modified patch.
> Fine with me.  I've queued up the modified patch.

Good.  Thanks!

> On Sat, Jul 17, 2021 at 11:39:21AM +0300, Roman Skakun wrote:
> [...]

--
Best Regards, Roman.
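
(Similarly, the other entry point fixed by the queued patch is reached
through dma_get_sgtable().  A hypothetical fragment, illustration only
-- mydrv_export_sgt is made up; dma_get_sgtable() is the real API.)

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Hypothetical: exporting a coherent buffer as a scatterlist. */
	static int mydrv_export_sgt(struct device *dev, struct sg_table *sgt,
				    void *cpu_addr, dma_addr_t dma_handle,
				    size_t size)
	{
		/*
		 * Falls through to dma_common_get_sgtable(), which now maps
		 * cpu_addr to a page via dma_common_vaddr_to_page().
		 */
		return dma_get_sgtable(dev, sgt, cpu_addr, dma_handle, size);
	}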