From: Leon Romanovsky <leonro@nvidia.com>
Convert the KMSAN DMA handling function from a page-based to a
physical address-based interface.

The refactoring changes the kmsan_handle_dma() signature from
(struct page *page, size_t offset, size_t size) to (phys_addr_t phys,
size_t size). A pfn_valid() check is added so that KMSAN skips
non-page memory, i.e. addresses that are not backed by a struct page.

As part of this change, support for highmem addresses is implemented
using kmap_local_page() to handle both lowmem and highmem regions
properly. All callers throughout the codebase are updated to use the
new phys_addr_t based interface.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
drivers/virtio/virtio_ring.c | 4 ++--
include/linux/kmsan.h | 12 +++++++-----
kernel/dma/mapping.c | 2 +-
mm/kmsan/hooks.c | 36 +++++++++++++++++++++++++++++-------
tools/virtio/linux/kmsan.h | 2 +-
5 files changed, 40 insertions(+), 16 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index f5062061c4084..c147145a65930 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -378,7 +378,7 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
* is initialized by the hardware. Explicitly check/unpoison it
* depending on the direction.
*/
- kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
+ kmsan_handle_dma(sg_phys(sg), sg->length, direction);
*addr = (dma_addr_t)sg_phys(sg);
return 0;
}
@@ -3157,7 +3157,7 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
struct vring_virtqueue *vq = to_vvq(_vq);
if (!vq->use_dma_api) {
- kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
+ kmsan_handle_dma(virt_to_phys(ptr), size, dir);
return (dma_addr_t)virt_to_phys(ptr);
}
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
index 2b1432cc16d59..6f27b9824ef77 100644
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -182,8 +182,7 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
/**
* kmsan_handle_dma() - Handle a DMA data transfer.
- * @page: first page of the buffer.
- * @offset: offset of the buffer within the first page.
+ * @phys: physical address of the buffer.
* @size: buffer size.
* @dir: one of possible dma_data_direction values.
*
@@ -191,8 +190,11 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
* * checks the buffer, if it is copied to device;
* * initializes the buffer, if it is copied from device;
* * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ *
+ * The function handles page lookup internally and supports both lowmem
+ * and highmem addresses.
*/
-void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
enum dma_data_direction dir);
/**
@@ -372,8 +374,8 @@ static inline void kmsan_iounmap_page_range(unsigned long start,
{
}
-static inline void kmsan_handle_dma(struct page *page, size_t offset,
- size_t size, enum dma_data_direction dir)
+static inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
+ enum dma_data_direction dir)
{
}
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 80481a873340a..709405d46b2b4 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -172,7 +172,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
else
addr = ops->map_page(dev, page, offset, size, dir, attrs);
- kmsan_handle_dma(page, offset, size, dir);
+ kmsan_handle_dma(phys, size, dir);
trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
debug_dma_map_phys(dev, phys, size, dir, addr, attrs);
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 97de3d6194f07..eab7912a3bf05 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -336,25 +336,48 @@ static void kmsan_handle_dma_page(const void *addr, size_t size,
}
/* Helper function to handle DMA data transfers. */
-void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
enum dma_data_direction dir)
{
u64 page_offset, to_go, addr;
+ struct page *page;
+ void *kaddr;
- if (PageHighMem(page))
+ if (!pfn_valid(PHYS_PFN(phys)))
return;
- addr = (u64)page_address(page) + offset;
+
+ page = phys_to_page(phys);
+ page_offset = offset_in_page(phys);
+
/*
* The kernel may occasionally give us adjacent DMA pages not belonging
* to the same allocation. Process them separately to avoid triggering
* internal KMSAN checks.
*/
while (size > 0) {
- page_offset = offset_in_page(addr);
to_go = min(PAGE_SIZE - page_offset, (u64)size);
+
+ if (PageHighMem(page))
+ /* Handle highmem pages using kmap */
+ kaddr = kmap_local_page(page);
+ else
+ /* Lowmem pages can be accessed directly */
+ kaddr = page_address(page);
+
+ addr = (u64)kaddr + page_offset;
kmsan_handle_dma_page((void *)addr, to_go, dir);
- addr += to_go;
+
+ if (PageHighMem(page))
+ kunmap_local(kaddr);
+
+ phys += to_go;
size -= to_go;
+
+ /* Move to next page if needed */
+ if (size > 0) {
+ page = phys_to_page(phys);
+ page_offset = offset_in_page(phys);
+ }
}
}
EXPORT_SYMBOL_GPL(kmsan_handle_dma);
@@ -366,8 +389,7 @@ void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
int i;
for_each_sg(sg, item, nents, i)
- kmsan_handle_dma(sg_page(item), item->offset, item->length,
- dir);
+ kmsan_handle_dma(sg_phys(item), item->length, dir);
}
/* Functions from kmsan-checks.h follow. */
diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h
index 272b5aa285d5a..6cd2e3efd03dc 100644
--- a/tools/virtio/linux/kmsan.h
+++ b/tools/virtio/linux/kmsan.h
@@ -4,7 +4,7 @@
#include <linux/gfp.h>
-inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
enum dma_data_direction dir)
{
}
--
2.50.1
On Mon, Aug 04, 2025 at 03:42:42PM +0300, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@nvidia.com>
>
> Convert the KMSAN DMA handling function from a page-based to a
> physical address-based interface.
>
> The refactoring changes the kmsan_handle_dma() signature from
> (struct page *page, size_t offset, size_t size) to (phys_addr_t phys,
> size_t size). A pfn_valid() check is added so that KMSAN skips
> non-page memory, i.e. addresses that are not backed by a struct page.
>
> As part of this change, support for highmem addresses is implemented
> using kmap_local_page() to handle both lowmem and highmem regions
> properly. All callers throughout the codebase are updated to use the
> new phys_addr_t based interface.
Use the function Matthew pointed at, kmap_local_pfn().
Maybe introduce the kmap_local_phys() he suggested too.
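A minimal sketch of what such a kmap_local_phys() could look like,
assuming it is built on top of the existing kmap_local_pfn() (the
helper itself is hypothetical and does not exist in mainline):

static inline void *kmap_local_phys(phys_addr_t phys)
{
	/* The mapping is page-granular; fold the sub-page offset back in. */
	return kmap_local_pfn(PHYS_PFN(phys)) + offset_in_page(phys);
}

The result would be unmapped with the usual kunmap_local(), which
accepts any address within the mapped page.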
> /* Helper function to handle DMA data transfers. */
> -void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
> +void kmsan_handle_dma(phys_addr_t phys, size_t size,
> enum dma_data_direction dir)
> {
> u64 page_offset, to_go, addr;
> + struct page *page;
> + void *kaddr;
>
> - if (PageHighMem(page))
> + if (!pfn_valid(PHYS_PFN(phys)))
> return;
Not needed, the caller must pass in a phys that is kmap
compatible. Maybe just leave a comment. FWIW today this is also not
checking for P2P or DEVICE non-kmap struct pages either, so it should
be fine without checks.
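One possible shape of that comment-only variant, with the pfn_valid()
check replaced by a comment at the top of kmsan_handle_dma() (an
illustrative fragment, not the applied patch):

	/*
	 * No pfn_valid() check here: the caller must pass a phys that is
	 * kmap compatible. As before, P2P and DEVICE (non-kmap) struct
	 * pages are not checked for either.
	 */
	page = phys_to_page(phys);
	page_offset = offset_in_page(phys);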
> - addr = (u64)page_address(page) + offset;
> +
> + page = phys_to_page(phys);
> + page_offset = offset_in_page(phys);
> +
> /*
> * The kernel may occasionally give us adjacent DMA pages not belonging
> * to the same allocation. Process them separately to avoid triggering
> * internal KMSAN checks.
> */
> while (size > 0) {
> - page_offset = offset_in_page(addr);
> to_go = min(PAGE_SIZE - page_offset, (u64)size);
> +
> + if (PageHighMem(page))
> + /* Handle highmem pages using kmap */
> + kaddr = kmap_local_page(page);
No need for the PageHighMem() - just always call kmap_local_pfn().
I'd also propose that any debug/sanitizer checks that the passed phys
is valid for kmap (eg pfn valid, not zone_device, etc) should be
inside the kmap code.
Jason
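A minimal sketch of the loop reworked as suggested, calling
kmap_local_pfn() unconditionally (illustration only, assuming the
existing kmsan_handle_dma_page() helper and the patch's local
variables):

	while (size > 0) {
		page_offset = offset_in_page(phys);
		to_go = min(PAGE_SIZE - page_offset, (u64)size);

		/* kmap_local_pfn() handles lowmem and highmem pfns alike. */
		kaddr = kmap_local_pfn(PHYS_PFN(phys));
		kmsan_handle_dma_page(kaddr + page_offset, to_go, dir);
		kunmap_local(kaddr);

		phys += to_go;
		size -= to_go;
	}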
On Thu, Aug 07, 2025 at 09:21:15AM -0300, Jason Gunthorpe wrote:
> On Mon, Aug 04, 2025 at 03:42:42PM +0300, Leon Romanovsky wrote:
> > From: Leon Romanovsky <leonro@nvidia.com>
> >
> > Convert the KMSAN DMA handling function from a page-based to a
> > physical address-based interface.
> >
> > The refactoring changes the kmsan_handle_dma() signature from
> > (struct page *page, size_t offset, size_t size) to (phys_addr_t phys,
> > size_t size). A pfn_valid() check is added so that KMSAN skips
> > non-page memory, i.e. addresses that are not backed by a struct page.
> >
> > As part of this change, support for highmem addresses is implemented
> > using kmap_local_page() to handle both lowmem and highmem regions
> > properly. All callers throughout the codebase are updated to use the
> > new phys_addr_t based interface.
>
> Use the function Matthew pointed at, kmap_local_pfn().
>
> Maybe introduce the kmap_local_phys() he suggested too.
At this point it gives us nothing.
>
> > /* Helper function to handle DMA data transfers. */
> > -void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
> > +void kmsan_handle_dma(phys_addr_t phys, size_t size,
> > enum dma_data_direction dir)
> > {
> > u64 page_offset, to_go, addr;
> > + struct page *page;
> > + void *kaddr;
> >
> > - if (PageHighMem(page))
> > + if (!pfn_valid(PHYS_PFN(phys)))
> > return;
>
> Not needed, the caller must pass in a phys that is kmap
> compatible. Maybe just leave a comment. FWIW today this is also not
> checking for P2P or DEVICE non-kmap struct pages either, so it should
> be fine without checks.
That is not true, as we will call kmsan_handle_dma() unconditionally in
dma_map_phys(). The reason is that kmsan_handle_dma() is guarded by
debug kconfig options, so the cost of pfn_valid() can be accommodated
in that case. It gives cleaner DMA code.
dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
<...>

	kmsan_handle_dma(phys, size, dir);
	trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
	debug_dma_map_phys(dev, phys, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL_GPL(dma_map_phys);
So let's keep this patch as is.
Thanks
On Wed, Aug 13, 2025 at 06:07:18PM +0300, Leon Romanovsky wrote:
> > > /* Helper function to handle DMA data transfers. */
> > > -void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
> > > +void kmsan_handle_dma(phys_addr_t phys, size_t size,
> > > enum dma_data_direction dir)
> > > {
> > > u64 page_offset, to_go, addr;
> > > + struct page *page;
> > > + void *kaddr;
> > >
> > > - if (PageHighMem(page))
> > > + if (!pfn_valid(PHYS_PFN(phys)))
> > > return;
> >
> > Not needed, the caller must pass in a phys that is kmap
> > compatible. Maybe just leave a comment. FWIW today this is also not
> > checking for P2P or DEVICE non-kmap struct pages either, so it should
> > be fine without checks.
>
> That is not true, as we will call kmsan_handle_dma() unconditionally in
> dma_map_phys(). The reason is that kmsan_handle_dma() is guarded by
> debug kconfig options, so the cost of pfn_valid() can be accommodated
> in that case. It gives cleaner DMA code.
Then check attrs here, not pfn_valid.
> So let's keep this patch as is.
Still need to fix the remarks you clipped: do not check PageHighMem,
just call kmap_local_pfn(). All this PageHighMem stuff is new to this
patch and should not be here; it is the wrong way to use highmem.
Jason
On Thu, Aug 14, 2025 at 09:13:16AM -0300, Jason Gunthorpe wrote:
> On Wed, Aug 13, 2025 at 06:07:18PM +0300, Leon Romanovsky wrote:
> > > > /* Helper function to handle DMA data transfers. */
> > > > -void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
> > > > +void kmsan_handle_dma(phys_addr_t phys, size_t size,
> > > > enum dma_data_direction dir)
> > > > {
> > > > u64 page_offset, to_go, addr;
> > > > + struct page *page;
> > > > + void *kaddr;
> > > >
> > > > - if (PageHighMem(page))
> > > > + if (!pfn_valid(PHYS_PFN(phys)))
> > > > return;
> > >
> > > Not needed, the caller must pass in a phys that is kmap
> > > compatible. Maybe just leave a comment. FWIW today this is also not
> > > checking for P2P or DEVICE non-kmap struct pages either, so it should
> > > be fine without checks.
> >
> > That is not true, as we will call kmsan_handle_dma() unconditionally in
> > dma_map_phys(). The reason is that kmsan_handle_dma() is guarded by
> > debug kconfig options, so the cost of pfn_valid() can be accommodated
> > in that case. It gives cleaner DMA code.
>
> Then check attrs here, not pfn_valid.
attrs are not available in kmsan_handle_dma(). I can add it if you prefer.
>
> > So let's keep this patch as is.
>
> Still need to fix the remarks you clipped: do not check PageHighMem,
> just call kmap_local_pfn(). All this PageHighMem stuff is new to this
> patch and should not be here; it is the wrong way to use highmem.
Sure, thanks
>
> Jason
>
On Thu, Aug 14, 2025 at 03:35:06PM +0300, Leon Romanovsky wrote:
> > Then check attrs here, not pfn_valid.
>
> attrs are not available in kmsan_handle_dma(). I can add it if you prefer.

That makes more sense to the overall design. The comments I gave
before were driving at a promise to never try to touch a struct page
for ATTR_MMIO, and I think this should be comprehensive: never touch
a struct page even if it is pfn_valid().

> > > So let's keep this patch as is.
> >
> > Still need to fix the remarks you clipped: do not check PageHighMem,
> > just call kmap_local_pfn(). All this PageHighMem stuff is new to this
> > patch and should not be here; it is the wrong way to use highmem.
>
> Sure, thanks

I am wondering if there is some reason it was written like this in the
first place. Maybe we can't even do kmap here. So perhaps if there is
not a strong reason to change it, just continue to check PageHighMem
and fail:

if (!(attrs & ATTR_MMIO) && PageHighMem(phys_to_page(phys)))
	return;

Jason
On Thu, Aug 14, 2025 at 09:44:48AM -0300, Jason Gunthorpe wrote:
> On Thu, Aug 14, 2025 at 03:35:06PM +0300, Leon Romanovsky wrote:
> > > Then check attrs here, not pfn_valid.
> >
> > attrs are not available in kmsan_handle_dma(). I can add it if you prefer.
>
> That makes more sense to the overall design. The comments I gave
> before were driving at a promise to never try to touch a struct page
> for ATTR_MMIO, and I think this should be comprehensive: never touch
> a struct page even if it is pfn_valid().
>
> > > > So let's keep this patch as is.
> > >
> > > Still need to fix the remarks you clipped: do not check PageHighMem,
> > > just call kmap_local_pfn(). All this PageHighMem stuff is new to this
> > > patch and should not be here; it is the wrong way to use highmem.
> >
> > Sure, thanks
>
> I am wondering if there is some reason it was written like this in the
> first place. Maybe we can't even do kmap here. So perhaps if there is
> not a strong reason to change it, just continue to check PageHighMem
> and fail:
>
> if (!(attrs & ATTR_MMIO) && PageHighMem(phys_to_page(phys)))
> return;
Is this version good enough? There is no need to call
kmap_local_pfn() if we reject PageHighMem pages.
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index eab7912a3bf0..d9cf70f4159c 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -337,13 +337,13 @@ static void kmsan_handle_dma_page(const void *addr, size_t size,
/* Helper function to handle DMA data transfers. */
void kmsan_handle_dma(phys_addr_t phys, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, unsigned long attrs)
{
u64 page_offset, to_go, addr;
struct page *page;
void *kaddr;
- if (!pfn_valid(PHYS_PFN(phys)))
+ if ((attrs & ATTR_MMIO) || PageHighMem(phys_to_page(phys)))
return;
page = phys_to_page(phys);
@@ -357,19 +357,12 @@ void kmsan_handle_dma(phys_addr_t phys, size_t size,
while (size > 0) {
to_go = min(PAGE_SIZE - page_offset, (u64)size);
- if (PageHighMem(page))
- /* Handle highmem pages using kmap */
- kaddr = kmap_local_page(page);
- else
- /* Lowmem pages can be accessed directly */
- kaddr = page_address(page);
+ /* Lowmem pages can be accessed directly */
+ kaddr = page_address(page);
addr = (u64)kaddr + page_offset;
kmsan_handle_dma_page((void *)addr, to_go, dir);
- if (PageHighMem(page))
- kunmap_local(kaddr);
-
phys += to_go;
size -= to_go;
>
> Jason
>
On Thu, Aug 14, 2025 at 04:31:06PM +0300, Leon Romanovsky wrote:
> On Thu, Aug 14, 2025 at 09:44:48AM -0300, Jason Gunthorpe wrote:
> > On Thu, Aug 14, 2025 at 03:35:06PM +0300, Leon Romanovsky wrote:
> > > > Then check attrs here, not pfn_valid.
> > >
> > > attrs are not available in kmsan_handle_dma(). I can add it if you prefer.
> >
> > That makes more sense to the overall design. The comments I gave
> > before were driving at a promise to never try to touch a struct page
> > for ATTR_MMIO, and I think this should be comprehensive: never touch
> > a struct page even if it is pfn_valid().
> >
> > > > > So let's keep this patch as is.
> > > >
> > > > Still need to fix the remarks you clipped: do not check PageHighMem,
> > > > just call kmap_local_pfn(). All this PageHighMem stuff is new to this
> > > > patch and should not be here; it is the wrong way to use highmem.
> > >
> > > Sure, thanks
> >
> > I am wondering if there is some reason it was written like this in the
> > first place. Maybe we can't even do kmap here. So perhaps if there is
> > not a strong reason to change it, just continue to check PageHighMem
> > and fail:
> >
> > if (!(attrs & ATTR_MMIO) && PageHighMem(phys_to_page(phys)))
> > return;
>
> Is this version good enough? There is no need to call
> kmap_local_pfn() if we reject PageHighMem pages.
Why make the rest of the changes though, isn't it just:
if (PageHighMem(page))
return;
Becomes:
if (attrs & ATTR_MMIO)
return;
page = phys_to_page(phys);
if (PageHighMem(page))
return;
Leave the rest as is?
Jason