When a device performs DMA to a bounce buffer, KMSAN is unaware of
the write and does not mark the data as initialized. When
swiotlb_bounce() later copies the bounce buffer back to the original
buffer, memcpy propagates the uninitialized shadow to the original
buffer, causing false positive uninit-value reports.
Fix this by calling kmsan_unpoison_memory() on the bounce buffer
before copying it back to the original buffer in the DMA_FROM_DEVICE
path, so that the subsequent memcpy naturally propagates the
now-initialized shadow to the destination.
Suggested-by: Alexander Potapenko <glider@google.com>
Link: https://lore.kernel.org/CAG_fn=WUGta-paG1BgsGRoAR+fmuCgh3xo=R3XdzOt_-DqSdHw@mail.gmail.com/
Fixes: 7ade4f10779c ("dma: kmsan: unpoison DMA mappings")
Signed-off-by: Shigeru Yoshida <syoshida@redhat.com>
---
kernel/dma/swiotlb.c | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d8e6f1d889d5..9fd73700ddcf 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -30,6 +30,7 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <linux/kmsan-checks.h>
#include <linux/iommu-helper.h>
#include <linux/init.h>
#include <linux/memblock.h>
@@ -901,10 +902,19 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
local_irq_save(flags);
page = pfn_to_page(pfn);
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_TO_DEVICE) {
+ /*
+ * Ideally, kmsan_check_highmem_page()
+ * could be used here to detect infoleaks,
+ * but callers may map uninitialized buffers
+ * that will be written by the device,
+ * causing false positives.
+ */
memcpy_from_page(vaddr, page, offset, sz);
- else
+ } else {
+ kmsan_unpoison_memory(vaddr, sz);
memcpy_to_page(page, offset, vaddr, sz);
+ }
local_irq_restore(flags);
size -= sz;
@@ -913,8 +923,15 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
offset = 0;
}
} else if (dir == DMA_TO_DEVICE) {
+ /*
+ * Ideally, kmsan_check_memory() could be used here to detect
+ * infoleaks (uninitialized data being sent to device), but
+ * callers may map uninitialized buffers that will be written
+ * by the device, causing false positives.
+ */
memcpy(vaddr, phys_to_virt(orig_addr), size);
} else {
+ kmsan_unpoison_memory(vaddr, size);
memcpy(phys_to_virt(orig_addr), vaddr, size);
}
}
--
2.52.0
On 15.03.2026 09:27, Shigeru Yoshida wrote:
> When a device performs DMA to a bounce buffer, KMSAN is unaware of
> the write and does not mark the data as initialized. When
> swiotlb_bounce() later copies the bounce buffer back to the original
> buffer, memcpy propagates the uninitialized shadow to the original
> buffer, causing false positive uninit-value reports.
>
> Fix this by calling kmsan_unpoison_memory() on the bounce buffer
> before copying it back in the DMA_FROM_DEVICE path, so that memcpy
> naturally propagates initialized shadow to the destination.
>
> Suggested-by: Alexander Potapenko <glider@google.com>
> Link: https://lore.kernel.org/CAG_fn=WUGta-paG1BgsGRoAR+fmuCgh3xo=R3XdzOt_-DqSdHw@mail.gmail.com/
> Fixes: 7ade4f10779c ("dma: kmsan: unpoison DMA mappings")
> Signed-off-by: Shigeru Yoshida <syoshida@redhat.com>
Applied to dma-mapping-fixes. Thanks!
> ---
> kernel/dma/swiotlb.c | 21 +++++++++++++++++++--
> 1 file changed, 19 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index d8e6f1d889d5..9fd73700ddcf 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -30,6 +30,7 @@
> #include <linux/gfp.h>
> #include <linux/highmem.h>
> #include <linux/io.h>
> +#include <linux/kmsan-checks.h>
> #include <linux/iommu-helper.h>
> #include <linux/init.h>
> #include <linux/memblock.h>
> @@ -901,10 +902,19 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
>
> local_irq_save(flags);
> page = pfn_to_page(pfn);
> - if (dir == DMA_TO_DEVICE)
> + if (dir == DMA_TO_DEVICE) {
> + /*
> + * Ideally, kmsan_check_highmem_page()
> + * could be used here to detect infoleaks,
> + * but callers may map uninitialized buffers
> + * that will be written by the device,
> + * causing false positives.
> + */
> memcpy_from_page(vaddr, page, offset, sz);
> - else
> + } else {
> + kmsan_unpoison_memory(vaddr, sz);
> memcpy_to_page(page, offset, vaddr, sz);
> + }
> local_irq_restore(flags);
>
> size -= sz;
> @@ -913,8 +923,15 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
> offset = 0;
> }
> } else if (dir == DMA_TO_DEVICE) {
> + /*
> + * Ideally, kmsan_check_memory() could be used here to detect
> + * infoleaks (uninitialized data being sent to device), but
> + * callers may map uninitialized buffers that will be written
> + * by the device, causing false positives.
> + */
> memcpy(vaddr, phys_to_virt(orig_addr), size);
> } else {
> + kmsan_unpoison_memory(vaddr, size);
> memcpy(phys_to_virt(orig_addr), vaddr, size);
> }
> }
Best regards
--
Marek Szyprowski, PhD
Samsung R&D Institute Poland