In a Confidential VM (CVM), system memory is encrypted
by default. Device drivers typically use the swiotlb
bounce buffer for DMA memory, which is decrypted and
shared between the guest and the host. Confidential
VMBus, however, supports a confidential channel that
uses encrypted memory for the VMBus ring buffer and
for external DMA memory. Support for the confidential
ring buffer has already been merged.
In a CVM, device drivers usually employ the standard
DMA API to map DMA memory through the bounce buffer,
which remains transparent to the device driver. To
support external DMA memory, introduce Hyper-V
specific DMA operations that bypass the bounce buffer
when the confidential external memory flag is set on
the channel. These DMA operations may also be reused
in the future for TDISP devices, which likewise
support DMA with encrypted memory.
The DMA operations are installed as the global
architecture DMA operations (see get_arch_dma_ops()
and get_dma_ops() for details), so there is no need
to set them up for each device individually.
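For reference, the reason a single assignment of the
global dma_ops pointer covers all devices is the
lookup below (paraphrased from include/linux/dma-map-ops.h
and the x86 <asm/dma-mapping.h>; the exact form varies
across kernel versions):

	/* Per-device ops take precedence; otherwise the
	 * architecture's global ops, i.e. the dma_ops
	 * pointer this patch assigns, apply to every device.
	 */
	static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		if (dev->dma_ops)
			return dev->dma_ops;
		return get_arch_dma_ops(); /* returns dma_ops on x86 */
	}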
Signed-off-by: Tianyu Lan <tiala@microsoft.com>
---
drivers/hv/vmbus_drv.c | 90 +++++++++++++++++++++++++++++++++++++++++-
1 file changed, 89 insertions(+), 1 deletion(-)
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0dc4692b411a..ca31231b2c32 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -39,6 +39,9 @@
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
+#include "../../kernel/dma/direct.h"
+
+extern const struct dma_map_ops *dma_ops;
struct vmbus_dynid {
struct list_head node;
@@ -1429,6 +1432,88 @@ static int vmbus_alloc_synic_and_connect(void)
return -ENOMEM;
}
+
+static bool hyperv_private_memory_dma(struct device *dev)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (hv_dev && hv_dev->channel && hv_dev->channel->co_external_memory)
+ return true;
+ else
+ return false;
+}
+
+static dma_addr_t hyperv_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t phys = page_to_phys(page) + offset;
+
+ if (hyperv_private_memory_dma(dev))
+ return __phys_to_dma(dev, phys);
+ else
+ return dma_direct_map_phys(dev, phys, size, dir, attrs);
+}
+
+static void hyperv_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (!hyperv_private_memory_dma(dev))
+ dma_direct_unmap_phys(dev, dma_handle, size, dir, attrs);
+}
+
+static int hyperv_dma_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *sg;
+ dma_addr_t dma_addr;
+ int i;
+
+ if (hyperv_private_memory_dma(dev)) {
+ for_each_sg(sgl, sg, nelems, i) {
+ dma_addr = __phys_to_dma(dev, sg_phys(sg));
+ sg_dma_address(sg) = dma_addr;
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nelems;
+ } else {
+ return dma_direct_map_sg(dev, sgl, nelems, dir, attrs);
+ }
+}
+
+static void hyperv_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (!hyperv_private_memory_dma(dev))
+ dma_direct_unmap_sg(dev, sgl, nelems, dir, attrs);
+}
+
+static int hyperv_dma_supported(struct device *dev, u64 mask)
+{
+ dev->coherent_dma_mask = mask;
+ return 1;
+}
+
+static size_t hyperv_dma_max_mapping_size(struct device *dev)
+{
+ if (hyperv_private_memory_dma(dev))
+ return SIZE_MAX;
+ else
+ return swiotlb_max_mapping_size(dev);
+}
+
+const struct dma_map_ops hyperv_dma_ops = {
+ .map_page = hyperv_dma_map_page,
+ .unmap_page = hyperv_dma_unmap_page,
+ .map_sg = hyperv_dma_map_sg,
+ .unmap_sg = hyperv_dma_unmap_sg,
+ .dma_supported = hyperv_dma_supported,
+ .max_mapping_size = hyperv_dma_max_mapping_size,
+};
+
/*
* vmbus_bus_init -Main vmbus driver initialization routine.
*
@@ -1479,8 +1564,11 @@ static int vmbus_bus_init(void)
* doing that on each VP while initializing SynIC's wastes time.
*/
is_confidential = ms_hyperv.confidential_vmbus_available;
- if (is_confidential)
+ if (is_confidential) {
+ dma_ops = &hyperv_dma_ops;
pr_info("Establishing connection to the confidential VMBus\n");
+ }
+
hv_para_set_sint_proxy(!is_confidential);
ret = vmbus_alloc_synic_and_connect();
if (ret)
--
2.50.1
From: Roman Kisel <vdso@hexbites.dev>
Tianyu Lan wrote:
> In a Confidential VM (CVM), system memory is encrypted
> by default. Device drivers typically use the swiotlb
> bounce buffer for DMA memory, which is decrypted and
> shared between the guest and the host. Confidential
> VMBus, however, supports a confidential channel that
> uses encrypted memory for the VMBus ring buffer and
> for external DMA memory. Support for the confidential
> ring buffer has already been merged.
>
> In a CVM, device drivers usually employ the standard
> DMA API to map DMA memory through the bounce buffer,
> which remains transparent to the device driver. To
> support external DMA memory, introduce Hyper-V
> specific DMA operations that bypass the bounce buffer
> when the confidential external memory flag is set on
> the channel. These DMA operations may also be reused
> in the future for TDISP devices, which likewise
> support DMA with encrypted memory.
>
> The DMA operations are installed as the global
> architecture DMA operations (see get_arch_dma_ops()
> and get_dma_ops() for details), so there is no need
> to set them up for each device individually.
>
> Signed-off-by: Tianyu Lan <tiala@microsoft.com>
Tianyu,
Looks great to me!
Reviewed-by: Roman Kisel <vdso@hexbites.dev>
P.S. For the interested reader, here is my earlier attempt to solve
this; it covered only storage and was unsatisfactory in other ways:
https://lore.kernel.org/linux-hyperv/20250409000835.285105-6-romank@linux.microsoft.com/
https://lore.kernel.org/linux-hyperv/20250409000835.285105-7-romank@linux.microsoft.com/
Maybe it'd be a good idea to CC folks who provided feedback back then.
> ---
> drivers/hv/vmbus_drv.c | 90 +++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 89 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
> index 0dc4692b411a..ca31231b2c32 100644
> --- a/drivers/hv/vmbus_drv.c
> +++ b/drivers/hv/vmbus_drv.c
> @@ -39,6 +39,9 @@
> #include <clocksource/hyperv_timer.h>
> #include <asm/mshyperv.h>
> #include "hyperv_vmbus.h"
> +#include "../../kernel/dma/direct.h"
> +
> +extern const struct dma_map_ops *dma_ops;
>
> struct vmbus_dynid {
> struct list_head node;
> @@ -1429,6 +1432,88 @@ static int vmbus_alloc_synic_and_connect(void)
> return -ENOMEM;
> }
>
> +
> +static bool hyperv_private_memory_dma(struct device *dev)
> +{
> + struct hv_device *hv_dev = device_to_hv_device(dev);
> +
> + if (hv_dev && hv_dev->channel && hv_dev->channel->co_external_memory)
> + return true;
> + else
> + return false;
> +}
> +
> +static dma_addr_t hyperv_dma_map_page(struct device *dev, struct page *page,
> + unsigned long offset, size_t size,
> + enum dma_data_direction dir,
> + unsigned long attrs)
> +{
> + phys_addr_t phys = page_to_phys(page) + offset;
> +
> + if (hyperv_private_memory_dma(dev))
> + return __phys_to_dma(dev, phys);
> + else
> + return dma_direct_map_phys(dev, phys, size, dir, attrs);
> +}
> +
> +static void hyperv_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
> + size_t size, enum dma_data_direction dir, unsigned long attrs)
> +{
> + if (!hyperv_private_memory_dma(dev))
> + dma_direct_unmap_phys(dev, dma_handle, size, dir, attrs);
> +}
> +
> +static int hyperv_dma_map_sg(struct device *dev, struct scatterlist *sgl,
> + int nelems, enum dma_data_direction dir,
> + unsigned long attrs)
> +{
> + struct scatterlist *sg;
> + dma_addr_t dma_addr;
> + int i;
> +
> + if (hyperv_private_memory_dma(dev)) {
> + for_each_sg(sgl, sg, nelems, i) {
> + dma_addr = __phys_to_dma(dev, sg_phys(sg));
> + sg_dma_address(sg) = dma_addr;
> + sg_dma_len(sg) = sg->length;
> + }
> +
> + return nelems;
> + } else {
> + return dma_direct_map_sg(dev, sgl, nelems, dir, attrs);
> + }
> +}
> +
> +static void hyperv_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
> + int nelems, enum dma_data_direction dir, unsigned long attrs)
> +{
> + if (!hyperv_private_memory_dma(dev))
> + dma_direct_unmap_sg(dev, sgl, nelems, dir, attrs);
> +}
> +
> +static int hyperv_dma_supported(struct device *dev, u64 mask)
> +{
> + dev->coherent_dma_mask = mask;
> + return 1;
> +}
> +
> +static size_t hyperv_dma_max_mapping_size(struct device *dev)
> +{
> + if (hyperv_private_memory_dma(dev))
> + return SIZE_MAX;
> + else
> + return swiotlb_max_mapping_size(dev);
> +}
> +
> +const struct dma_map_ops hyperv_dma_ops = {
> + .map_page = hyperv_dma_map_page,
> + .unmap_page = hyperv_dma_unmap_page,
> + .map_sg = hyperv_dma_map_sg,
> + .unmap_sg = hyperv_dma_unmap_sg,
> + .dma_supported = hyperv_dma_supported,
> + .max_mapping_size = hyperv_dma_max_mapping_size,
> +};
> +
> /*
> * vmbus_bus_init -Main vmbus driver initialization routine.
> *
> @@ -1479,8 +1564,11 @@ static int vmbus_bus_init(void)
> * doing that on each VP while initializing SynIC's wastes time.
> */
> is_confidential = ms_hyperv.confidential_vmbus_available;
> - if (is_confidential)
> + if (is_confidential) {
> + dma_ops = &hyperv_dma_ops;
> pr_info("Establishing connection to the confidential VMBus\n");
> + }
> +
> hv_para_set_sint_proxy(!is_confidential);
> ret = vmbus_alloc_synic_and_connect();
> if (ret)
> --
> 2.50.1
On Thu, Nov 27, 2025 at 1:16 PM <vdso@hexbites.dev> wrote:
>
> From: Roman Kisel <vdso@hexbites.dev>
>
> Tianyu Lan wrote:
>
> > In a Confidential VM (CVM), system memory is encrypted
> > by default. Device drivers typically use the swiotlb
> > bounce buffer for DMA memory, which is decrypted and
> > shared between the guest and the host. Confidential
> > VMBus, however, supports a confidential channel that
> > uses encrypted memory for the VMBus ring buffer and
> > for external DMA memory. Support for the confidential
> > ring buffer has already been merged.
> >
> > In a CVM, device drivers usually employ the standard
> > DMA API to map DMA memory through the bounce buffer,
> > which remains transparent to the device driver. To
> > support external DMA memory, introduce Hyper-V
> > specific DMA operations that bypass the bounce buffer
> > when the confidential external memory flag is set on
> > the channel. These DMA operations may also be reused
> > in the future for TDISP devices, which likewise
> > support DMA with encrypted memory.
> >
> > The DMA operations are installed as the global
> > architecture DMA operations (see get_arch_dma_ops()
> > and get_dma_ops() for details), so there is no need
> > to set them up for each device individually.
> >
> > Signed-off-by: Tianyu Lan <tiala@microsoft.com>
>
> Tianyu,
>
> Looks great to me!
>
> Reviewed-by: Roman Kisel <vdso@hexbites.dev>
>
> P.S. For the interested reader, here is my earlier attempt to solve
> this; it covered only storage and was unsatisfactory in other ways:
>
> https://lore.kernel.org/linux-hyperv/20250409000835.285105-6-romank@linux.microsoft.com/
> https://lore.kernel.org/linux-hyperv/20250409000835.285105-7-romank@linux.microsoft.com/
>
> Maybe it'd be a good idea to CC folks who provided feedback back then.
>
Hi Roman:
Thanks for your review. I will follow your suggestion.
Thanks
> > ---
> > drivers/hv/vmbus_drv.c | 90 +++++++++++++++++++++++++++++++++++++++++-
> > 1 file changed, 89 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
> > index 0dc4692b411a..ca31231b2c32 100644
> > --- a/drivers/hv/vmbus_drv.c
> > +++ b/drivers/hv/vmbus_drv.c
> > @@ -39,6 +39,9 @@
> > #include <clocksource/hyperv_timer.h>
> > #include <asm/mshyperv.h>
> > #include "hyperv_vmbus.h"
> > +#include "../../kernel/dma/direct.h"
> > +
> > +extern const struct dma_map_ops *dma_ops;
> >
> > struct vmbus_dynid {
> > struct list_head node;
> > @@ -1429,6 +1432,88 @@ static int vmbus_alloc_synic_and_connect(void)
> > return -ENOMEM;
> > }
> >
> > +
> > +static bool hyperv_private_memory_dma(struct device *dev)
> > +{
> > + struct hv_device *hv_dev = device_to_hv_device(dev);
> > +
> > + if (hv_dev && hv_dev->channel && hv_dev->channel->co_external_memory)
> > + return true;
> > + else
> > + return false;
> > +}
> > +
> > +static dma_addr_t hyperv_dma_map_page(struct device *dev, struct page *page,
> > + unsigned long offset, size_t size,
> > + enum dma_data_direction dir,
> > + unsigned long attrs)
> > +{
> > + phys_addr_t phys = page_to_phys(page) + offset;
> > +
> > + if (hyperv_private_memory_dma(dev))
> > + return __phys_to_dma(dev, phys);
> > + else
> > + return dma_direct_map_phys(dev, phys, size, dir, attrs);
> > +}
> > +
> > +static void hyperv_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
> > + size_t size, enum dma_data_direction dir, unsigned long attrs)
> > +{
> > + if (!hyperv_private_memory_dma(dev))
> > + dma_direct_unmap_phys(dev, dma_handle, size, dir, attrs);
> > +}
> > +
> > +static int hyperv_dma_map_sg(struct device *dev, struct scatterlist *sgl,
> > + int nelems, enum dma_data_direction dir,
> > + unsigned long attrs)
> > +{
> > + struct scatterlist *sg;
> > + dma_addr_t dma_addr;
> > + int i;
> > +
> > + if (hyperv_private_memory_dma(dev)) {
> > + for_each_sg(sgl, sg, nelems, i) {
> > + dma_addr = __phys_to_dma(dev, sg_phys(sg));
> > + sg_dma_address(sg) = dma_addr;
> > + sg_dma_len(sg) = sg->length;
> > + }
> > +
> > + return nelems;
> > + } else {
> > + return dma_direct_map_sg(dev, sgl, nelems, dir, attrs);
> > + }
> > +}
> > +
> > +static void hyperv_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
> > + int nelems, enum dma_data_direction dir, unsigned long attrs)
> > +{
> > + if (!hyperv_private_memory_dma(dev))
> > + dma_direct_unmap_sg(dev, sgl, nelems, dir, attrs);
> > +}
> > +
> > +static int hyperv_dma_supported(struct device *dev, u64 mask)
> > +{
> > + dev->coherent_dma_mask = mask;
> > + return 1;
> > +}
> > +
> > +static size_t hyperv_dma_max_mapping_size(struct device *dev)
> > +{
> > + if (hyperv_private_memory_dma(dev))
> > + return SIZE_MAX;
> > + else
> > + return swiotlb_max_mapping_size(dev);
> > +}
> > +
> > +const struct dma_map_ops hyperv_dma_ops = {
> > + .map_page = hyperv_dma_map_page,
> > + .unmap_page = hyperv_dma_unmap_page,
> > + .map_sg = hyperv_dma_map_sg,
> > + .unmap_sg = hyperv_dma_unmap_sg,
> > + .dma_supported = hyperv_dma_supported,
> > + .max_mapping_size = hyperv_dma_max_mapping_size,
> > +};
> > +
> > /*
> > * vmbus_bus_init -Main vmbus driver initialization routine.
> > *
> > @@ -1479,8 +1564,11 @@ static int vmbus_bus_init(void)
> > * doing that on each VP while initializing SynIC's wastes time.
> > */
> > is_confidential = ms_hyperv.confidential_vmbus_available;
> > - if (is_confidential)
> > + if (is_confidential) {
> > + dma_ops = &hyperv_dma_ops;
> > pr_info("Establishing connection to the confidential VMBus\n");
> > + }
> > +
> > hv_para_set_sint_proxy(!is_confidential);
> > ret = vmbus_alloc_synic_and_connect();
> > if (ret)
> > --
> > 2.50.1
--
Thanks
Tianyu Lan