CONFIG_XEN_BACKEND is currently set when the host supports Xen,
regardless of the chosen targets. As a consequence, Xen backends can be
enabled even on targets that don't support Xen.
Fix the issue by setting CONFIG_XEN_BACKEND only for targets that
support Xen.
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: groug@kaod.org
CC: pbonzini@redhat.com
CC: peter.maydell@linaro.org
CC: rth@twiddle.net
CC: stefanha@redhat.com
---
configure | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/configure b/configure
index 6c21975..6d8f752 100755
--- a/configure
+++ b/configure
@@ -5442,7 +5442,6 @@ if test "$virglrenderer" = "yes" ; then
echo "VIRGL_LIBS=$virgl_libs" >> $config_host_mak
fi
if test "$xen" = "yes" ; then
- echo "CONFIG_XEN_BACKEND=y" >> $config_host_mak
echo "CONFIG_XEN_CTRL_INTERFACE_VERSION=$xen_ctrl_version" >> $config_host_mak
if test "$xen_pv_domain_build" = "yes" ; then
echo "CONFIG_XEN_PV_DOMAIN_BUILD=y" >> $config_host_mak
@@ -6028,6 +6027,7 @@ case "$target_name" in
i386|x86_64)
if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then
echo "CONFIG_XEN=y" >> $config_target_mak
+ echo "CONFIG_XEN_BACKEND=y" >> $config_target_mak
if test "$xen_pci_passthrough" = yes; then
echo "CONFIG_XEN_PCI_PASSTHROUGH=y" >> "$config_target_mak"
fi
--
1.9.1
On 14/03/2017 00:55, Stefano Stabellini wrote:
> CONFIG_XEN_BACKEND is currently set when the host supports Xen,
> regardless of the chosen targets. As a consequence, Xen backends can be
> enabled even on targets that don't support Xen.
>
> Fix the issue by setting CONFIG_XEN_BACKEND only for targets that
> support Xen.
>
> [...]
>
> @@ -6028,6 +6027,7 @@ case "$target_name" in
> i386|x86_64)
> if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then
> echo "CONFIG_XEN=y" >> $config_target_mak
> + echo "CONFIG_XEN_BACKEND=y" >> $config_target_mak
> if test "$xen_pci_passthrough" = yes; then
> echo "CONFIG_XEN_PCI_PASSTHROUGH=y" >> "$config_target_mak"
> fi

This messes up a bit the way xen_nic.o and friends are compiled, I
think, because they are common-obj-y but they are only found when
compiling in the target subdirectories. So you end up building
x86_64-softmmu/../hw/net/xen_nic.o

If you're unlucky, I believe this can lead to a link failure where a
target is building xen_nic.o, while the other tries to link to a
partially written object file.

I think the files should be changed from
common-obj-$(CONFIG_XEN_BACKEND) to common-obj-$(CONFIG_XEN). Then you
add to Makefile:

 CONFIG_SOFTMMU := $(if $(filter %-softmmu,$(TARGET_DIRS)),y)
 CONFIG_USER_ONLY := $(if $(filter %-user,$(TARGET_DIRS)),y)
+CONFIG_XEN := $(CONFIG_XEN_BACKEND)
 CONFIG_ALL=y
 -include config-all-devices.mak
 -include config-all-disas.mak

The Makefile change ensures that they are built before descending in the
target-specific directories. The Makefile.objs change ensures that Xen
backends are not enabled on targets that don't support Xen.

Paolo
On Tue, 14 Mar 2017, Paolo Bonzini wrote:
> On 14/03/2017 00:55, Stefano Stabellini wrote:
> > [...]
>
> This messes up a bit the way xen_nic.o and friends are compiled, I
> think, because they are common-obj-y but they are only found when
> compiling in the target subdirectories. So you end up building
> x86_64-softmmu/../hw/net/xen_nic.o
>
> If you're unlucky, I believe this can lead to a link failure where a
> target is building xen_nic.o, while the other tries to link to a
> partially written object file.
>
> I think the files should be changed from
> common-obj-$(CONFIG_XEN_BACKEND) to common-obj-$(CONFIG_XEN).
>
> The Makefile.objs change ensures that Xen backends are not enabled on
> targets that don't support Xen.

Actually I thought about doing that instead of the configure change. It
makes sense to me.

> Then you add to Makefile:
>
>  CONFIG_SOFTMMU := $(if $(filter %-softmmu,$(TARGET_DIRS)),y)
>  CONFIG_USER_ONLY := $(if $(filter %-user,$(TARGET_DIRS)),y)
> +CONFIG_XEN := $(CONFIG_XEN_BACKEND)
>  CONFIG_ALL=y
>  -include config-all-devices.mak
>  -include config-all-disas.mak
>
> The Makefile change ensures that they are built before descending in the
> target-specific directories.

But I don't understand this. Please correct me if I am wrong, but this
change looks like it would end up setting CONFIG_XEN every time that
CONFIG_XEN_BACKEND is set. Without the configure change at the top, it
would end up setting CONFIG_XEN whenever the host supports Xen, even for
non-x86 and non-ARM targets. What am I missing?
On Tue, 14 Mar 2017, Stefano Stabellini wrote:
> On Tue, 14 Mar 2017, Paolo Bonzini wrote:
> > [...]
> >
> > I think the files should be changed from
> > common-obj-$(CONFIG_XEN_BACKEND) to common-obj-$(CONFIG_XEN).
> >
> > Then you add to Makefile:
> >
> >  CONFIG_SOFTMMU := $(if $(filter %-softmmu,$(TARGET_DIRS)),y)
> >  CONFIG_USER_ONLY := $(if $(filter %-user,$(TARGET_DIRS)),y)
> > +CONFIG_XEN := $(CONFIG_XEN_BACKEND)
> >  CONFIG_ALL=y
> >  -include config-all-devices.mak
> >  -include config-all-disas.mak
>
> [...]
>
> But I don't understand this. Please correct me if I am wrong, but this
> change looks like it would end up setting CONFIG_XEN every time that
> CONFIG_XEN_BACKEND is set. Without the configure change at the top, it
> would end up setting CONFIG_XEN whenever the host supports Xen, even for
> non-x86 and non-ARM targets. What am I missing?

FYI I made the following changes:

diff --git a/Makefile b/Makefile
index 1c4c04f..b246138 100644
--- a/Makefile
+++ b/Makefile
@@ -26,6 +26,7 @@ endif

 CONFIG_SOFTMMU := $(if $(filter %-softmmu,$(TARGET_DIRS)),y)
 CONFIG_USER_ONLY := $(if $(filter %-user,$(TARGET_DIRS)),y)
+CONFIG_XEN := $(CONFIG_XEN_BACKEND)
 CONFIG_ALL=y
 -include config-all-devices.mak
 -include config-all-disas.mak
diff --git a/hw/block/Makefile.objs b/hw/block/Makefile.objs
index d4c3ab7..e0ed980 100644
--- a/hw/block/Makefile.objs
+++ b/hw/block/Makefile.objs
@@ -4,7 +4,7 @@ common-obj-$(CONFIG_SSI_M25P80) += m25p80.o
 common-obj-$(CONFIG_NAND) += nand.o
 common-obj-$(CONFIG_PFLASH_CFI01) += pflash_cfi01.o
 common-obj-$(CONFIG_PFLASH_CFI02) += pflash_cfi02.o
-common-obj-$(CONFIG_XEN_BACKEND) += xen_disk.o
+common-obj-$(CONFIG_XEN) += xen_disk.o
 common-obj-$(CONFIG_ECC) += ecc.o
 common-obj-$(CONFIG_ONENAND) += onenand.o
 common-obj-$(CONFIG_NVME_PCI) += nvme.o
diff --git a/hw/char/Makefile.objs b/hw/char/Makefile.objs
index 6ea76fe..725fdc4 100644
--- a/hw/char/Makefile.objs
+++ b/hw/char/Makefile.objs
@@ -7,7 +7,7 @@ common-obj-$(CONFIG_SERIAL_ISA) += serial-isa.o
 common-obj-$(CONFIG_SERIAL_PCI) += serial-pci.o
 common-obj-$(CONFIG_VIRTIO) += virtio-console.o
 common-obj-$(CONFIG_XILINX) += xilinx_uartlite.o
-common-obj-$(CONFIG_XEN_BACKEND) += xen_console.o
+common-obj-$(CONFIG_XEN) += xen_console.o
 common-obj-$(CONFIG_CADENCE) += cadence_uart.o

 obj-$(CONFIG_EXYNOS4) += exynos4210_uart.o
diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs
index 063889b..3d02e8b 100644
--- a/hw/display/Makefile.objs
+++ b/hw/display/Makefile.objs
@@ -5,7 +5,7 @@ common-obj-$(CONFIG_JAZZ_LED) += jazz_led.o
 common-obj-$(CONFIG_PL110) += pl110.o
 common-obj-$(CONFIG_SSD0303) += ssd0303.o
 common-obj-$(CONFIG_SSD0323) += ssd0323.o
-common-obj-$(CONFIG_XEN_BACKEND) += xenfb.o
+common-obj-$(CONFIG_XEN) += xenfb.o

 common-obj-$(CONFIG_VGA_PCI) += vga-pci.o
 common-obj-$(CONFIG_VGA_ISA) += vga-isa.o
diff --git a/hw/net/Makefile.objs b/hw/net/Makefile.objs
index 610ed3e..6a95d92 100644
--- a/hw/net/Makefile.objs
+++ b/hw/net/Makefile.objs
@@ -1,5 +1,5 @@
 common-obj-$(CONFIG_DP8393X) += dp8393x.o
-common-obj-$(CONFIG_XEN_BACKEND) += xen_nic.o
+common-obj-$(CONFIG_XEN) += xen_nic.o

 # PCI network cards
 common-obj-$(CONFIG_NE2000_PCI) += ne2000.o
diff --git a/hw/usb/Makefile.objs b/hw/usb/Makefile.objs
index 98b5c9d..5958be8 100644
--- a/hw/usb/Makefile.objs
+++ b/hw/usb/Makefile.objs
@@ -40,5 +40,5 @@ common-obj-$(CONFIG_USB_REDIR) += redirect.o quirks.o
 common-obj-y += $(patsubst %,host-%.o,$(HOST_USB))

 ifeq ($(CONFIG_USB_LIBUSB),y)
-common-obj-$(CONFIG_XEN_BACKEND) += xen-usb.o
+common-obj-$(CONFIG_XEN) += xen-usb.o
 endif
diff --git a/hw/xen/Makefile.objs b/hw/xen/Makefile.objs
index 591cdc2..4be3ec9 100644
--- a/hw/xen/Makefile.objs
+++ b/hw/xen/Makefile.objs
@@ -1,5 +1,5 @@
 # xen backend driver support
-common-obj-$(CONFIG_XEN_BACKEND) += xen_backend.o xen_devconfig.o xen_pvdev.o
+common-obj-$(CONFIG_XEN) += xen_backend.o xen_devconfig.o xen_pvdev.o

 obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen-host-pci-device.o
 obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen_pt.o xen_pt_config_init.o xen_pt_graphics.o xen_pt_msi.o

I applied the rest of the series and did

  ./configure --enable-virtfs --enable-xen; make -j5

I got the following error:

  LINK aarch64-softmmu/qemu-system-aarch64
../hw/9pfs/xen-9p-backend.o: In function `xen_9pfs_alloc':
/local/qemu/hw/9pfs/xen-9p-backend.c:387: undefined reference to `xenstore_write_be_str'
/local/qemu/hw/9pfs/xen-9p-backend.c:388: undefined reference to `xenstore_write_be_int'
../hw/9pfs/xen-9p-backend.o: In function `xen_9pfs_connect':
/local/qemu/hw/9pfs/xen-9p-backend.c:292: undefined reference to `xenstore_read_fe_int'
/local/qemu/hw/9pfs/xen-9p-backend.c:313: undefined reference to `xenstore_read_fe_int'
/local/qemu/hw/9pfs/xen-9p-backend.c:357: undefined reference to `xen_pv_printf'
/local/qemu/hw/9pfs/xen-9p-backend.c:307: undefined reference to `xenstore_read_fe_int'
/local/qemu/hw/9pfs/xen-9p-backend.c:362: undefined reference to `xenstore_read_be_str'
/local/qemu/hw/9pfs/xen-9p-backend.c:363: undefined reference to `xenstore_read_be_str'
/local/qemu/hw/9pfs/xen-9p-backend.c:366: undefined reference to `xenstore_read_fe_str'
/local/qemu/hw/9pfs/xen-9p-backend.c:353: undefined reference to `xen_pv_printf'
../hw/9pfs/xen-9p-backend.o: In function `xen_9pfs_alloc':
/local/qemu/hw/9pfs/xen-9p-backend.c:389: undefined reference to `xenstore_write_be_int'
collect2: error: ld returned 1 exit status
make[1]: *** [qemu-system-arm] Error 1
make: *** [subdir-arm-softmmu] Error 2
../hw/9pfs/xen-9p-backend.o: In function `xen_9pfs_alloc':
/local/qemu/hw/9pfs/xen-9p-backend.c:387: undefined reference to `xenstore_write_be_str'
/local/qemu/hw/9pfs/xen-9p-backend.c:388: undefined reference to `xenstore_write_be_int'
../hw/9pfs/xen-9p-backend.o: In function `xen_9pfs_connect':
/local/qemu/hw/9pfs/xen-9p-backend.c:292: undefined reference to `xenstore_read_fe_int'
/local/qemu/hw/9pfs/xen-9p-backend.c:313: undefined reference to `xenstore_read_fe_int'
/local/qemu/hw/9pfs/xen-9p-backend.c:357: undefined reference to `xen_pv_printf'
/local/qemu/hw/9pfs/xen-9p-backend.c:307: undefined reference to `xenstore_read_fe_int'
/local/qemu/hw/9pfs/xen-9p-backend.c:362: undefined reference to `xenstore_read_be_str'
/local/qemu/hw/9pfs/xen-9p-backend.c:363: undefined reference to `xenstore_read_be_str'
/local/qemu/hw/9pfs/xen-9p-backend.c:366: undefined reference to `xenstore_read_fe_str'
/local/qemu/hw/9pfs/xen-9p-backend.c:353: undefined reference to `xen_pv_printf'
../hw/9pfs/xen-9p-backend.o: In function `xen_9pfs_alloc':
/local/qemu/hw/9pfs/xen-9p-backend.c:389: undefined reference to `xenstore_write_be_int'
collect2: error: ld returned 1 exit status
make[1]: *** [qemu-system-aarch64] Error 1
make: *** [subdir-aarch64-softmmu] Error 2
On Tue, 14 Mar 2017 13:23:09 -0700 (PDT)
Stefano Stabellini <sstabellini@kernel.org> wrote:

> On Tue, 14 Mar 2017, Stefano Stabellini wrote:
> > On Tue, 14 Mar 2017, Paolo Bonzini wrote:
> > > [...]
> > > I think the files should be changed from
> > > common-obj-$(CONFIG_XEN_BACKEND) to common-obj-$(CONFIG_XEN).
> > > [...]
>
> FYI I made the following changes:
>
> [...]
>
> I applied the rest of the series and did
>
>   ./configure --enable-virtfs --enable-xen; make -j5
>
> I got the following error:
>
>   LINK aarch64-softmmu/qemu-system-aarch64
> ../hw/9pfs/xen-9p-backend.o: In function `xen_9pfs_alloc':
> /local/qemu/hw/9pfs/xen-9p-backend.c:387: undefined reference to `xenstore_write_be_str'
> /local/qemu/hw/9pfs/xen-9p-backend.c:388: undefined reference to `xenstore_write_be_int'
> [...]

It looks like hw/9pfs/Makefile.objs may still have:

common-obj-$(CONFIG_XEN_BACKEND) += xen-9p-backend.o

instead of:

common-obj-$(CONFIG_XEN) += xen-9p-backend.o

... which builds like a charm.

Cheers.

--
Greg
On 14/03/2017 21:23, Stefano Stabellini wrote:
> On Tue, 14 Mar 2017, Stefano Stabellini wrote:
>>> Then you add to Makefile:
>>>
>>> CONFIG_SOFTMMU := $(if $(filter %-softmmu,$(TARGET_DIRS)),y)
>>> CONFIG_USER_ONLY := $(if $(filter %-user,$(TARGET_DIRS)),y)
>>> +CONFIG_XEN := $(CONFIG_XEN_BACKEND)
>>> CONFIG_ALL=y
>>> -include config-all-devices.mak
>>> -include config-all-disas.mak
>>>
>>> The Makefile change ensures that they are built before descending in the
>>> target-specific directories.
>>
>> But I don't understand this. Please correct me if I am wrong, but this
>> change looks like it would end up setting CONFIG_XEN every time that
>> CONFIG_XEN_BACKEND is set. Without the configure change at the top, it
>> would end up setting CONFIG_XEN whenever the host supports Xen, even for
>> non-x86 and non-ARM targets. What am I missing?

This CONFIG_XEN assignment applies to the toplevel only, i.e. to files
that are built once. Targets will still take CONFIG_XEN from
config-target.mak, and it will not be set for non-x86/non-ARM targets.

The issue you reported here:

> LINK aarch64-softmmu/qemu-system-aarch64
> ../hw/9pfs/xen-9p-backend.o: In function `xen_9pfs_alloc':
> /local/qemu/hw/9pfs/xen-9p-backend.c:387: undefined reference to `xenstore_write_be_str'
> /local/qemu/hw/9pfs/xen-9p-backend.c:388: undefined reference to `xenstore_write_be_int'
> [...]

is because you need this in patch 9:

-common-obj-$(CONFIG_XEN_BACKEND) += xen-9p-backend.o
+common-obj-$(CONFIG_XEN) += xen-9p-backend.o
On Wed, 15 Mar 2017, Paolo Bonzini wrote:
> On 14/03/2017 21:23, Stefano Stabellini wrote:
> > [...]
>
> This CONFIG_XEN assignment applies to the toplevel only, i.e. to files
> that are built once. Targets will still take CONFIG_XEN from
> config-target.mak, and it will not be set for non-x86/non-ARM targets.
>
> The issue you reported here:
> > [...]
> is because you need this in patch 9:
>
> -common-obj-$(CONFIG_XEN_BACKEND) += xen-9p-backend.o
> +common-obj-$(CONFIG_XEN) += xen-9p-backend.o

/me shakes his head in shame. Thank you for the explanation!
Do not use the ring.h header installed on the system. Instead, import
the header into the QEMU codebase. This avoids problems when QEMU is
built against a Xen version too old to provide all the ring macros.
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
---
NB: The new macros have not been committed to Xen yet. Do not apply this
patch until they have been.
---
hw/block/xen_blkif.h | 2 +-
hw/usb/xen-usb.c | 2 +-
include/hw/xen/io/ring.h | 455 +++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 457 insertions(+), 2 deletions(-)
create mode 100644 include/hw/xen/io/ring.h
diff --git a/hw/block/xen_blkif.h b/hw/block/xen_blkif.h
index 3300b6f..3e6e1ea 100644
--- a/hw/block/xen_blkif.h
+++ b/hw/block/xen_blkif.h
@@ -1,7 +1,7 @@
#ifndef XEN_BLKIF_H
#define XEN_BLKIF_H
-#include <xen/io/ring.h>
+#include "hw/xen/io/ring.h"
#include <xen/io/blkif.h>
#include <xen/io/protocols.h>
diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c
index 8e676e6..370b3d9 100644
--- a/hw/usb/xen-usb.c
+++ b/hw/usb/xen-usb.c
@@ -33,7 +33,7 @@
#include "qapi/qmp/qint.h"
#include "qapi/qmp/qstring.h"
-#include <xen/io/ring.h>
+#include "hw/xen/io/ring.h"
#include <xen/io/usbif.h>
/*
diff --git a/include/hw/xen/io/ring.h b/include/hw/xen/io/ring.h
new file mode 100644
index 0000000..cf01fc3
--- /dev/null
+++ b/include/hw/xen/io/ring.h
@@ -0,0 +1,455 @@
+/******************************************************************************
+ * ring.h
+ *
+ * Shared producer-consumer ring macros.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Tim Deegan and Andrew Warfield November 2004.
+ */
+
+#ifndef __XEN_PUBLIC_IO_RING_H__
+#define __XEN_PUBLIC_IO_RING_H__
+
+#if __XEN_INTERFACE_VERSION__ < 0x00030208
+#define xen_mb() mb()
+#define xen_rmb() rmb()
+#define xen_wmb() wmb()
+#endif
+
+typedef unsigned int RING_IDX;
+
+/* Round a 32-bit unsigned constant down to the nearest power of two. */
+#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
+#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
+#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
+
+/*
+ * Calculate size of a shared ring, given the total available space for the
+ * ring and indexes (_sz), and the name tag of the request/response structure.
+ * A ring contains as many entries as will fit, rounded down to the nearest
+ * power of two (so we can mask with (size-1) to loop around).
+ */
+#define __CONST_RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
+ sizeof(((struct _s##_sring *)0)->ring[0])))
+/*
+ * The same for passing in an actual pointer instead of a name tag.
+ */
+#define __RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
+
+/*
+ * Macros to make the correct C datatypes for a new kind of ring.
+ *
+ * To make a new ring datatype, you need to have two message structures,
+ * let's say request_t, and response_t already defined.
+ *
+ * In a header where you want the ring datatype declared, you then do:
+ *
+ * DEFINE_RING_TYPES(mytag, request_t, response_t);
+ *
+ * These expand out to give you a set of types, as you can see below.
+ * The most important of these are:
+ *
+ * mytag_sring_t - The shared ring.
+ * mytag_front_ring_t - The 'front' half of the ring.
+ * mytag_back_ring_t - The 'back' half of the ring.
+ *
+ * To initialize a ring in your code you need to know the location and size
+ * of the shared memory area (PAGE_SIZE, for instance). To initialise
+ * the front half:
+ *
+ * mytag_front_ring_t front_ring;
+ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
+ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
+ *
+ * Initializing the back follows similarly (note that only the front
+ * initializes the shared ring):
+ *
+ * mytag_back_ring_t back_ring;
+ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
+ */
+
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
+ \
+/* Shared ring entry */ \
+union __name##_sring_entry { \
+ __req_t req; \
+ __rsp_t rsp; \
+}; \
+ \
+/* Shared ring page */ \
+struct __name##_sring { \
+ RING_IDX req_prod, req_event; \
+ RING_IDX rsp_prod, rsp_event; \
+ union { \
+ struct { \
+ uint8_t smartpoll_active; \
+ } netif; \
+ struct { \
+ uint8_t msg; \
+ } tapif_user; \
+ uint8_t pvt_pad[4]; \
+ } pvt; \
+ uint8_t __pad[44]; \
+ union __name##_sring_entry ring[1]; /* variable-length */ \
+}; \
+ \
+/* "Front" end's private variables */ \
+struct __name##_front_ring { \
+ RING_IDX req_prod_pvt; \
+ RING_IDX rsp_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
+/* "Back" end's private variables */ \
+struct __name##_back_ring { \
+ RING_IDX rsp_prod_pvt; \
+ RING_IDX req_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
+/* Syntactic sugar */ \
+typedef struct __name##_sring __name##_sring_t; \
+typedef struct __name##_front_ring __name##_front_ring_t; \
+typedef struct __name##_back_ring __name##_back_ring_t
+
+/*
+ * Macros for manipulating rings.
+ *
+ * FRONT_RING_whatever works on the "front end" of a ring: here
+ * requests are pushed on to the ring and responses taken off it.
+ *
+ * BACK_RING_whatever works on the "back end" of a ring: here
+ * requests are taken off the ring and responses put on.
+ *
+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
+ * This is OK in 1-for-1 request-response situations where the
+ * requestor (front end) never has more than RING_SIZE()-1
+ * outstanding requests.
+ */
+
+/* Initialising empty rings */
+#define SHARED_RING_INIT(_s) do { \
+ (_s)->req_prod = (_s)->rsp_prod = 0; \
+ (_s)->req_event = (_s)->rsp_event = 1; \
+ (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad)); \
+ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
+} while(0)
+
+#define FRONT_RING_INIT(_r, _s, __size) do { \
+ (_r)->req_prod_pvt = 0; \
+ (_r)->rsp_cons = 0; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
+} while (0)
+
+#define BACK_RING_INIT(_r, _s, __size) do { \
+ (_r)->rsp_prod_pvt = 0; \
+ (_r)->req_cons = 0; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
+} while (0)
+
+/* How big is this ring? */
+#define RING_SIZE(_r) \
+ ((_r)->nr_ents)
+
+/* Number of free requests (for use on front side only). */
+#define RING_FREE_REQUESTS(_r) \
+ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
+
+/* Test if there is an empty slot available on the front ring.
+ * (This is only meaningful from the front. )
+ */
+#define RING_FULL(_r) \
+ (RING_FREE_REQUESTS(_r) == 0)
+
+/* Test if there are outstanding messages to be processed on a ring. */
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+ ((_r)->sring->rsp_prod - (_r)->rsp_cons)
+
+#ifdef __GNUC__
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
+ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
+ unsigned int rsp = RING_SIZE(_r) - \
+ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
+ req < rsp ? req : rsp; \
+})
+#else
+/* Same as above, but without the nice GCC ({ ... }) syntax. */
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
+ ((((_r)->sring->req_prod - (_r)->req_cons) < \
+ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
+ ((_r)->sring->req_prod - (_r)->req_cons) : \
+ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
+#endif
+
+/* Direct access to individual ring elements, by index. */
+#define RING_GET_REQUEST(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+
+/*
+ * Get a local copy of a request.
+ *
+ * Use this in preference to RING_GET_REQUEST() so all processing is
+ * done on a local copy that cannot be modified by the other end.
+ *
+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
+ * to be ineffective where _req is a struct which consists of only bitfields.
+ */
+#define RING_COPY_REQUEST(_r, _idx, _req) do { \
+ /* Use volatile to force the copy into _req. */ \
+ *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
+} while (0)
+
+#define RING_GET_RESPONSE(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
+/* Loop termination condition: Would the specified index overflow the ring? */
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
+
+/* Ill-behaved frontend determination: Can there be this many requests? */
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
+ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+
+#define RING_PUSH_REQUESTS(_r) do { \
+ xen_wmb(); /* back sees requests /before/ updated producer index */ \
+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
+} while (0)
+
+#define RING_PUSH_RESPONSES(_r) do { \
+ xen_wmb(); /* front sees resps /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
+} while (0)
+
+/*
+ * Notification hold-off (req_event and rsp_event):
+ *
+ * When queueing requests or responses on a shared ring, it may not always be
+ * necessary to notify the remote end. For example, if requests are in flight
+ * in a backend, the front may be able to queue further requests without
+ * notifying the back (if the back checks for new requests when it queues
+ * responses).
+ *
+ * When enqueuing requests or responses:
+ *
+ * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
+ * is a boolean return value. True indicates that the receiver requires an
+ * asynchronous notification.
+ *
+ * After dequeuing requests or responses (before sleeping the connection):
+ *
+ * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
+ * The second argument is a boolean return value. True indicates that there
+ * are pending messages on the ring (i.e., the connection should not be put
+ * to sleep).
+ *
+ * These macros will set the req_event/rsp_event field to trigger a
+ * notification on the very next message that is enqueued. If you want to
+ * create batches of work (i.e., only receive a notification after several
+ * messages have been enqueued) then you will need to create a customised
+ * version of the FINAL_CHECK macro in your own code, which sets the event
+ * field appropriately.
+ */
+
+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->req_prod; \
+ RING_IDX __new = (_r)->req_prod_pvt; \
+ xen_wmb(); /* back sees requests /before/ updated producer index */ \
+ (_r)->sring->req_prod = __new; \
+ xen_mb(); /* back sees new requests /before/ we check req_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
+ (RING_IDX)(__new - __old)); \
+} while (0)
+
+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->rsp_prod; \
+ RING_IDX __new = (_r)->rsp_prod_pvt; \
+ xen_wmb(); /* front sees resps /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = __new; \
+ xen_mb(); /* front sees new resps /before/ we check rsp_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
+ (RING_IDX)(__new - __old)); \
+} while (0)
+
+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->req_event = (_r)->req_cons + 1; \
+ xen_mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+} while (0)
+
+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
+ xen_mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+} while (0)
+
+
+/*
+ * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and
+ * functions to check if there is data on the ring, and to read and
+ * write to them.
+ *
+ * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
+ * does not define the indexes page. As different protocols can have
+ * extensions to the basic format, this macro allow them to define their
+ * own struct.
+ *
+ * XEN_FLEX_RING_SIZE
+ * Convenience macro to calculate the size of one of the two rings
+ * from the overall order.
+ *
+ * $NAME_mask
+ * Function to apply the size mask to an index, to reduce the index
+ * within the range [0-size].
+ *
+ * $NAME_read_packet
+ * Function to read data from the ring. The amount of data to read is
+ * specified by the "size" argument.
+ *
+ * $NAME_write_packet
+ * Function to write data to the ring. The amount of data to write is
+ * specified by the "size" argument.
+ *
+ * $NAME_get_ring_ptr
+ * Convenience function that returns a pointer to read/write to the
+ * ring at the right location.
+ *
+ * $NAME_data_intf
+ * Indexes page, shared between frontend and backend. It also
+ * contains the array of grant refs.
+ *
+ * $NAME_queued
+ * Function to calculate how many bytes are currently on the ring,
+ * ready to be read. It can also be used to calculate how much free
+ * space is currently on the ring (ring_size - $NAME_queued()).
+ */
+#define XEN_FLEX_RING_SIZE(order) \
+ (1UL << (order + PAGE_SHIFT - 1))
+
+#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \
+struct name##_data_intf { \
+ RING_IDX in_cons, in_prod; \
+ \
+ uint8_t pad1[56]; \
+ \
+ RING_IDX out_cons, out_prod; \
+ \
+ uint8_t pad2[56]; \
+ \
+ RING_IDX ring_order; \
+ grant_ref_t ref[]; \
+}; \
+DEFINE_XEN_FLEX_RING(name);
+
+#define DEFINE_XEN_FLEX_RING(name) \
+static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \
+{ \
+ return (idx & (ring_size - 1)); \
+} \
+ \
+static inline RING_IDX name##_mask_order(RING_IDX idx, RING_IDX ring_order) \
+{ \
+ return (idx & (XEN_FLEX_RING_SIZE(ring_order) - 1)); \
+} \
+ \
+static inline unsigned char* name##_get_ring_ptr(unsigned char *buf, \
+ RING_IDX idx, \
+ RING_IDX ring_order) \
+{ \
+ return buf + name##_mask_order(idx, ring_order); \
+} \
+ \
+static inline void name##_read_packet(const unsigned char *buf, \
+ RING_IDX masked_prod, RING_IDX *masked_cons, \
+ RING_IDX ring_size, void *opaque, size_t size) { \
+ if (*masked_cons < masked_prod || \
+ size <= ring_size - *masked_cons) { \
+ memcpy(opaque, buf + *masked_cons, size); \
+ } else { \
+ memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
+ memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
+ size - (ring_size - *masked_cons)); \
+ } \
+ *masked_cons = name##_mask(*masked_cons + size, ring_size); \
+} \
+ \
+static inline void name##_write_packet(unsigned char *buf, \
+ RING_IDX *masked_prod, RING_IDX masked_cons, \
+ RING_IDX ring_size, const void *opaque, size_t size) { \
+ if (*masked_prod < masked_cons || \
+ size <= ring_size - *masked_prod) { \
+ memcpy(buf + *masked_prod, opaque, size); \
+ } else { \
+ memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
+ memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
+ size - (ring_size - *masked_prod)); \
+ } \
+ *masked_prod = name##_mask(*masked_prod + size, ring_size); \
+} \
+ \
+struct name##_data { \
+ unsigned char *in; /* half of the allocation */ \
+ unsigned char *out; /* half of the allocation */ \
+}; \
+ \
+ \
+static inline RING_IDX name##_queued(RING_IDX prod, \
+ RING_IDX cons, RING_IDX ring_size) \
+{ \
+ RING_IDX size; \
+ \
+ if (prod == cons) \
+ return 0; \
+ \
+ prod = name##_mask(prod, ring_size); \
+ cons = name##_mask(cons, ring_size); \
+ \
+ if (prod == cons) \
+ return ring_size; \
+ \
+ if (prod > cons) \
+ size = prod - cons; \
+ else \
+ size = ring_size - (cons - prod); \
+ return size; \
+};
+
+#endif /* __XEN_PUBLIC_IO_RING_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--
1.9.1
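
For reference, the usage comments in the imported header translate into
roughly the following backend-side sketch. This is a minimal, hypothetical
illustration only: the mytag request/response structs, the mapped
shared_page, and the notify() kick are stand-ins, and it assumes
xen_mb()/xen_wmb() are provided by the surrounding build, as they are in
QEMU's Xen code.

#include <stdint.h>
#include "hw/xen/io/ring.h"

/* Hypothetical message types; any two fixed-size structs will do. */
typedef struct { uint64_t id; uint8_t op; } mytag_request_t;
typedef struct { uint64_t id; int16_t status; } mytag_response_t;

/* Generates mytag_sring_t, mytag_front_ring_t and mytag_back_ring_t. */
DEFINE_RING_TYPES(mytag, mytag_request_t, mytag_response_t);

/*
 * Elsewhere, after mapping the page shared with the frontend:
 *
 *     mytag_back_ring_t ring;
 *     BACK_RING_INIT(&ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 */

static void mytag_service(mytag_back_ring_t *ring, void (*notify)(void))
{
    mytag_request_t req;
    int notify_needed, work_to_do;

    RING_FINAL_CHECK_FOR_REQUESTS(ring, work_to_do);
    while (work_to_do) {
        while (RING_HAS_UNCONSUMED_REQUESTS(ring)) {
            /* Copy the request, so the frontend cannot modify it
             * while we are processing it. */
            RING_COPY_REQUEST(ring, ring->req_cons, &req);
            ring->req_cons++;

            mytag_response_t *rsp =
                RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
            rsp->id = req.id;
            rsp->status = 0;        /* handle req.op here */
            ring->rsp_prod_pvt++;
        }
        /* Publish the responses; only kick the frontend if it asked
         * for a notification via rsp_event. */
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify_needed);
        if (notify_needed) {
            notify();               /* e.g. an event channel kick */
        }
        /* Arm req_event and re-check, so a request that raced with us
         * going idle is not missed. */
        RING_FINAL_CHECK_FOR_REQUESTS(ring, work_to_do);
    }
}

The frontend half is symmetrical: SHARED_RING_INIT() on the shared page,
then FRONT_RING_INIT(), RING_GET_REQUEST() with req_prod_pvt++, and
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(), as the comment block above
DEFINE_RING_TYPES in the header describes.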
On Mon, 13 Mar 2017 16:55:53 -0700
Stefano Stabellini <sstabellini@kernel.org> wrote:
> Do not use the ring.h header installed on the system. Instead, import
> the header into the QEMU codebase. This avoids problems when QEMU is
> built against a Xen version too old to provide all the ring macros.
>
What kind of problems?
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> CC: anthony.perard@citrix.com
> CC: jgross@suse.com
> ---
> NB: The new macros have not been committed to Xen yet. Do not apply this
> patch until they do.
Why? Does this break compat with older Xen?
> ---
> ---
> hw/block/xen_blkif.h | 2 +-
> hw/usb/xen-usb.c | 2 +-
> include/hw/xen/io/ring.h | 455 +++++++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 457 insertions(+), 2 deletions(-)
> create mode 100644 include/hw/xen/io/ring.h
>
> diff --git a/hw/block/xen_blkif.h b/hw/block/xen_blkif.h
> index 3300b6f..3e6e1ea 100644
> --- a/hw/block/xen_blkif.h
> +++ b/hw/block/xen_blkif.h
> @@ -1,7 +1,7 @@
> #ifndef XEN_BLKIF_H
> #define XEN_BLKIF_H
>
> -#include <xen/io/ring.h>
> +#include "hw/xen/io/ring.h"
> #include <xen/io/blkif.h>
> #include <xen/io/protocols.h>
>
> diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c
> index 8e676e6..370b3d9 100644
> --- a/hw/usb/xen-usb.c
> +++ b/hw/usb/xen-usb.c
> @@ -33,7 +33,7 @@
> #include "qapi/qmp/qint.h"
> #include "qapi/qmp/qstring.h"
>
> -#include <xen/io/ring.h>
> +#include "hw/xen/io/ring.h"
> #include <xen/io/usbif.h>
>
> /*
> diff --git a/include/hw/xen/io/ring.h b/include/hw/xen/io/ring.h
> new file mode 100644
> index 0000000..cf01fc3
> --- /dev/null
> +++ b/include/hw/xen/io/ring.h
> @@ -0,0 +1,455 @@
> +/******************************************************************************
> + * ring.h
> + *
> + * Shared producer-consumer ring macros.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a copy
> + * of this software and associated documentation files (the "Software"), to
> + * deal in the Software without restriction, including without limitation the
> + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
> + * sell copies of the Software, and to permit persons to whom the Software is
> + * furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
> + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
> + * DEALINGS IN THE SOFTWARE.
> + *
> + * Tim Deegan and Andrew Warfield November 2004.
> + */
> +
> +#ifndef __XEN_PUBLIC_IO_RING_H__
> +#define __XEN_PUBLIC_IO_RING_H__
> +
> +#if __XEN_INTERFACE_VERSION__ < 0x00030208
> +#define xen_mb() mb()
> +#define xen_rmb() rmb()
> +#define xen_wmb() wmb()
> +#endif
> +
> +typedef unsigned int RING_IDX;
> +
> +/* Round a 32-bit unsigned constant down to the nearest power of two. */
> +#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
> +#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
> +#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
> +#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
> +#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
> +
> +/*
> + * Calculate size of a shared ring, given the total available space for the
> + * ring and indexes (_sz), and the name tag of the request/response structure.
> + * A ring contains as many entries as will fit, rounded down to the nearest
> + * power of two (so we can mask with (size-1) to loop around).
> + */
> +#define __CONST_RING_SIZE(_s, _sz) \
> + (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
> + sizeof(((struct _s##_sring *)0)->ring[0])))
> +/*
> + * The same for passing in an actual pointer instead of a name tag.
> + */
> +#define __RING_SIZE(_s, _sz) \
> + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
> +
> +/*
> + * Macros to make the correct C datatypes for a new kind of ring.
> + *
> + * To make a new ring datatype, you need to have two message structures,
> + * let's say request_t, and response_t already defined.
> + *
> + * In a header where you want the ring datatype declared, you then do:
> + *
> + * DEFINE_RING_TYPES(mytag, request_t, response_t);
> + *
> + * These expand out to give you a set of types, as you can see below.
> + * The most important of these are:
> + *
> + * mytag_sring_t - The shared ring.
> + * mytag_front_ring_t - The 'front' half of the ring.
> + * mytag_back_ring_t - The 'back' half of the ring.
> + *
> + * To initialize a ring in your code you need to know the location and size
> + * of the shared memory area (PAGE_SIZE, for instance). To initialise
> + * the front half:
> + *
> + * mytag_front_ring_t front_ring;
> + * SHARED_RING_INIT((mytag_sring_t *)shared_page);
> + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
> + *
> + * Initializing the back follows similarly (note that only the front
> + * initializes the shared ring):
> + *
> + * mytag_back_ring_t back_ring;
> + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
> + */
> +
> +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
> + \
> +/* Shared ring entry */ \
> +union __name##_sring_entry { \
> + __req_t req; \
> + __rsp_t rsp; \
> +}; \
> + \
> +/* Shared ring page */ \
> +struct __name##_sring { \
> + RING_IDX req_prod, req_event; \
> + RING_IDX rsp_prod, rsp_event; \
> + union { \
> + struct { \
> + uint8_t smartpoll_active; \
> + } netif; \
> + struct { \
> + uint8_t msg; \
> + } tapif_user; \
> + uint8_t pvt_pad[4]; \
> + } pvt; \
> + uint8_t __pad[44]; \
> + union __name##_sring_entry ring[1]; /* variable-length */ \
> +}; \
> + \
> +/* "Front" end's private variables */ \
> +struct __name##_front_ring { \
> + RING_IDX req_prod_pvt; \
> + RING_IDX rsp_cons; \
> + unsigned int nr_ents; \
> + struct __name##_sring *sring; \
> +}; \
> + \
> +/* "Back" end's private variables */ \
> +struct __name##_back_ring { \
> + RING_IDX rsp_prod_pvt; \
> + RING_IDX req_cons; \
> + unsigned int nr_ents; \
> + struct __name##_sring *sring; \
> +}; \
> + \
> +/* Syntactic sugar */ \
> +typedef struct __name##_sring __name##_sring_t; \
> +typedef struct __name##_front_ring __name##_front_ring_t; \
> +typedef struct __name##_back_ring __name##_back_ring_t
> +
> +/*
> + * Macros for manipulating rings.
> + *
> + * FRONT_RING_whatever works on the "front end" of a ring: here
> + * requests are pushed on to the ring and responses taken off it.
> + *
> + * BACK_RING_whatever works on the "back end" of a ring: here
> + * requests are taken off the ring and responses put on.
> + *
> + * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
> + * This is OK in 1-for-1 request-response situations where the
> + * requestor (front end) never has more than RING_SIZE()-1
> + * outstanding requests.
> + */
> +
> +/* Initialising empty rings */
> +#define SHARED_RING_INIT(_s) do { \
> + (_s)->req_prod = (_s)->rsp_prod = 0; \
> + (_s)->req_event = (_s)->rsp_event = 1; \
> + (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad)); \
> + (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
> +} while(0)
> +
> +#define FRONT_RING_INIT(_r, _s, __size) do { \
> + (_r)->req_prod_pvt = 0; \
> + (_r)->rsp_cons = 0; \
> + (_r)->nr_ents = __RING_SIZE(_s, __size); \
> + (_r)->sring = (_s); \
> +} while (0)
> +
> +#define BACK_RING_INIT(_r, _s, __size) do { \
> + (_r)->rsp_prod_pvt = 0; \
> + (_r)->req_cons = 0; \
> + (_r)->nr_ents = __RING_SIZE(_s, __size); \
> + (_r)->sring = (_s); \
> +} while (0)
> +
> +/* How big is this ring? */
> +#define RING_SIZE(_r) \
> + ((_r)->nr_ents)
> +
> +/* Number of free requests (for use on front side only). */
> +#define RING_FREE_REQUESTS(_r) \
> + (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
> +
> +/* Test if there is an empty slot available on the front ring.
> + * (This is only meaningful from the front. )
> + */
> +#define RING_FULL(_r) \
> + (RING_FREE_REQUESTS(_r) == 0)
> +
> +/* Test if there are outstanding messages to be processed on a ring. */
> +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
> + ((_r)->sring->rsp_prod - (_r)->rsp_cons)
> +
> +#ifdef __GNUC__
> +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
> + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
> + unsigned int rsp = RING_SIZE(_r) - \
> + ((_r)->req_cons - (_r)->rsp_prod_pvt); \
> + req < rsp ? req : rsp; \
> +})
> +#else
> +/* Same as above, but without the nice GCC ({ ... }) syntax. */
> +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
> + ((((_r)->sring->req_prod - (_r)->req_cons) < \
> + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
> + ((_r)->sring->req_prod - (_r)->req_cons) : \
> + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
> +#endif
> +
> +/* Direct access to individual ring elements, by index. */
> +#define RING_GET_REQUEST(_r, _idx) \
> + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
> +
> +/*
> + * Get a local copy of a request.
> + *
> + * Use this in preference to RING_GET_REQUEST() so all processing is
> + * done on a local copy that cannot be modified by the other end.
> + *
> + * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
> + * to be ineffective where _req is a struct which consists of only bitfields.
> + */
> +#define RING_COPY_REQUEST(_r, _idx, _req) do { \
> + /* Use volatile to force the copy into _req. */ \
> + *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
> +} while (0)
> +
> +#define RING_GET_RESPONSE(_r, _idx) \
> + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
> +
> +/* Loop termination condition: Would the specified index overflow the ring? */
> +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
> + (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
> +
> +/* Ill-behaved frontend determination: Can there be this many requests? */
> +#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
> + (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
> +
> +#define RING_PUSH_REQUESTS(_r) do { \
> + xen_wmb(); /* back sees requests /before/ updated producer index */ \
> + (_r)->sring->req_prod = (_r)->req_prod_pvt; \
> +} while (0)
> +
> +#define RING_PUSH_RESPONSES(_r) do { \
> + xen_wmb(); /* front sees resps /before/ updated producer index */ \
> + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
> +} while (0)
> +
> +/*
> + * Notification hold-off (req_event and rsp_event):
> + *
> + * When queueing requests or responses on a shared ring, it may not always be
> + * necessary to notify the remote end. For example, if requests are in flight
> + * in a backend, the front may be able to queue further requests without
> + * notifying the back (if the back checks for new requests when it queues
> + * responses).
> + *
> + * When enqueuing requests or responses:
> + *
> + * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
> + * is a boolean return value. True indicates that the receiver requires an
> + * asynchronous notification.
> + *
> + * After dequeuing requests or responses (before sleeping the connection):
> + *
> + * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
> + * The second argument is a boolean return value. True indicates that there
> + * are pending messages on the ring (i.e., the connection should not be put
> + * to sleep).
> + *
> + * These macros will set the req_event/rsp_event field to trigger a
> + * notification on the very next message that is enqueued. If you want to
> + * create batches of work (i.e., only receive a notification after several
> + * messages have been enqueued) then you will need to create a customised
> + * version of the FINAL_CHECK macro in your own code, which sets the event
> + * field appropriately.
> + */
> +
> +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
> + RING_IDX __old = (_r)->sring->req_prod; \
> + RING_IDX __new = (_r)->req_prod_pvt; \
> + xen_wmb(); /* back sees requests /before/ updated producer index */ \
> + (_r)->sring->req_prod = __new; \
> + xen_mb(); /* back sees new requests /before/ we check req_event */ \
> + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
> + (RING_IDX)(__new - __old)); \
> +} while (0)
> +
> +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
> + RING_IDX __old = (_r)->sring->rsp_prod; \
> + RING_IDX __new = (_r)->rsp_prod_pvt; \
> + xen_wmb(); /* front sees resps /before/ updated producer index */ \
> + (_r)->sring->rsp_prod = __new; \
> + xen_mb(); /* front sees new resps /before/ we check rsp_event */ \
> + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
> + (RING_IDX)(__new - __old)); \
> +} while (0)
> +
> +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
> + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
> + if (_work_to_do) break; \
> + (_r)->sring->req_event = (_r)->req_cons + 1; \
> + xen_mb(); \
> + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
> +} while (0)
> +
> +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
> + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
> + if (_work_to_do) break; \
> + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
> + xen_mb(); \
> + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
> +} while (0)
> +
> +
> +/*
> + * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and
> + * functions to check if there is data on the ring, and to read and
> + * write to them.
> + *
> + * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
> + * does not define the indexes page. As different protocols can have
> + * extensions to the basic format, this macro allows them to define their
> + * own struct.
> + *
> + * XEN_FLEX_RING_SIZE
> + * Convenience macro to calculate the size of one of the two rings
> + * from the overall order.
> + *
> + * $NAME_mask
> + * Function to apply the size mask to an index, to reduce the index
> + * within the range [0, size-1].
> + *
> + * $NAME_read_packet
> + * Function to read data from the ring. The amount of data to read is
> + * specified by the "size" argument.
> + *
> + * $NAME_write_packet
> + * Function to write data to the ring. The amount of data to write is
> + * specified by the "size" argument.
> + *
> + * $NAME_get_ring_ptr
> + * Convenience function that returns a pointer to read/write to the
> + * ring at the right location.
> + *
> + * $NAME_data_intf
> + * Indexes page, shared between frontend and backend. It also
> + * contains the array of grant refs.
> + *
> + * $NAME_queued
> + * Function to calculate how many bytes are currently on the ring,
> + * ready to be read. It can also be used to calculate how much free
> + * space is currently on the ring (ring_size - $NAME_queued()).
> + */
> +#define XEN_FLEX_RING_SIZE(order) \
> + (1UL << ((order) + PAGE_SHIFT - 1))
> +
> +#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \
> +struct name##_data_intf { \
> + RING_IDX in_cons, in_prod; \
> + \
> + uint8_t pad1[56]; \
> + \
> + RING_IDX out_cons, out_prod; \
> + \
> + uint8_t pad2[56]; \
> + \
> + RING_IDX ring_order; \
> + grant_ref_t ref[]; \
> +}; \
> +DEFINE_XEN_FLEX_RING(name);
> +
> +#define DEFINE_XEN_FLEX_RING(name) \
> +static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \
> +{ \
> + return (idx & (ring_size - 1)); \
> +} \
> + \
> +static inline RING_IDX name##_mask_order(RING_IDX idx, RING_IDX ring_order) \
> +{ \
> + return (idx & (XEN_FLEX_RING_SIZE(ring_order) - 1)); \
> +} \
> + \
> +static inline unsigned char* name##_get_ring_ptr(unsigned char *buf, \
> + RING_IDX idx, \
> + RING_IDX ring_order) \
> +{ \
> + return buf + name##_mask_order(idx, ring_order); \
> +} \
> + \
> +static inline void name##_read_packet(const unsigned char *buf, \
> + RING_IDX masked_prod, RING_IDX *masked_cons, \
> + RING_IDX ring_size, void *opaque, size_t size) { \
> + if (*masked_cons < masked_prod || \
> + size <= ring_size - *masked_cons) { \
> + memcpy(opaque, buf + *masked_cons, size); \
> + } else { \
> + memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
> + memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
> + size - (ring_size - *masked_cons)); \
> + } \
> + *masked_cons = name##_mask(*masked_cons + size, ring_size); \
> +} \
> + \
> +static inline void name##_write_packet(unsigned char *buf, \
> + RING_IDX *masked_prod, RING_IDX masked_cons, \
> + RING_IDX ring_size, const void *opaque, size_t size) { \
> + if (*masked_prod < masked_cons || \
> + size <= ring_size - *masked_prod) { \
> + memcpy(buf + *masked_prod, opaque, size); \
> + } else { \
> + memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
> + memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
> + size - (ring_size - *masked_prod)); \
> + } \
> + *masked_prod = name##_mask(*masked_prod + size, ring_size); \
> +} \
> + \
> +struct name##_data { \
> + unsigned char *in; /* half of the allocation */ \
> + unsigned char *out; /* half of the allocation */ \
> +}; \
> + \
> + \
> +static inline RING_IDX name##_queued(RING_IDX prod, \
> + RING_IDX cons, RING_IDX ring_size) \
> +{ \
> + RING_IDX size; \
> + \
> + if (prod == cons) \
> + return 0; \
> + \
> + prod = name##_mask(prod, ring_size); \
> + cons = name##_mask(cons, ring_size); \
> + \
> + if (prod == cons) \
> + return ring_size; \
> + \
> + if (prod > cons) \
> + size = prod - cons; \
> + else \
> + size = ring_size - (cons - prod); \
> + return size; \
> +};
> +
> +#endif /* __XEN_PUBLIC_IO_RING_H__ */
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * tab-width: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
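For illustration, a minimal sketch (not part of the posted patch) of a
frontend driving the classic macros above; the mytag types and
mytag_front_submit() are hypothetical, and the xen_mb()/xen_wmb()
barriers are assumed to be defined, as the header arranges:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical message types for the documentation's "mytag" ring. */
    typedef struct {
        uint32_t id;
        uint32_t op;
    } mytag_request_t;

    typedef struct {
        uint32_t id;
        int32_t status;
    } mytag_response_t;

    DEFINE_RING_TYPES(mytag, mytag_request_t, mytag_response_t);

    /* Frontend: initialise both views, queue one request, and notify
     * only if the backend asked for it via req_event. */
    static void mytag_front_submit(void *shared_page, size_t page_size)
    {
        mytag_sring_t *sring = shared_page;
        mytag_front_ring_t front;
        int notify;

        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(&front, sring, page_size);

        if (!RING_FULL(&front)) {
            mytag_request_t *req =
                RING_GET_REQUEST(&front, front.req_prod_pvt);
            req->id = 1;
            req->op = 0;
            front.req_prod_pvt++;
            RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front, notify);
            if (notify) {
                /* kick the backend, e.g. over an event channel */
            }
        }
    }

Note that only the frontend calls SHARED_RING_INIT(), matching the
initialisation rule in the header comment above.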
On Wed, 15 Mar 2017, Greg Kurz wrote:
> On Mon, 13 Mar 2017 16:55:53 -0700
> Stefano Stabellini <sstabellini@kernel.org> wrote:
>
> > Do not use the ring.h header installed on the system. Instead, import
> > the header into the QEMU codebase. This avoids problems when QEMU is
> > built against a Xen version too old to provide all the ring macros.
> >
>
> What kind of problems?
The FLEX macros are only available in Xen 4.9+ (still unreleased).
However, aside from these macros, the Xen 9pfs frontends and backends
could work fine on any Xen release. In fact, the Xen public/io headers
are only provided as a reference.
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > CC: anthony.perard@citrix.com
> > CC: jgross@suse.com
> > ---
> > NB: The new macros have not been committed to Xen yet. Do not apply this
> > patch until they are.
>
> Why? Does this break compat with older Xen?
No, it does not break compatibility. But I think it is a good idea to
commit the header to QEMU only after the corresponding Xen header has
been accepted. I don't want the two to diverge.
> > [...]
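To complete the picture, another hedged sketch reusing the hypothetical
mytag types from the earlier sketch: the canonical backend loop pairs
RING_FINAL_CHECK_FOR_REQUESTS() with RING_PUSH_RESPONSES_AND_CHECK_NOTIFY()
so the connection can go idle without losing a wakeup:

    /* Backend: drain requests, emit responses, and use the final check
     * so a request that races with going idle is still noticed. */
    static void mytag_back_service(mytag_back_ring_t *back)
    {
        int work_to_do, notify;

        RING_FINAL_CHECK_FOR_REQUESTS(back, work_to_do);
        while (work_to_do) {
            while (RING_HAS_UNCONSUMED_REQUESTS(back)) {
                mytag_request_t req;
                mytag_response_t *rsp;

                /* Process a private copy so the frontend cannot
                 * rewrite the request mid-flight. */
                RING_COPY_REQUEST(back, back->req_cons, &req);
                back->req_cons++;

                rsp = RING_GET_RESPONSE(back, back->rsp_prod_pvt);
                rsp->id = req.id;
                rsp->status = 0;
                back->rsp_prod_pvt++;
            }
            RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(back, notify);
            if (notify) {
                /* kick the frontend over its event channel */
            }
            RING_FINAL_CHECK_FOR_REQUESTS(back, work_to_do);
        }
    }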
On Wed, 15 Mar 2017 11:36:12 -0700 (PDT)
Stefano Stabellini <sstabellini@kernel.org> wrote:
> On Wed, 15 Mar 2017, Greg Kurz wrote:
> > On Mon, 13 Mar 2017 16:55:53 -0700
> > Stefano Stabellini <sstabellini@kernel.org> wrote:
> >
> > > Do not use the ring.h header installed on the system. Instead, import
> > > the header into the QEMU codebase. This avoids problems when QEMU is
> > > built against a Xen version too old to provide all the ring macros.
> > >
> >
> > What kind of problems?
>
> The FLEX macros are only available in Xen 4.9+ (still unreleased).
> However, aside from these macros, the Xen 9pfs frontends and backends
> could work fine on any Xen release. In fact, the Xen public/io headers
> are only provided as a reference.
>
Ok.
>
> > > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > > CC: anthony.perard@citrix.com
> > > CC: jgross@suse.com
> > > ---
> > > NB: The new macros have not been committed to Xen yet. Do not apply this
> > > patch until they are.
> >
> > Why? Does this break compat with older Xen?
>
> No, it does not break compatibility. But I think it is a good idea to
> commit the header to QEMU only after the corresponding Xen header has
> been accepted. I don't want the two to diverge.
>
Fair enough, but this will put the entire patchset on hold then. When is
Xen 4.9 supposed to be released?
> > > [...]
On Wed, 15 Mar 2017, Greg Kurz wrote:
> On Wed, 15 Mar 2017 11:36:12 -0700 (PDT)
> Stefano Stabellini <sstabellini@kernel.org> wrote:
>
> > On Wed, 15 Mar 2017, Greg Kurz wrote:
> > > On Mon, 13 Mar 2017 16:55:53 -0700
> > > Stefano Stabellini <sstabellini@kernel.org> wrote:
> > >
> > > > Do not use the ring.h header installed on the system. Instead, import
> > > > the header into the QEMU codebase. This avoids problems when QEMU is
> > > > built against a Xen version too old to provide all the ring macros.
> > > >
> > >
> > > What kind of problems?
> >
> > The FLEX macros are only available in Xen 4.9+ (still unreleased).
> > However, aside from these macros, the Xen 9pfs frontends and backends
> > could work fine on any Xen release. In fact, the Xen public/io headers
> > are only provided as a reference.
> >
>
> Ok.
>
> >
> > > > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > > > CC: anthony.perard@citrix.com
> > > > CC: jgross@suse.com
> > > > ---
> > > > NB: The new macros have not been committed to Xen yet. Do not apply this
> > > > patch until they are.
> > >
> > > Why? Does this break compat with older Xen?
> >
> > No, it does not break compatibility. But I think it is a good idea to
> > commit the header to QEMU only after the corresponding Xen header has
> > been accepted. I don't want the two to diverge.
> >
>
> Fair enough, but this will put the entire patchset on hold then. When is
> Xen 4.9 supposed to be released?
In a few months, but I don't think we need to wait until the release, just
for the Reviewed-by on the new macros, which should come soon.
> > > > [...]
> > > > + (_r)->nr_ents = __RING_SIZE(_s, __size); \
> > > > + (_r)->sring = (_s); \
> > > > +} while (0)
> > > > +
> > > > +/* How big is this ring? */
> > > > +#define RING_SIZE(_r) \
> > > > + ((_r)->nr_ents)
> > > > +
> > > > +/* Number of free requests (for use on front side only). */
> > > > +#define RING_FREE_REQUESTS(_r) \
> > > > + (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
> > > > +
> > > > +/* Test if there is an empty slot available on the front ring.
> > > > + * (This is only meaningful from the front.)
> > > > + */
> > > > +#define RING_FULL(_r) \
> > > > + (RING_FREE_REQUESTS(_r) == 0)
> > > > +
> > > > +/* Test if there are outstanding messages to be processed on a ring. */
> > > > +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
> > > > + ((_r)->sring->rsp_prod - (_r)->rsp_cons)
> > > > +
> > > > +#ifdef __GNUC__
> > > > +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
> > > > + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
> > > > + unsigned int rsp = RING_SIZE(_r) - \
> > > > + ((_r)->req_cons - (_r)->rsp_prod_pvt); \
> > > > + req < rsp ? req : rsp; \
> > > > +})
> > > > +#else
> > > > +/* Same as above, but without the nice GCC ({ ... }) syntax. */
> > > > +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
> > > > + ((((_r)->sring->req_prod - (_r)->req_cons) < \
> > > > + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
> > > > + ((_r)->sring->req_prod - (_r)->req_cons) : \
> > > > + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
> > > > +#endif
> > > > +
> > > > +/* Direct access to individual ring elements, by index. */
> > > > +#define RING_GET_REQUEST(_r, _idx) \
> > > > + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
> > > > +
> > > > +/*
> > > > + * Get a local copy of a request.
> > > > + *
> > > > + * Use this in preference to RING_GET_REQUEST() so all processing is
> > > > + * done on a local copy that cannot be modified by the other end.
> > > > + *
> > > > + * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
> > > > + * to be ineffective where _req is a struct which consists of only bitfields.
> > > > + */
> > > > +#define RING_COPY_REQUEST(_r, _idx, _req) do { \
> > > > + /* Use volatile to force the copy into _req. */ \
> > > > + *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
> > > > +} while (0)
> > > > +
> > > > +#define RING_GET_RESPONSE(_r, _idx) \
> > > > + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
> > > > +
> > > > +/* Loop termination condition: Would the specified index overflow the ring? */
> > > > +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
> > > > + (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
> > > > +
> > > > +/* Ill-behaved frontend determination: Can there be this many requests? */
> > > > +#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
> > > > + (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
> > > > +
> > > > +#define RING_PUSH_REQUESTS(_r) do { \
> > > > + xen_wmb(); /* back sees requests /before/ updated producer index */ \
> > > > + (_r)->sring->req_prod = (_r)->req_prod_pvt; \
> > > > +} while (0)
> > > > +
> > > > +#define RING_PUSH_RESPONSES(_r) do { \
> > > > + xen_wmb(); /* front sees resps /before/ updated producer index */ \
> > > > + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
> > > > +} while (0)
> > > > +
> > > > +/*
> > > > + * Notification hold-off (req_event and rsp_event):
> > > > + *
> > > > + * When queueing requests or responses on a shared ring, it may not always be
> > > > + * necessary to notify the remote end. For example, if requests are in flight
> > > > + * in a backend, the front may be able to queue further requests without
> > > > + * notifying the back (if the back checks for new requests when it queues
> > > > + * responses).
> > > > + *
> > > > + * When enqueuing requests or responses:
> > > > + *
> > > > + * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
> > > > + * is a boolean return value. True indicates that the receiver requires an
> > > > + * asynchronous notification.
> > > > + *
> > > > + * After dequeuing requests or responses (before sleeping the connection):
> > > > + *
> > > > + * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
> > > > + * The second argument is a boolean return value. True indicates that there
> > > > + * are pending messages on the ring (i.e., the connection should not be put
> > > > + * to sleep).
> > > > + *
> > > > + * These macros will set the req_event/rsp_event field to trigger a
> > > > + * notification on the very next message that is enqueued. If you want to
> > > > + * create batches of work (i.e., only receive a notification after several
> > > > + * messages have been enqueued) then you will need to create a customised
> > > > + * version of the FINAL_CHECK macro in your own code, which sets the event
> > > > + * field appropriately.
> > > > + */
> > > > +
> > > > +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
> > > > + RING_IDX __old = (_r)->sring->req_prod; \
> > > > + RING_IDX __new = (_r)->req_prod_pvt; \
> > > > + xen_wmb(); /* back sees requests /before/ updated producer index */ \
> > > > + (_r)->sring->req_prod = __new; \
> > > > + xen_mb(); /* back sees new requests /before/ we check req_event */ \
> > > > + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
> > > > + (RING_IDX)(__new - __old)); \
> > > > +} while (0)
> > > > +
> > > > +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
> > > > + RING_IDX __old = (_r)->sring->rsp_prod; \
> > > > + RING_IDX __new = (_r)->rsp_prod_pvt; \
> > > > + xen_wmb(); /* front sees resps /before/ updated producer index */ \
> > > > + (_r)->sring->rsp_prod = __new; \
> > > > + xen_mb(); /* front sees new resps /before/ we check rsp_event */ \
> > > > + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
> > > > + (RING_IDX)(__new - __old)); \
> > > > +} while (0)
> > > > +
> > > > +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
> > > > + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
> > > > + if (_work_to_do) break; \
> > > > + (_r)->sring->req_event = (_r)->req_cons + 1; \
> > > > + xen_mb(); \
> > > > + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
> > > > +} while (0)
> > > > +
> > > > +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
> > > > + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
> > > > + if (_work_to_do) break; \
> > > > + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
> > > > + xen_mb(); \
> > > > + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
> > > > +} while (0)
> > > > +
> > > > +
> > > > +/*
> > > > + * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and
> > > > + * functions to check if there is data on the ring, and to read and
> > > > + * write to them.
> > > > + *
> > > > + * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
> > > > + * does not define the indexes page. As different protocols can have
> > > > + * extensions to the basic format, this macro allows them to define their
> > > > + * own struct.
> > > > + *
> > > > + * XEN_FLEX_RING_SIZE
> > > > + * Convenience macro to calculate the size of one of the two rings
> > > > + * from the overall order.
> > > > + *
> > > > + * $NAME_mask
> > > > + * Function to apply the size mask to an index, to reduce the index
> > > > + * within the range [0, size-1].
> > > > + *
> > > > + * $NAME_read_packet
> > > > + * Function to read data from the ring. The amount of data to read is
> > > > + * specified by the "size" argument.
> > > > + *
> > > > + * $NAME_write_packet
> > > > + * Function to write data to the ring. The amount of data to write is
> > > > + * specified by the "size" argument.
> > > > + *
> > > > + * $NAME_get_ring_ptr
> > > > + * Convenience function that returns a pointer to read/write to the
> > > > + * ring at the right location.
> > > > + *
> > > > + * $NAME_data_intf
> > > > + * Indexes page, shared between frontend and backend. It also
> > > > + * contains the array of grant refs.
> > > > + *
> > > > + * $NAME_queued
> > > > + * Function to calculate how many bytes are currently on the ring,
> > > > + * ready to be read. It can also be used to calculate how much free
> > > > + * space is currently on the ring (ring_size - $NAME_queued()).
> > > > + */
> > > > +#define XEN_FLEX_RING_SIZE(order) \
> > > > + (1UL << (order + PAGE_SHIFT - 1))
> > > > +
> > > > +#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \
> > > > +struct name##_data_intf { \
> > > > + RING_IDX in_cons, in_prod; \
> > > > + \
> > > > + uint8_t pad1[56]; \
> > > > + \
> > > > + RING_IDX out_cons, out_prod; \
> > > > + \
> > > > + uint8_t pad2[56]; \
> > > > + \
> > > > + RING_IDX ring_order; \
> > > > + grant_ref_t ref[]; \
> > > > +}; \
> > > > +DEFINE_XEN_FLEX_RING(name);
> > > > +
> > > > +#define DEFINE_XEN_FLEX_RING(name) \
> > > > +static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \
> > > > +{ \
> > > > + return (idx & (ring_size - 1)); \
> > > > +} \
> > > > + \
> > > > +static inline RING_IDX name##_mask_order(RING_IDX idx, RING_IDX ring_order) \
> > > > +{ \
> > > > + return (idx & (XEN_FLEX_RING_SIZE(ring_order) - 1)); \
> > > > +} \
> > > > + \
> > > > +static inline unsigned char* name##_get_ring_ptr(unsigned char *buf, \
> > > > + RING_IDX idx, \
> > > > + RING_IDX ring_order) \
> > > > +{ \
> > > > + return buf + name##_mask_order(idx, ring_order); \
> > > > +} \
> > > > + \
> > > > +static inline void name##_read_packet(const unsigned char *buf, \
> > > > + RING_IDX masked_prod, RING_IDX *masked_cons, \
> > > > + RING_IDX ring_size, void *opaque, size_t size) { \
> > > > + if (*masked_cons < masked_prod || \
> > > > + size <= ring_size - *masked_cons) { \
> > > > + memcpy(opaque, buf + *masked_cons, size); \
> > > > + } else { \
> > > > + memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
> > > > + memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
> > > > + size - (ring_size - *masked_cons)); \
> > > > + } \
> > > > + *masked_cons = name##_mask(*masked_cons + size, ring_size); \
> > > > +} \
> > > > + \
> > > > +static inline void name##_write_packet(unsigned char *buf, \
> > > > + RING_IDX *masked_prod, RING_IDX masked_cons, \
> > > > + RING_IDX ring_size, const void *opaque, size_t size) { \
> > > > + if (*masked_prod < masked_cons || \
> > > > + size <= ring_size - *masked_prod) { \
> > > > + memcpy(buf + *masked_prod, opaque, size); \
> > > > + } else { \
> > > > + memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
> > > > + memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
> > > > + size - (ring_size - *masked_prod)); \
> > > > + } \
> > > > + *masked_prod = name##_mask(*masked_prod + size, ring_size); \
> > > > +} \
> > > > + \
> > > > +struct name##_data { \
> > > > + unsigned char *in; /* half of the allocation */ \
> > > > + unsigned char *out; /* half of the allocation */ \
> > > > +}; \
> > > > + \
> > > > + \
> > > > +static inline RING_IDX name##_queued(RING_IDX prod, \
> > > > + RING_IDX cons, RING_IDX ring_size) \
> > > > +{ \
> > > > + RING_IDX size; \
> > > > + \
> > > > + if (prod == cons) \
> > > > + return 0; \
> > > > + \
> > > > + prod = name##_mask(prod, ring_size); \
> > > > + cons = name##_mask(cons, ring_size); \
> > > > + \
> > > > + if (prod == cons) \
> > > > + return ring_size; \
> > > > + \
> > > > + if (prod > cons) \
> > > > + size = prod - cons; \
> > > > + else \
> > > > + size = ring_size - (cons - prod); \
> > > > + return size; \
> > > > +};
> > > > +
> > > > +#endif /* __XEN_PUBLIC_IO_RING_H__ */
> > > > +
> > > > +/*
> > > > + * Local variables:
> > > > + * mode: C
> > > > + * c-file-style: "BSD"
> > > > + * c-basic-offset: 4
> > > > + * tab-width: 4
> > > > + * indent-tabs-mode: nil
> > > > + * End:
> > > > + */
> > >
> > >
>
>
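As a quick illustration of how a backend typically drives the classic ring
macros quoted above, here is a minimal sketch; the demo_request_t /
demo_response_t types and demo_service() are hypothetical, not from any patch
in this thread:

/* Hypothetical message types, for illustration only. */
typedef struct demo_request  { uint32_t id; } demo_request_t;
typedef struct demo_response { uint32_t id; int16_t status; } demo_response_t;

DEFINE_RING_TYPES(demo, demo_request_t, demo_response_t);

/* Backend side: drain requests, queue responses, notify only if needed. */
static void demo_service(demo_back_ring_t *ring)
{
    demo_request_t req;
    demo_response_t *rsp;
    int notify;

    while (RING_HAS_UNCONSUMED_REQUESTS(ring)) {
        if (RING_REQUEST_CONS_OVERFLOW(ring, ring->req_cons)) {
            break; /* don't outrun our own unpushed responses */
        }
        /* Work on a local copy the frontend cannot modify under us. */
        RING_COPY_REQUEST(ring, ring->req_cons, &req);
        ring->req_cons++;

        rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
        rsp->id = req.id;
        rsp->status = 0;
        ring->rsp_prod_pvt++;
    }

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
    if (notify) {
        /* kick the frontend's event channel here */
    }
}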
It uses the new ring.h macros to declare rings and interfaces.
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
---
hw/9pfs/xen_9pfs.h | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
create mode 100644 hw/9pfs/xen_9pfs.h
diff --git a/hw/9pfs/xen_9pfs.h b/hw/9pfs/xen_9pfs.h
new file mode 100644
index 0000000..c4e8901
--- /dev/null
+++ b/hw/9pfs/xen_9pfs.h
@@ -0,0 +1,20 @@
+#ifndef XEN_9PFS_H
+#define XEN_9PFS_H
+
+#include "hw/xen/io/ring.h"
+#include <xen/io/protocols.h>
+
+struct xen_9pfs_header {
+ uint32_t size;
+ uint8_t id;
+ uint16_t tag;
+
+ /* uint8_t sdata[]; */
+} __attribute__((packed));
+
+#define PAGE_SHIFT XC_PAGE_SHIFT
+#define XEN_9PFS_RING_ORDER 6
+#define XEN_9PFS_RING_SIZE XEN_FLEX_RING_SIZE(XEN_9PFS_RING_ORDER)
+DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs);
+
+#endif
--
1.9.1
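As a sanity check on the constants above: with the usual XC_PAGE_SHIFT of 12,
XEN_9PFS_RING_SIZE = XEN_FLEX_RING_SIZE(6) = 1UL << (6 + 12 - 1) = 128 KiB per
direction, i.e. the order-6 allocation of 64 pages (256 KiB) is split evenly
between the in and out rings generated by DEFINE_XEN_FLEX_RING_AND_INTF.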
On Mon, 13 Mar 2017 16:55:54 -0700
Stefano Stabellini <sstabellini@kernel.org> wrote:
> It uses the new ring.h macros to declare rings and interfaces.
>
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> CC: anthony.perard@citrix.com
> CC: jgross@suse.com
> ---
> hw/9pfs/xen_9pfs.h | 20 ++++++++++++++++++++
> 1 file changed, 20 insertions(+)
> create mode 100644 hw/9pfs/xen_9pfs.h
>
This header file has only one user: please move its content to
hw/9pfs/xen-9p-backend.c (except the 9P header structure, see
below).
> diff --git a/hw/9pfs/xen_9pfs.h b/hw/9pfs/xen_9pfs.h
> new file mode 100644
> index 0000000..c4e8901
> --- /dev/null
> +++ b/hw/9pfs/xen_9pfs.h
> @@ -0,0 +1,20 @@
> +#ifndef XEN_9PFS_H
> +#define XEN_9PFS_H
> +
> +#include "hw/xen/io/ring.h"
> +#include <xen/io/protocols.h>
> +
> +struct xen_9pfs_header {
> + uint32_t size;
> + uint8_t id;
> + uint16_t tag;
> +
> + /* uint8_t sdata[]; */
This doesn't seem useful.
> +} __attribute__((packed));
> +
A few remarks:
- this is a 9P message header actually, which is also used with virtio,
- QEMU coding style requires a typedef in CamelCase,
- the 9P protocol explicitly uses little-endian ordering. Since we
don't have endian-specific types, it makes sense to indicate that
when naming the fields.
- we have a QEMU_PACKED macro which seems to be used a lot more than
the gcc syntax
Please define a new type in hw/9pfs/9p.h and use it in both transports.
Something like:
typedef struct {
uint32_t size_le;
uint8_t id;
uint16_t tag_le;
} QEMU_PACKED P9MsgHeader;
> +#define PAGE_SHIFT XC_PAGE_SHIFT
I don't see any user for this in hw/9pfs/xen-9p-backend.c
> +#define XEN_9PFS_RING_ORDER 6
> +#define XEN_9PFS_RING_SIZE XEN_FLEX_RING_SIZE(XEN_9PFS_RING_ORDER)
> +DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs);
> +
> +#endif
On Wed, 15 Mar 2017, Greg Kurz wrote:
> On Mon, 13 Mar 2017 16:55:54 -0700
> Stefano Stabellini <sstabellini@kernel.org> wrote:
>
> > It uses the new ring.h macros to declare rings and interfaces.
> >
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > CC: anthony.perard@citrix.com
> > CC: jgross@suse.com
> > ---
> > hw/9pfs/xen_9pfs.h | 20 ++++++++++++++++++++
> > 1 file changed, 20 insertions(+)
> > create mode 100644 hw/9pfs/xen_9pfs.h
> >
>
> This header file has only one user: please move its content to
> hw/9pfs/xen-9p-backend.c (except the 9P header structure, see
> below).
OK, I can do that. There is going to be a very similar header in the Xen
tree under xen/include/public/io
(http://marc.info/?l=xen-devel&m=148952997709142), but from QEMU's point
of view, it makes sense to fold it into xen-9p-backend.c.
> > diff --git a/hw/9pfs/xen_9pfs.h b/hw/9pfs/xen_9pfs.h
> > new file mode 100644
> > index 0000000..c4e8901
> > --- /dev/null
> > +++ b/hw/9pfs/xen_9pfs.h
> > @@ -0,0 +1,20 @@
> > +#ifndef XEN_9PFS_H
> > +#define XEN_9PFS_H
> > +
> > +#include "hw/xen/io/ring.h"
> > +#include <xen/io/protocols.h>
> > +
> > +struct xen_9pfs_header {
> > + uint32_t size;
> > + uint8_t id;
> > + uint16_t tag;
> > +
> > + /* uint8_t sdata[]; */
>
> This doesn't seem useful.
I'll remove
> > +} __attribute__((packed));
> > +
>
> A few remarks:
> - this is a 9P message header actually, which is also used with virtio,
> - QEMU coding style requires a typedef in CamelCase,
> - the 9P protocol explicitly uses little-endian ordering. Since we
> don't have endian-specific types, it makes sense to indicate that
> when naming the fields.
> - we have a QEMU_PACKED macro which seems to be used a lot more than
> the gcc syntax
>
> Please define a new type in hw/9pfs/9p.h and use it in both transports.
> Something like:
>
> typedef struct {
> uint32_t size_le;
> uint8_t id;
> uint16_t tag_le;
> } QEMU_PACKED P9MsgHeader;
OK
> > +#define PAGE_SHIFT XC_PAGE_SHIFT
>
> I don't see any user for this in hw/9pfs/xen-9p-backend.c
PAGE_SHIFT is used by the macros below, but the original Xen headers
don't have the PAGE_SHIFT definition, so, for the sake of keeping the
two in sync, I didn't add it there.
> > +#define XEN_9PFS_RING_ORDER 6
> > +#define XEN_9PFS_RING_SIZE XEN_FLEX_RING_SIZE(XEN_9PFS_RING_ORDER)
> > +DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs);
> > +
> > +#endif
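For reference, expanding DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs) against the
ring.h macros quoted earlier yields roughly the following interface (a sketch
of the generated names, not a verbatim expansion):

struct xen_9pfs_data_intf {        /* shared indexes page */
    RING_IDX in_cons, in_prod;
    uint8_t pad1[56];
    RING_IDX out_cons, out_prod;
    uint8_t pad2[56];
    RING_IDX ring_order;
    grant_ref_t ref[];             /* grant refs of the data pages */
};

struct xen_9pfs_data {             /* local mapping of the data pages */
    unsigned char *in;             /* half of the allocation */
    unsigned char *out;            /* the other half */
};

/* plus static inline helpers: */
RING_IDX xen_9pfs_mask(RING_IDX idx, RING_IDX ring_size);
RING_IDX xen_9pfs_queued(RING_IDX prod, RING_IDX cons, RING_IDX ring_size);
void xen_9pfs_read_packet(const unsigned char *buf, RING_IDX masked_prod,
                          RING_IDX *masked_cons, RING_IDX ring_size,
                          void *opaque, size_t size);
void xen_9pfs_write_packet(unsigned char *buf, RING_IDX *masked_prod,
                           RING_IDX masked_cons, RING_IDX ring_size,
                           const void *opaque, size_t size);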
Introduce the Xen 9pfs backend: add struct XenDevOps to register as a
Xen backend and add struct V9fsTransport to register as v9fs transport.
All functions are empty stubs for now.
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
Reviewed-by: Greg Kurz <groug@kaod.org>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
CC: Greg Kurz <groug@kaod.org>
---
hw/9pfs/xen-9p-backend.c | 96 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 96 insertions(+)
create mode 100644 hw/9pfs/xen-9p-backend.c
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
new file mode 100644
index 0000000..35032d3
--- /dev/null
+++ b/hw/9pfs/xen-9p-backend.c
@@ -0,0 +1,96 @@
+/*
+ * Xen 9p backend
+ *
+ * Copyright Aporeto 2017
+ *
+ * Authors:
+ * Stefano Stabellini <stefano@aporeto.com>
+ *
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/hw.h"
+#include "hw/9pfs/9p.h"
+#include "hw/xen/xen_backend.h"
+#include "xen_9pfs.h"
+#include "qemu/config-file.h"
+#include "fsdev/qemu-fsdev.h"
+
+typedef struct Xen9pfsDev {
+ struct XenDevice xendev; /* must be first */
+} Xen9pfsDev;
+
+static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
+ size_t offset,
+ const char *fmt,
+ va_list ap)
+{
+ return 0;
+}
+
+static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
+ size_t offset,
+ const char *fmt,
+ va_list ap)
+{
+ return 0;
+}
+
+static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
+ struct iovec **piov,
+ unsigned int *pniov)
+{
+}
+
+static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
+ struct iovec **piov,
+ unsigned int *pniov,
+ size_t size)
+{
+}
+
+static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
+{
+}
+
+static const struct V9fsTransport xen_9p_transport = {
+ .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
+ .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
+ .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
+ .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
+ .push_and_notify = xen_9pfs_push_and_notify,
+};
+
+static int xen_9pfs_init(struct XenDevice *xendev)
+{
+ return 0;
+}
+
+static int xen_9pfs_free(struct XenDevice *xendev)
+{
+ return -1;
+}
+
+static int xen_9pfs_connect(struct XenDevice *xendev)
+{
+ return 0;
+}
+
+static void xen_9pfs_alloc(struct XenDevice *xendev)
+{
+}
+
+static void xen_9pfs_disconnect(struct XenDevice *xendev)
+{
+}
+
+struct XenDevOps xen_9pfs_ops = {
+ .size = sizeof(Xen9pfsDev),
+ .flags = DEVOPS_FLAG_NEED_GNTDEV,
+ .alloc = xen_9pfs_alloc,
+ .init = xen_9pfs_init,
+ .initialise = xen_9pfs_connect,
+ .disconnect = xen_9pfs_disconnect,
+ .free = xen_9pfs_free,
+};
--
1.9.1
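Note that the ops table above only takes effect once it is registered with the
generic Xen backend machinery; presumably a follow-up hooks it up along these
lines (hypothetical wiring, not part of this patch):

/* Assumed: the existing xen_be_register() helper from hw/xen/xen_backend. */
xen_be_register("9pfs", &xen_9pfs_ops);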
On 14/03/17 00:55, Stefano Stabellini wrote:
> Introduce the Xen 9pfs backend: add struct XenDevOps to register as a
> Xen backend and add struct V9fsTransport to register as v9fs transport.
>
> All functions are empty stubs for now.
>
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> Reviewed-by: Greg Kurz <groug@kaod.org>
> CC: anthony.perard@citrix.com
> CC: jgross@suse.com
> CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> CC: Greg Kurz <groug@kaod.org>
> ---
> hw/9pfs/xen-9p-backend.c | 96 ++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 96 insertions(+)
> create mode 100644 hw/9pfs/xen-9p-backend.c
>
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> new file mode 100644
> index 0000000..35032d3
> --- /dev/null
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -0,0 +1,96 @@
> +/*
> + * Xen 9p backend
> + *
> + * Copyright Aporeto 2017
> + *
> + * Authors:
> + * Stefano Stabellini <stefano@aporeto.com>
> + *
> + */
> +
> +#include "qemu/osdep.h"
> +
> +#include "hw/hw.h"
> +#include "hw/9pfs/9p.h"
> +#include "hw/xen/xen_backend.h"
> +#include "xen_9pfs.h"
> +#include "qemu/config-file.h"
> +#include "fsdev/qemu-fsdev.h"
> +
> +typedef struct Xen9pfsDev {
> + struct XenDevice xendev; /* must be first */
> +} Xen9pfsDev;
> +
> +static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
> + size_t offset,
> + const char *fmt,
> + va_list ap)
> +{
> + return 0;
> +}
> +
> +static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
> + size_t offset,
> + const char *fmt,
> + va_list ap)
> +{
> + return 0;
> +}
> +
> +static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
> + struct iovec **piov,
> + unsigned int *pniov)
> +{
> +}
> +
> +static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
> + struct iovec **piov,
> + unsigned int *pniov,
> + size_t size)
> +{
> +}
> +
> +static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
> +{
> +}
> +
> +static const struct V9fsTransport xen_9p_transport = {
> + .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
> + .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
> + .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
> + .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
> + .push_and_notify = xen_9pfs_push_and_notify,
> +};
> +
> +static int xen_9pfs_init(struct XenDevice *xendev)
> +{
> + return 0;
> +}
> +
> +static int xen_9pfs_free(struct XenDevice *xendev)
> +{
> + return -1;
> +}
> +
> +static int xen_9pfs_connect(struct XenDevice *xendev)
> +{
> + return 0;
> +}
> +
> +static void xen_9pfs_alloc(struct XenDevice *xendev)
> +{
> +}
> +
> +static void xen_9pfs_disconnect(struct XenDevice *xendev)
> +{
> +}
> +
> +struct XenDevOps xen_9pfs_ops = {
> + .size = sizeof(Xen9pfsDev),
> + .flags = DEVOPS_FLAG_NEED_GNTDEV,
> + .alloc = xen_9pfs_alloc,
> + .init = xen_9pfs_init,
> + .initialise = xen_9pfs_connect,
Alignment?
> + .disconnect = xen_9pfs_disconnect,
> + .free = xen_9pfs_free,
> +};
>
Juergen
On Tue, 14 Mar 2017, Juergen Gross wrote:
> On 14/03/17 00:55, Stefano Stabellini wrote:
> > Introduce the Xen 9pfs backend: add struct XenDevOps to register as a
> > Xen backend and add struct V9fsTransport to register as v9fs transport.
> >
> > All functions are empty stubs for now.
> >
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > Reviewed-by: Greg Kurz <groug@kaod.org>
> > CC: anthony.perard@citrix.com
> > CC: jgross@suse.com
> > CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> > CC: Greg Kurz <groug@kaod.org>
> > ---
> > hw/9pfs/xen-9p-backend.c | 96 ++++++++++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 96 insertions(+)
> > create mode 100644 hw/9pfs/xen-9p-backend.c
> >
> > diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> > new file mode 100644
> > index 0000000..35032d3
> > --- /dev/null
> > +++ b/hw/9pfs/xen-9p-backend.c
> > @@ -0,0 +1,96 @@
> > +/*
> > + * Xen 9p backend
> > + *
> > + * Copyright Aporeto 2017
> > + *
> > + * Authors:
> > + * Stefano Stabellini <stefano@aporeto.com>
> > + *
> > + */
> > +
> > +#include "qemu/osdep.h"
> > +
> > +#include "hw/hw.h"
> > +#include "hw/9pfs/9p.h"
> > +#include "hw/xen/xen_backend.h"
> > +#include "xen_9pfs.h"
> > +#include "qemu/config-file.h"
> > +#include "fsdev/qemu-fsdev.h"
> > +
> > +typedef struct Xen9pfsDev {
> > + struct XenDevice xendev; /* must be first */
> > +} Xen9pfsDev;
> > +
> > +static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
> > + size_t offset,
> > + const char *fmt,
> > + va_list ap)
> > +{
> > + return 0;
> > +}
> > +
> > +static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
> > + size_t offset,
> > + const char *fmt,
> > + va_list ap)
> > +{
> > + return 0;
> > +}
> > +
> > +static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
> > + struct iovec **piov,
> > + unsigned int *pniov)
> > +{
> > +}
> > +
> > +static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
> > + struct iovec **piov,
> > + unsigned int *pniov,
> > + size_t size)
> > +{
> > +}
> > +
> > +static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
> > +{
> > +}
> > +
> > +static const struct V9fsTransport xen_9p_transport = {
> > + .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
> > + .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
> > + .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
> > + .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
> > + .push_and_notify = xen_9pfs_push_and_notify,
> > +};
> > +
> > +static int xen_9pfs_init(struct XenDevice *xendev)
> > +{
> > + return 0;
> > +}
> > +
> > +static int xen_9pfs_free(struct XenDevice *xendev)
> > +{
> > + return -1;
> > +}
> > +
> > +static int xen_9pfs_connect(struct XenDevice *xendev)
> > +{
> > + return 0;
> > +}
> > +
> > +static void xen_9pfs_alloc(struct XenDevice *xendev)
> > +{
> > +}
> > +
> > +static void xen_9pfs_disconnect(struct XenDevice *xendev)
> > +{
> > +}
> > +
> > +struct XenDevOps xen_9pfs_ops = {
> > + .size = sizeof(Xen9pfsDev),
> > + .flags = DEVOPS_FLAG_NEED_GNTDEV,
> > + .alloc = xen_9pfs_alloc,
> > + .init = xen_9pfs_init,
> > + .initialise = xen_9pfs_connect,
>
> Alignment?
I'll fix, thanks
> > + .disconnect = xen_9pfs_disconnect,
> > + .free = xen_9pfs_free,
> > +};
> >
Write the limits of the backend to xenstore. Connect to the frontend.
Upon connection, allocate the rings according to the protocol
specification.
Initialize a QEMUBH to schedule work upon receiving an event channel
notification from the frontend.
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
CC: Greg Kurz <groug@kaod.org>
---
hw/9pfs/xen-9p-backend.c | 159 ++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 158 insertions(+), 1 deletion(-)
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 35032d3..0e4a133 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -17,8 +17,35 @@
#include "qemu/config-file.h"
#include "fsdev/qemu-fsdev.h"
+#define VERSIONS "1"
+#define MAX_RINGS 8
+#define MAX_RING_ORDER 8
+
+struct Xen9pfsRing {
+ struct Xen9pfsDev *priv;
+
+ int ref;
+ xenevtchn_handle *evtchndev;
+ int evtchn;
+ int local_port;
+ struct xen_9pfs_data_intf *intf;
+ unsigned char *data;
+ struct xen_9pfs_data ring;
+
+ QEMUBH *bh;
+
+ /* local copies, so that we can read/write PDU data directly from
+ * the ring */
+ RING_IDX out_cons, out_size, in_cons;
+ bool inprogress;
+};
+
typedef struct Xen9pfsDev {
struct XenDevice xendev; /* must be first */
+ V9fsState state;
+
+ int num_rings;
+ struct Xen9pfsRing *rings;
} Xen9pfsDev;
static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
@@ -67,22 +94,152 @@ static int xen_9pfs_init(struct XenDevice *xendev)
return 0;
}
+static void xen_9pfs_bh(void *opaque)
+{
+}
+
+static void xen_9pfs_evtchn_event(void *opaque)
+{
+}
+
static int xen_9pfs_free(struct XenDevice *xendev)
{
- return -1;
+ int i;
+ struct Xen9pfsDev *xen_9pdev = container_of(xendev, struct Xen9pfsDev, xendev);
+
+ for (i = 0; i < xen_9pdev->num_rings; i++) {
+ if (xen_9pdev->rings[i].data != NULL) {
+ xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
+ xen_9pdev->rings[i].data,
+ (1 << XEN_9PFS_RING_ORDER));
+ }
+ if (xen_9pdev->rings[i].intf != NULL) {
+ xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
+ xen_9pdev->rings[i].intf,
+ 1);
+ }
+ if (xen_9pdev->rings[i].evtchndev > 0) {
+ qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
+ NULL, NULL, NULL);
+ xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, xen_9pdev->rings[i].local_port);
+ }
+ if (xen_9pdev->rings[i].bh != NULL) {
+ qemu_bh_delete(xen_9pdev->rings[i].bh);
+ }
+ }
+ g_free(xen_9pdev->rings);
+ return 0;
}
static int xen_9pfs_connect(struct XenDevice *xendev)
{
+ int i;
+ struct Xen9pfsDev *xen_9pdev = container_of(xendev, struct Xen9pfsDev, xendev);
+ V9fsState *s = &xen_9pdev->state;
+ QemuOpts *fsdev;
+ char *security_model, *path;
+
+ if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
+ &xen_9pdev->num_rings) == -1 ||
+ xen_9pdev->num_rings > MAX_RINGS) {
+ return -1;
+ }
+
+ xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(struct Xen9pfsRing));
+ for (i = 0; i < xen_9pdev->num_rings; i++) {
+ char str[16];
+
+ xen_9pdev->rings[i].priv = xen_9pdev;
+ xen_9pdev->rings[i].evtchn = -1;
+ xen_9pdev->rings[i].local_port = -1;
+
+ sprintf(str, "ring-ref%u", i);
+ if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
+ &xen_9pdev->rings[i].ref) == -1) {
+ goto out;
+ }
+ sprintf(str, "event-channel-%u", i);
+ if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
+ &xen_9pdev->rings[i].evtchn) == -1) {
+ goto out;
+ }
+
+ xen_9pdev->rings[i].intf = xengnttab_map_grant_ref(
+ xen_9pdev->xendev.gnttabdev,
+ xen_9pdev->xendev.dom,
+ xen_9pdev->rings[i].ref,
+ PROT_READ | PROT_WRITE);
+ if (!xen_9pdev->rings[i].intf) {
+ goto out;
+ }
+ xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs(
+ xen_9pdev->xendev.gnttabdev,
+ (1 << XEN_9PFS_RING_ORDER),
+ xen_9pdev->xendev.dom,
+ xen_9pdev->rings[i].intf->ref,
+ PROT_READ | PROT_WRITE);
+ if (!xen_9pdev->rings[i].data) {
+ goto out;
+ }
+ xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
+ xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + XEN_9PFS_RING_SIZE;
+
+ xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
+ xen_9pdev->rings[i].out_cons = 0;
+ xen_9pdev->rings[i].out_size = 0;
+ xen_9pdev->rings[i].inprogress = false;
+
+
+ xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
+ if (xen_9pdev->rings[i].evtchndev == NULL) {
+ goto out;
+ }
+ fcntl(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), F_SETFD, FD_CLOEXEC);
+ xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
+ (xen_9pdev->rings[i].evtchndev, xendev->dom, xen_9pdev->rings[i].evtchn);
+ if (xen_9pdev->rings[i].local_port == -1) {
+ xen_pv_printf(xendev, 0, "xenevtchn_bind_interdomain failed port=%d\n",
+ xen_9pdev->rings[i].evtchn);
+ goto out;
+ }
+ xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
+ qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
+ xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
+ }
+
+ security_model = xenstore_read_be_str(xendev, "security_model");
+ path = xenstore_read_be_str(xendev, "path");
+ s->fsconf.fsdev_id = g_malloc(strlen("xen9p") + 6);
+ sprintf(s->fsconf.fsdev_id, "xen9p%d", xendev->dev);
+ s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
+ v9fs_register_transport(s, &xen_9p_transport);
+ fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
+ s->fsconf.tag,
+ 1, NULL);
+ qemu_opt_set(fsdev, "fsdriver", "local", NULL);
+ qemu_opt_set(fsdev, "path", path, NULL);
+ qemu_opt_set(fsdev, "security_model", security_model, NULL);
+ qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
+ qemu_fsdev_add(fsdev);
+ v9fs_device_realize_common(s, NULL);
+
return 0;
+
+out:
+ xen_9pfs_free(xendev);
+ return -1;
}
static void xen_9pfs_alloc(struct XenDevice *xendev)
{
+ xenstore_write_be_str(xendev, "versions", VERSIONS);
+ xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
+ xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
}
static void xen_9pfs_disconnect(struct XenDevice *xendev)
{
+ /* Dynamic hotplug of PV filesystems at runtime is not supported. */
}
struct XenDevOps xen_9pfs_ops = {
--
1.9.1
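Pulling the negotiation together, the xenstore nodes touched by this patch are
roughly the following (a sketch; exact paths follow the usual per-device
backend/frontend directories):

backend side (written in xen_9pfs_alloc, plus toolstack-provided nodes
read in xen_9pfs_connect):
    versions            = "1"
    max-rings           = 8
    max-ring-page-order = 8
    security_model, path

frontend side (read in xen_9pfs_connect):
    num-rings
    ring-ref0 .. ring-ref<num-rings - 1>
    event-channel-0 .. event-channel-<num-rings - 1>
    tag                   (the 9p mount tag)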
On 14/03/17 00:55, Stefano Stabellini wrote:
> Write the limits of the backend to xenstore. Connect to the frontend.
> Upon connection, allocate the rings according to the protocol
> specification.
>
> Initialize a QEMUBH to schedule work upon receiving an event channel
> notification from the frontend.
>
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> CC: anthony.perard@citrix.com
> CC: jgross@suse.com
> CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> CC: Greg Kurz <groug@kaod.org>
> ---
> hw/9pfs/xen-9p-backend.c | 159 ++++++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 158 insertions(+), 1 deletion(-)
>
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> index 35032d3..0e4a133 100644
> --- a/hw/9pfs/xen-9p-backend.c
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -17,8 +17,35 @@
> #include "qemu/config-file.h"
> #include "fsdev/qemu-fsdev.h"
>
> +#define VERSIONS "1"
> +#define MAX_RINGS 8
> +#define MAX_RING_ORDER 8
> +
> +struct Xen9pfsRing {
> + struct Xen9pfsDev *priv;
> +
> + int ref;
> + xenevtchn_handle *evtchndev;
> + int evtchn;
> + int local_port;
> + struct xen_9pfs_data_intf *intf;
> + unsigned char *data;
> + struct xen_9pfs_data ring;
> +
> + QEMUBH *bh;
> +
> + /* local copies, so that we can read/write PDU data directly from
> + * the ring */
> + RING_IDX out_cons, out_size, in_cons;
> + bool inprogress;
> +};
> +
> typedef struct Xen9pfsDev {
> struct XenDevice xendev; /* must be first */
> + V9fsState state;
> +
> + int num_rings;
> + struct Xen9pfsRing *rings;
> } Xen9pfsDev;
>
> static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
> @@ -67,22 +94,152 @@ static int xen_9pfs_init(struct XenDevice *xendev)
> return 0;
> }
>
> +static void xen_9pfs_bh(void *opaque)
> +{
> +}
> +
> +static void xen_9pfs_evtchn_event(void *opaque)
> +{
> +}
> +
> static int xen_9pfs_free(struct XenDevice *xendev)
> {
> - return -1;
> + int i;
> + struct Xen9pfsDev *xen_9pdev = container_of(xendev, struct Xen9pfsDev, xendev);
> +
> + for (i = 0; i < xen_9pdev->num_rings; i++) {
> + if (xen_9pdev->rings[i].data != NULL) {
> + xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
> + xen_9pdev->rings[i].data,
> + (1 << XEN_9PFS_RING_ORDER));
> + }
> + if (xen_9pdev->rings[i].intf != NULL) {
> + xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
> + xen_9pdev->rings[i].intf,
> + 1);
> + }
> + if (xen_9pdev->rings[i].evtchndev > 0) {
> + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
> + NULL, NULL, NULL);
> + xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, xen_9pdev->rings[i].local_port);
> + }
> + if (xen_9pdev->rings[i].bh != NULL) {
> + qemu_bh_delete(xen_9pdev->rings[i].bh);
> + }
> + }
> + g_free(xen_9pdev->rings);
> + return 0;
> }
>
> static int xen_9pfs_connect(struct XenDevice *xendev)
> {
> + int i;
> + struct Xen9pfsDev *xen_9pdev = container_of(xendev, struct Xen9pfsDev, xendev);
> + V9fsState *s = &xen_9pdev->state;
> + QemuOpts *fsdev;
> + char *security_model, *path;
> +
> + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
> + &xen_9pdev->num_rings) == -1 ||
> + xen_9pdev->num_rings > MAX_RINGS) {
What if num_rings is < 1?
> + return -1;
> + }
> +
> + xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(struct Xen9pfsRing));
> + for (i = 0; i < xen_9pdev->num_rings; i++) {
> + char str[16];
> +
> + xen_9pdev->rings[i].priv = xen_9pdev;
> + xen_9pdev->rings[i].evtchn = -1;
> + xen_9pdev->rings[i].local_port = -1;
> +
> + sprintf(str, "ring-ref%u", i);
use g_strdup_printf()?
> + if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
> + &xen_9pdev->rings[i].ref) == -1) {
> + goto out;
> + }
> + sprintf(str, "event-channel-%u", i);
use g_strdup_printf()?
Juergen
On Tue, 14 Mar 2017, Juergen Gross wrote:
> On 14/03/17 00:55, Stefano Stabellini wrote:
> > Write the limits of the backend to xenstore. Connect to the frontend.
> > Upon connection, allocate the rings according to the protocol
> > specification.
> >
> > Initialize a QEMUBH to schedule work upon receiving an event channel
> > notification from the frontend.
> >
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > CC: anthony.perard@citrix.com
> > CC: jgross@suse.com
> > CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> > CC: Greg Kurz <groug@kaod.org>
> > ---
> > hw/9pfs/xen-9p-backend.c | 159 ++++++++++++++++++++++++++++++++++++++++++++++-
> > 1 file changed, 158 insertions(+), 1 deletion(-)
> >
> > diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> > index 35032d3..0e4a133 100644
> > --- a/hw/9pfs/xen-9p-backend.c
> > +++ b/hw/9pfs/xen-9p-backend.c
> > @@ -17,8 +17,35 @@
> > #include "qemu/config-file.h"
> > #include "fsdev/qemu-fsdev.h"
> >
> > +#define VERSIONS "1"
> > +#define MAX_RINGS 8
> > +#define MAX_RING_ORDER 8
> > +
> > +struct Xen9pfsRing {
> > + struct Xen9pfsDev *priv;
> > +
> > + int ref;
> > + xenevtchn_handle *evtchndev;
> > + int evtchn;
> > + int local_port;
> > + struct xen_9pfs_data_intf *intf;
> > + unsigned char *data;
> > + struct xen_9pfs_data ring;
> > +
> > + QEMUBH *bh;
> > +
> > + /* local copies, so that we can read/write PDU data directly from
> > + * the ring */
> > + RING_IDX out_cons, out_size, in_cons;
> > + bool inprogress;
> > +};
> > +
> > typedef struct Xen9pfsDev {
> > struct XenDevice xendev; /* must be first */
> > + V9fsState state;
> > +
> > + int num_rings;
> > + struct Xen9pfsRing *rings;
> > } Xen9pfsDev;
> >
> > static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
> > @@ -67,22 +94,152 @@ static int xen_9pfs_init(struct XenDevice *xendev)
> > return 0;
> > }
> >
> > +static void xen_9pfs_bh(void *opaque)
> > +{
> > +}
> > +
> > +static void xen_9pfs_evtchn_event(void *opaque)
> > +{
> > +}
> > +
> > static int xen_9pfs_free(struct XenDevice *xendev)
> > {
> > - return -1;
> > + int i;
> > + struct Xen9pfsDev *xen_9pdev = container_of(xendev, struct Xen9pfsDev, xendev);
> > +
> > + for (i = 0; i < xen_9pdev->num_rings; i++) {
> > + if (xen_9pdev->rings[i].data != NULL) {
> > + xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
> > + xen_9pdev->rings[i].data,
> > + (1 << XEN_9PFS_RING_ORDER));
> > + }
> > + if (xen_9pdev->rings[i].intf != NULL) {
> > + xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
> > + xen_9pdev->rings[i].intf,
> > + 1);
> > + }
> > + if (xen_9pdev->rings[i].evtchndev > 0) {
> > + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
> > + NULL, NULL, NULL);
> > + xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, xen_9pdev->rings[i].local_port);
> > + }
> > + if (xen_9pdev->rings[i].bh != NULL) {
> > + qemu_bh_delete(xen_9pdev->rings[i].bh);
> > + }
> > + }
> > + g_free(xen_9pdev->rings);
> > + return 0;
> > }
> >
> > static int xen_9pfs_connect(struct XenDevice *xendev)
> > {
> > + int i;
> > + struct Xen9pfsDev *xen_9pdev = container_of(xendev, struct Xen9pfsDev, xendev);
> > + V9fsState *s = &xen_9pdev->state;
> > + QemuOpts *fsdev;
> > + char *security_model, *path;
> > +
> > + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
> > + &xen_9pdev->num_rings) == -1 ||
> > + xen_9pdev->num_rings > MAX_RINGS) {
>
> What if num_rings is < 1?
Good point, I'll add a check for that
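Something along these lines (sketch):

    if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
                             &xen_9pdev->num_rings) == -1 ||
        xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
        return -1;
    }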
> > + return -1;
> > + }
> > +
> > + xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(struct Xen9pfsRing));
> > + for (i = 0; i < xen_9pdev->num_rings; i++) {
> > + char str[16];
> > +
> > + xen_9pdev->rings[i].priv = xen_9pdev;
> > + xen_9pdev->rings[i].evtchn = -1;
> > + xen_9pdev->rings[i].local_port = -1;
> > +
> > + sprintf(str, "ring-ref%u", i);
>
> use g_strdup_printf()?
OK
> > + if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
> > + &xen_9pdev->rings[i].ref) == -1) {
> > + goto out;
> > + }
> > + sprintf(str, "event-channel-%u", i);
>
> use g_strdup_printf()?
Sure
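E.g. (sketch):

    char *str = g_strdup_printf("ring-ref%u", i);
    if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                             &xen_9pdev->rings[i].ref) == -1) {
        g_free(str);
        goto out;
    }
    g_free(str);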
On Mon, 13 Mar 2017 16:55:56 -0700
Stefano Stabellini <sstabellini@kernel.org> wrote:
> Write the limits of the backend to xenstore. Connect to the frontend.
> Upon connection, allocate the rings according to the protocol
> specification.
>
> Initialize a QEMUBH to schedule work upon receiving an event channel
> notification from the frontend.
>
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> CC: anthony.perard@citrix.com
> CC: jgross@suse.com
> CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> CC: Greg Kurz <groug@kaod.org>
> ---
> hw/9pfs/xen-9p-backend.c | 159 ++++++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 158 insertions(+), 1 deletion(-)
>
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> index 35032d3..0e4a133 100644
> --- a/hw/9pfs/xen-9p-backend.c
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -17,8 +17,35 @@
> #include "qemu/config-file.h"
> #include "fsdev/qemu-fsdev.h"
>
> +#define VERSIONS "1"
> +#define MAX_RINGS 8
> +#define MAX_RING_ORDER 8
> +
> +struct Xen9pfsRing {
> + struct Xen9pfsDev *priv;
> +
> + int ref;
> + xenevtchn_handle *evtchndev;
> + int evtchn;
> + int local_port;
> + struct xen_9pfs_data_intf *intf;
> + unsigned char *data;
> + struct xen_9pfs_data ring;
> +
> + QEMUBH *bh;
> +
> + /* local copies, so that we can read/write PDU data directly from
> + * the ring */
> + RING_IDX out_cons, out_size, in_cons;
> + bool inprogress;
> +};
> +
QEMU Coding style requires a typedef. FWIW, maybe you could also convert
other structured types like XenDevice or XenDevOps.
> typedef struct Xen9pfsDev {
> struct XenDevice xendev; /* must be first */
> + V9fsState state;
> +
> + int num_rings;
> + struct Xen9pfsRing *rings;
> } Xen9pfsDev;
>
> static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
> @@ -67,22 +94,152 @@ static int xen_9pfs_init(struct XenDevice *xendev)
> return 0;
> }
>
> +static void xen_9pfs_bh(void *opaque)
> +{
> +}
> +
> +static void xen_9pfs_evtchn_event(void *opaque)
> +{
> +}
> +
> static int xen_9pfs_free(struct XenDevice *xendev)
> {
> - return -1;
> + int i;
> + struct Xen9pfsDev *xen_9pdev = container_of(xendev, struct Xen9pfsDev, xendev);
> +
Coding style.
> + for (i = 0; i < xen_9pdev->num_rings; i++) {
> + if (xen_9pdev->rings[i].data != NULL) {
> + xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
> + xen_9pdev->rings[i].data,
> + (1 << XEN_9PFS_RING_ORDER));
> + }
> + if (xen_9pdev->rings[i].intf != NULL) {
> + xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
> + xen_9pdev->rings[i].intf,
> + 1);
> + }
> + if (xen_9pdev->rings[i].evtchndev > 0) {
> + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
> + NULL, NULL, NULL);
> + xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, xen_9pdev->rings[i].local_port);
> + }
> + if (xen_9pdev->rings[i].bh != NULL) {
> + qemu_bh_delete(xen_9pdev->rings[i].bh);
> + }
> + }
> + g_free(xen_9pdev->rings);
> + return 0;
> }
>
> static int xen_9pfs_connect(struct XenDevice *xendev)
> {
> + int i;
> + struct Xen9pfsDev *xen_9pdev = container_of(xendev, struct Xen9pfsDev, xendev);
Coding style.
> + V9fsState *s = &xen_9pdev->state;
> + QemuOpts *fsdev;
> + char *security_model, *path;
> +
> + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
> + &xen_9pdev->num_rings) == -1 ||
> + xen_9pdev->num_rings > MAX_RINGS) {
> + return -1;
> + }
> +
> + xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(struct Xen9pfsRing));
> + for (i = 0; i < xen_9pdev->num_rings; i++) {
> + char str[16];
> +
> + xen_9pdev->rings[i].priv = xen_9pdev;
> + xen_9pdev->rings[i].evtchn = -1;
> + xen_9pdev->rings[i].local_port = -1;
> +
> + sprintf(str, "ring-ref%u", i);
> + if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
> + &xen_9pdev->rings[i].ref) == -1) {
> + goto out;
> + }
> + sprintf(str, "event-channel-%u", i);
> + if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
> + &xen_9pdev->rings[i].evtchn) == -1) {
> + goto out;
> + }
> +
> + xen_9pdev->rings[i].intf = xengnttab_map_grant_ref(
> + xen_9pdev->xendev.gnttabdev,
> + xen_9pdev->xendev.dom,
> + xen_9pdev->rings[i].ref,
> + PROT_READ | PROT_WRITE);
> + if (!xen_9pdev->rings[i].intf) {
> + goto out;
> + }
> + xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs(
> + xen_9pdev->xendev.gnttabdev,
> + (1 << XEN_9PFS_RING_ORDER),
> + xen_9pdev->xendev.dom,
> + xen_9pdev->rings[i].intf->ref,
> + PROT_READ | PROT_WRITE);
> + if (!xen_9pdev->rings[i].data) {
> + goto out;
> + }
> + xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
> + xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + XEN_9PFS_RING_SIZE;
> +
> + xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
> + xen_9pdev->rings[i].out_cons = 0;
> + xen_9pdev->rings[i].out_size = 0;
> + xen_9pdev->rings[i].inprogress = false;
> +
> +
> + xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
> + if (xen_9pdev->rings[i].evtchndev == NULL) {
> + goto out;
> + }
> + fcntl(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), F_SETFD, FD_CLOEXEC);
> + xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
> + (xen_9pdev->rings[i].evtchndev, xendev->dom, xen_9pdev->rings[i].evtchn);
> + if (xen_9pdev->rings[i].local_port == -1) {
> + xen_pv_printf(xendev, 0, "xenevtchn_bind_interdomain failed port=%d\n",
> + xen_9pdev->rings[i].evtchn);
> + goto out;
> + }
> + xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
> + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
> + xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
> + }
> +
> + security_model = xenstore_read_be_str(xendev, "security_model");
> + path = xenstore_read_be_str(xendev, "path");
> + s->fsconf.fsdev_id = g_malloc(strlen("xen9p") + 6);
> + sprintf(s->fsconf.fsdev_id, "xen9p%d", xendev->dev);
Please use g_strdup_printf().
> + s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
> + v9fs_register_transport(s, &xen_9p_transport);
> + fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
> + s->fsconf.tag,
> + 1, NULL);
> + qemu_opt_set(fsdev, "fsdriver", "local", NULL);
> + qemu_opt_set(fsdev, "path", path, NULL);
> + qemu_opt_set(fsdev, "security_model", security_model, NULL);
> + qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
> + qemu_fsdev_add(fsdev);
> + v9fs_device_realize_common(s, NULL);
> +
> return 0;
> +
> +out:
> + xen_9pfs_free(xendev);
> + return -1;
> }
>
> static void xen_9pfs_alloc(struct XenDevice *xendev)
> {
> + xenstore_write_be_str(xendev, "versions", VERSIONS);
> + xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
> + xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
> }
>
> static void xen_9pfs_disconnect(struct XenDevice *xendev)
> {
> + /* Dynamic hotplug of PV filesystems at runtime is not supported. */
> Does this mean that this function is never called? Or that it should
> report an error? Or print one?
In any case, it looks weird that it doesn't free the fsdev_id that
was allocated in xen_9pfs_connect().
> }
>
> struct XenDevOps xen_9pfs_ops = {
On Wed, 15 Mar 2017, Greg Kurz wrote:
> On Mon, 13 Mar 2017 16:55:56 -0700
> Stefano Stabellini <sstabellini@kernel.org> wrote:
>
> > Write the limits of the backend to xenstore. Connect to the frontend.
> > Upon connection, allocate the rings according to the protocol
> > specification.
> >
> > Initialize a QEMUBH to schedule work upon receiving an event channel
> > notification from the frontend.
> >
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > CC: anthony.perard@citrix.com
> > CC: jgross@suse.com
> > CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> > CC: Greg Kurz <groug@kaod.org>
> > ---
> > hw/9pfs/xen-9p-backend.c | 159 ++++++++++++++++++++++++++++++++++++++++++++++-
> > 1 file changed, 158 insertions(+), 1 deletion(-)
> >
> > diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> > index 35032d3..0e4a133 100644
> > --- a/hw/9pfs/xen-9p-backend.c
> > +++ b/hw/9pfs/xen-9p-backend.c
> > @@ -17,8 +17,35 @@
> > #include "qemu/config-file.h"
> > #include "fsdev/qemu-fsdev.h"
> >
> > +#define VERSIONS "1"
> > +#define MAX_RINGS 8
> > +#define MAX_RING_ORDER 8
> > +
> > +struct Xen9pfsRing {
> > + struct Xen9pfsDev *priv;
> > +
> > + int ref;
> > + xenevtchn_handle *evtchndev;
> > + int evtchn;
> > + int local_port;
> > + struct xen_9pfs_data_intf *intf;
> > + unsigned char *data;
> > + struct xen_9pfs_data ring;
> > +
> > + QEMUBH *bh;
> > +
> > + /* local copies, so that we can read/write PDU data directly from
> > + * the ring */
> > + RING_IDX out_cons, out_size, in_cons;
> > + bool inprogress;
> > +};
> > +
>
> QEMU Coding style requires a typedef.
OK
> FWIW, maybe you could also convert
> other structured types like XenDevice or XenDevOps.
I'll do in a separate series
> > typedef struct Xen9pfsDev {
> > struct XenDevice xendev; /* must be first */
> > + V9fsState state;
> > +
> > + int num_rings;
> > + struct Xen9pfsRing *rings;
> > } Xen9pfsDev;
> >
> > static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
> > @@ -67,22 +94,152 @@ static int xen_9pfs_init(struct XenDevice *xendev)
> > return 0;
> > }
> >
> > +static void xen_9pfs_bh(void *opaque)
> > +{
> > +}
> > +
> > +static void xen_9pfs_evtchn_event(void *opaque)
> > +{
> > +}
> > +
> > static int xen_9pfs_free(struct XenDevice *xendev)
> > {
> > - return -1;
> > + int i;
> > + struct Xen9pfsDev *xen_9pdev = container_of(xendev, struct Xen9pfsDev, xendev);
> > +
>
> Coding style.
OK
> > + for (i = 0; i < xen_9pdev->num_rings; i++) {
> > + if (xen_9pdev->rings[i].data != NULL) {
> > + xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
> > + xen_9pdev->rings[i].data,
> > + (1 << XEN_9PFS_RING_ORDER));
> > + }
> > + if (xen_9pdev->rings[i].intf != NULL) {
> > + xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
> > + xen_9pdev->rings[i].intf,
> > + 1);
> > + }
> > + if (xen_9pdev->rings[i].evtchndev > 0) {
> > + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
> > + NULL, NULL, NULL);
> > + xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, xen_9pdev->rings[i].local_port);
> > + }
> > + if (xen_9pdev->rings[i].bh != NULL) {
> > + qemu_bh_delete(xen_9pdev->rings[i].bh);
> > + }
> > + }
> > + g_free(xen_9pdev->rings);
> > + return 0;
> > }
> >
> > static int xen_9pfs_connect(struct XenDevice *xendev)
> > {
> > + int i;
> > + struct Xen9pfsDev *xen_9pdev = container_of(xendev, struct Xen9pfsDev, xendev);
>
> Coding style.
OK
> > + V9fsState *s = &xen_9pdev->state;
> > + QemuOpts *fsdev;
> > + char *security_model, *path;
> > +
> > + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
> > + &xen_9pdev->num_rings) == -1 ||
> > + xen_9pdev->num_rings > MAX_RINGS) {
> > + return -1;
> > + }
> > +
> > + xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(struct Xen9pfsRing));
> > + for (i = 0; i < xen_9pdev->num_rings; i++) {
> > + char str[16];
> > +
> > + xen_9pdev->rings[i].priv = xen_9pdev;
> > + xen_9pdev->rings[i].evtchn = -1;
> > + xen_9pdev->rings[i].local_port = -1;
> > +
> > + sprintf(str, "ring-ref%u", i);
> > + if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
> > + &xen_9pdev->rings[i].ref) == -1) {
> > + goto out;
> > + }
> > + sprintf(str, "event-channel-%u", i);
> > + if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
> > + &xen_9pdev->rings[i].evtchn) == -1) {
> > + goto out;
> > + }
> > +
> > + xen_9pdev->rings[i].intf = xengnttab_map_grant_ref(
> > + xen_9pdev->xendev.gnttabdev,
> > + xen_9pdev->xendev.dom,
> > + xen_9pdev->rings[i].ref,
> > + PROT_READ | PROT_WRITE);
> > + if (!xen_9pdev->rings[i].intf) {
> > + goto out;
> > + }
> > + xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs(
> > + xen_9pdev->xendev.gnttabdev,
> > + (1 << XEN_9PFS_RING_ORDER),
> > + xen_9pdev->xendev.dom,
> > + xen_9pdev->rings[i].intf->ref,
> > + PROT_READ | PROT_WRITE);
> > + if (!xen_9pdev->rings[i].data) {
> > + goto out;
> > + }
> > + xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
> > + xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + XEN_9PFS_RING_SIZE;
> > +
> > + xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
> > + xen_9pdev->rings[i].out_cons = 0;
> > + xen_9pdev->rings[i].out_size = 0;
> > + xen_9pdev->rings[i].inprogress = false;
> > +
> > +
> > + xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
> > + if (xen_9pdev->rings[i].evtchndev == NULL) {
> > + goto out;
> > + }
> > + fcntl(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), F_SETFD, FD_CLOEXEC);
> > + xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
> > + (xen_9pdev->rings[i].evtchndev, xendev->dom, xen_9pdev->rings[i].evtchn);
> > + if (xen_9pdev->rings[i].local_port == -1) {
> > + xen_pv_printf(xendev, 0, "xenevtchn_bind_interdomain failed port=%d\n",
> > + xen_9pdev->rings[i].evtchn);
> > + goto out;
> > + }
> > + xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
> > + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
> > + xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
> > + }
> > +
> > + security_model = xenstore_read_be_str(xendev, "security_model");
> > + path = xenstore_read_be_str(xendev, "path");
> > + s->fsconf.fsdev_id = g_malloc(strlen("xen9p") + 6);
> > + sprintf(s->fsconf.fsdev_id, "xen9p%d", xendev->dev);
>
> Please use g_strdup_printf().
I'll do
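Something like (sketch):

    s->fsconf.fsdev_id = g_strdup_printf("xen9p%d", xendev->dev);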
> > + s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
> > + v9fs_register_transport(s, &xen_9p_transport);
> > + fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
> > + s->fsconf.tag,
> > + 1, NULL);
> > + qemu_opt_set(fsdev, "fsdriver", "local", NULL);
> > + qemu_opt_set(fsdev, "path", path, NULL);
> > + qemu_opt_set(fsdev, "security_model", security_model, NULL);
> > + qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
> > + qemu_fsdev_add(fsdev);
> > + v9fs_device_realize_common(s, NULL);
> > +
> > return 0;
> > +
> > +out:
> > + xen_9pfs_free(xendev);
> > + return -1;
> > }
> >
> > static void xen_9pfs_alloc(struct XenDevice *xendev)
> > {
> > + xenstore_write_be_str(xendev, "versions", VERSIONS);
> > + xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
> > + xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
> > }
> >
> > static void xen_9pfs_disconnect(struct XenDevice *xendev)
> > {
> > + /* Dynamic hotplug of PV filesystems at runtime is not supported. */
>
> Does this mean that this function is never called ? Or that it should
> report an error ? Or print one ?
This function is called, but only at shutdown.
> In any case, it looks weird that it doesn't free the fsdev_id that
> was allocated in xen_9pfs_connect().
I'll free it in xen_9pfs_free
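A sketch of what that cleanup might look like, assuming xen_9pfs_free()
keeps its current signature (the field path follows the connect code above):

    static int xen_9pfs_free(struct XenDevice *xendev)
    {
        struct Xen9pfsDev *xen_9pdev = container_of(xendev,
                                                    struct Xen9pfsDev, xendev);

        /* release the id allocated in xen_9pfs_connect() */
        g_free(xen_9pdev->state.fsconf.fsdev_id);
        /* ... existing ring unmap/close teardown continues here ... */
        return 0;
    }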
> > }
> >
> > struct XenDevOps xen_9pfs_ops = {
>
>
Upon receiving an event channel notification from the frontend, schedule
the bottom half. From the bottom half, read one request from the ring,
create a pdu and call pdu_submit to handle it.
For now, only handle one request per ring at a time.
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
CC: Greg Kurz <groug@kaod.org>
---
hw/9pfs/xen-9p-backend.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 47 insertions(+)
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 0e4a133..741dd31 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -94,12 +94,59 @@ static int xen_9pfs_init(struct XenDevice *xendev)
return 0;
}
+static int xen_9pfs_receive(struct Xen9pfsRing *ring)
+{
+ struct xen_9pfs_header h;
+ RING_IDX cons, prod, masked_prod, masked_cons;
+ V9fsPDU *pdu;
+
+ if (ring->inprogress) {
+ return 0;
+ }
+
+ cons = ring->intf->out_cons;
+ prod = ring->intf->out_prod;
+ xen_rmb();
+
+ if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < sizeof(h)) {
+ return 0;
+ }
+ ring->inprogress = true;
+
+ masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+ masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
+ xen_9pfs_read_packet(ring->ring.out, masked_prod, &masked_cons,
+ XEN_9PFS_RING_SIZE, (uint8_t*) &h, sizeof(h));
+
+ pdu = pdu_alloc(&ring->priv->state);
+ pdu->size = h.size;
+ pdu->id = h.id;
+ pdu->tag = h.tag;
+ ring->out_size = h.size;
+ ring->out_cons = cons + h.size;
+
+ qemu_co_queue_init(&pdu->complete);
+ pdu_submit(pdu);
+
+ return 0;
+}
+
static void xen_9pfs_bh(void *opaque)
{
+ struct Xen9pfsRing *ring = opaque;
+ xen_9pfs_receive(ring);
}
static void xen_9pfs_evtchn_event(void *opaque)
{
+ struct Xen9pfsRing *ring = opaque;
+ evtchn_port_t port;
+
+ port = xenevtchn_pending(ring->evtchndev);
+ xenevtchn_unmask(ring->evtchndev, port);
+
+ qemu_bh_schedule(ring->bh);
}
static int xen_9pfs_free(struct XenDevice *xendev)
--
1.9.1
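For readers following the receive path above: the header pulled off the ring
is the standard 9P transport header, presumably laid out along these lines
(a sketch for orientation, not the exact definition from the Xen 9pfs
headers):

    /* sketch: 9P transport header, little-endian on the wire */
    struct xen_9pfs_header {
        uint32_t size;   /* total message length, including this header */
        uint8_t  id;     /* 9P message type */
        uint16_t tag;    /* request tag, echoed back in the reply */
    } __attribute__((packed));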
On Mon, 13 Mar 2017 16:55:57 -0700
Stefano Stabellini <sstabellini@kernel.org> wrote:
> Upon receiving an event channel notification from the frontend, schedule
> the bottom half. From the bottom half, read one request from the ring,
> create a pdu and call pdu_submit to handle it.
>
> For now, only handle one request per ring at a time.
>
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> CC: anthony.perard@citrix.com
> CC: jgross@suse.com
> CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> CC: Greg Kurz <groug@kaod.org>
> ---
> hw/9pfs/xen-9p-backend.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 47 insertions(+)
>
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> index 0e4a133..741dd31 100644
> --- a/hw/9pfs/xen-9p-backend.c
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -94,12 +94,59 @@ static int xen_9pfs_init(struct XenDevice *xendev)
> return 0;
> }
>
> +static int xen_9pfs_receive(struct Xen9pfsRing *ring)
Coding style for structured types.
> +{
> + struct xen_9pfs_header h;
> + RING_IDX cons, prod, masked_prod, masked_cons;
> + V9fsPDU *pdu;
> +
> + if (ring->inprogress) {
> + return 0;
> + }
> +
> + cons = ring->intf->out_cons;
> + prod = ring->intf->out_prod;
> + xen_rmb();
> +
> + if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < sizeof(h)) {
> + return 0;
> + }
> + ring->inprogress = true;
> +
> + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> +
> + xen_9pfs_read_packet(ring->ring.out, masked_prod, &masked_cons,
> + XEN_9PFS_RING_SIZE, (uint8_t*) &h, sizeof(h));
> +
> + pdu = pdu_alloc(&ring->priv->state);
pdu_alloc() can theoretically return NULL. Maybe add a comment to
indicate this won't happen because "for now [we] only handle one
request per ring at a time".
> + pdu->size = h.size;
> + pdu->id = h.id;
> + pdu->tag = h.tag;
> + ring->out_size = h.size;
> + ring->out_cons = cons + h.size;
> +
> + qemu_co_queue_init(&pdu->complete);
> + pdu_submit(pdu);
> +
> + return 0;
> +}
> +
> static void xen_9pfs_bh(void *opaque)
> {
> + struct Xen9pfsRing *ring = opaque;
Coding style for structured types.
> + xen_9pfs_receive(ring);
> }
>
> static void xen_9pfs_evtchn_event(void *opaque)
> {
> + struct Xen9pfsRing *ring = opaque;
Coding style for structured types.
> + evtchn_port_t port;
> +
> + port = xenevtchn_pending(ring->evtchndev);
> + xenevtchn_unmask(ring->evtchndev, port);
> +
> + qemu_bh_schedule(ring->bh);
> }
>
> static int xen_9pfs_free(struct XenDevice *xendev)
On Wed, 15 Mar 2017, Greg Kurz wrote:
> On Mon, 13 Mar 2017 16:55:57 -0700
> Stefano Stabellini <sstabellini@kernel.org> wrote:
>
> > Upon receiving an event channel notification from the frontend, schedule
> > the bottom half. From the bottom half, read one request from the ring,
> > create a pdu and call pdu_submit to handle it.
> >
> > For now, only handle one request per ring at a time.
> >
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > CC: anthony.perard@citrix.com
> > CC: jgross@suse.com
> > CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> > CC: Greg Kurz <groug@kaod.org>
> > ---
> > hw/9pfs/xen-9p-backend.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 47 insertions(+)
> >
> > diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> > index 0e4a133..741dd31 100644
> > --- a/hw/9pfs/xen-9p-backend.c
> > +++ b/hw/9pfs/xen-9p-backend.c
> > @@ -94,12 +94,59 @@ static int xen_9pfs_init(struct XenDevice *xendev)
> > return 0;
> > }
> >
> > +static int xen_9pfs_receive(struct Xen9pfsRing *ring)
>
> Coding style for structured types.
I'll fix
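For reference, the convention in question: QEMU structured types get a
CamelCase typedef and are then used without the struct keyword, e.g.:

    /* declared once, next to the struct definition */
    typedef struct Xen9pfsRing Xen9pfsRing;

    /* signatures and locals then use the typedef */
    static int xen_9pfs_receive(Xen9pfsRing *ring)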
> > +{
> > + struct xen_9pfs_header h;
> > + RING_IDX cons, prod, masked_prod, masked_cons;
> > + V9fsPDU *pdu;
> > +
> > + if (ring->inprogress) {
> > + return 0;
> > + }
> > +
> > + cons = ring->intf->out_cons;
> > + prod = ring->intf->out_prod;
> > + xen_rmb();
> > +
> > + if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < sizeof(h)) {
> > + return 0;
> > + }
> > + ring->inprogress = true;
> > +
> > + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> > + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> > +
> > + xen_9pfs_read_packet(ring->ring.out, masked_prod, &masked_cons,
> > + XEN_9PFS_RING_SIZE, (uint8_t*) &h, sizeof(h));
> > +
> > + pdu = pdu_alloc(&ring->priv->state);
>
> pdu_alloc() can theoretically return NULL. Maybe add a comment to
> indicate this won't happen because "for now [we] only handle one
> request per ring at a time".
OK
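A sketch of how that comment, plus a cheap guard, might look:

    pdu = pdu_alloc(&ring->priv->state);
    /*
     * pdu_alloc() returns NULL only when all PDUs are in flight; we
     * handle at most one request per ring at a time, so it cannot
     * fail here.
     */
    assert(pdu != NULL);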
> > + pdu->size = h.size;
> > + pdu->id = h.id;
> > + pdu->tag = h.tag;
> > + ring->out_size = h.size;
> > + ring->out_cons = cons + h.size;
> > +
> > + qemu_co_queue_init(&pdu->complete);
> > + pdu_submit(pdu);
> > +
> > + return 0;
> > +}
> > +
> > static void xen_9pfs_bh(void *opaque)
> > {
> > + struct Xen9pfsRing *ring = opaque;
>
> Coding style for structured types.
OK
> > + xen_9pfs_receive(ring);
> > }
> >
> > static void xen_9pfs_evtchn_event(void *opaque)
> > {
> > + struct Xen9pfsRing *ring = opaque;
>
> Coding style for structured types.
OK
> > + evtchn_port_t port;
> > +
> > + port = xenevtchn_pending(ring->evtchndev);
> > + xenevtchn_unmask(ring->evtchndev, port);
> > +
> > + qemu_bh_schedule(ring->bh);
> > }
> >
> > static int xen_9pfs_free(struct XenDevice *xendev)
On Mon, 13 Mar 2017 16:55:57 -0700
Stefano Stabellini <sstabellini@kernel.org> wrote:
> Upon receiving an event channel notification from the frontend, schedule
> the bottom half. From the bottom half, read one request from the ring,
> create a pdu and call pdu_submit to handle it.
>
> For now, only handle one request per ring at a time.
>
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> CC: anthony.perard@citrix.com
> CC: jgross@suse.com
> CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> CC: Greg Kurz <groug@kaod.org>
> ---
Oops, one more remark I forgot in my previous mail. See below.
> hw/9pfs/xen-9p-backend.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 47 insertions(+)
>
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> index 0e4a133..741dd31 100644
> --- a/hw/9pfs/xen-9p-backend.c
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -94,12 +94,59 @@ static int xen_9pfs_init(struct XenDevice *xendev)
> return 0;
> }
>
> +static int xen_9pfs_receive(struct Xen9pfsRing *ring)
> +{
> + struct xen_9pfs_header h;
> + RING_IDX cons, prod, masked_prod, masked_cons;
> + V9fsPDU *pdu;
> +
> + if (ring->inprogress) {
> + return 0;
> + }
> +
> + cons = ring->intf->out_cons;
> + prod = ring->intf->out_prod;
> + xen_rmb();
> +
> + if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < sizeof(h)) {
> + return 0;
> + }
> + ring->inprogress = true;
> +
> + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> +
> + xen_9pfs_read_packet(ring->ring.out, masked_prod, &masked_cons,
> + XEN_9PFS_RING_SIZE, (uint8_t*) &h, sizeof(h));
> +
> + pdu = pdu_alloc(&ring->priv->state);
> + pdu->size = h.size;
9P uses little-endian, so this should be:
pdu->size = le32_to_cpu(h.size);
> + pdu->id = h.id;
> + pdu->tag = h.tag;
and:
pdu->tag = le16_to_cpu(h.tag);
> + ring->out_size = h.size;
> + ring->out_cons = cons + h.size;
Same here.
> +
> + qemu_co_queue_init(&pdu->complete);
> + pdu_submit(pdu);
> +
> + return 0;
> +}
> +
> static void xen_9pfs_bh(void *opaque)
> {
> + struct Xen9pfsRing *ring = opaque;
> + xen_9pfs_receive(ring);
> }
>
> static void xen_9pfs_evtchn_event(void *opaque)
> {
> + struct Xen9pfsRing *ring = opaque;
> + evtchn_port_t port;
> +
> + port = xenevtchn_pending(ring->evtchndev);
> + xenevtchn_unmask(ring->evtchndev, port);
> +
> + qemu_bh_schedule(ring->bh);
> }
>
> static int xen_9pfs_free(struct XenDevice *xendev)
On Wed, 15 Mar 2017, Greg Kurz wrote:
> On Mon, 13 Mar 2017 16:55:57 -0700
> Stefano Stabellini <sstabellini@kernel.org> wrote:
>
> > Upon receiving an event channel notification from the frontend, schedule
> > the bottom half. From the bottom half, read one request from the ring,
> > create a pdu and call pdu_submit to handle it.
> >
> > For now, only handle one request per ring at a time.
> >
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > CC: anthony.perard@citrix.com
> > CC: jgross@suse.com
> > CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> > CC: Greg Kurz <groug@kaod.org>
> > ---
>
> Oops, one more remark I forgot in my previous mail.
>
> > hw/9pfs/xen-9p-backend.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 47 insertions(+)
> >
> > diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> > index 0e4a133..741dd31 100644
> > --- a/hw/9pfs/xen-9p-backend.c
> > +++ b/hw/9pfs/xen-9p-backend.c
> > @@ -94,12 +94,59 @@ static int xen_9pfs_init(struct XenDevice *xendev)
> > return 0;
> > }
> >
> > +static int xen_9pfs_receive(struct Xen9pfsRing *ring)
> > +{
> > + struct xen_9pfs_header h;
> > + RING_IDX cons, prod, masked_prod, masked_cons;
> > + V9fsPDU *pdu;
> > +
> > + if (ring->inprogress) {
> > + return 0;
> > + }
> > +
> > + cons = ring->intf->out_cons;
> > + prod = ring->intf->out_prod;
> > + xen_rmb();
> > +
> > + if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < sizeof(h)) {
> > + return 0;
> > + }
> > + ring->inprogress = true;
> > +
> > + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> > + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> > +
> > + xen_9pfs_read_packet(ring->ring.out, masked_prod, &masked_cons,
> > + XEN_9PFS_RING_SIZE, (uint8_t*) &h, sizeof(h));
> > +
> > + pdu = pdu_alloc(&ring->priv->state);
> > + pdu->size = h.size;
>
> 9P uses little-endian, so this should be:
>
> pdu->size = le32_to_cpu(h.size);
>
> > + pdu->id = h.id;
> > + pdu->tag = h.tag;
>
> and:
>
> pdu->tag = le16_to_cpu(h.tag);
>
> > + ring->out_size = h.size;
> > + ring->out_cons = cons + h.size;
>
> Same here.
OK, thanks. Xen doesn't support big endian today, but they don't hurt.
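Putting the three remarks together, the receive path would then read roughly
as follows (le32_to_cpu()/le16_to_cpu() are the standard QEMU byte-swap
helpers; id is a single byte and needs no conversion):

    pdu->size = le32_to_cpu(h.size);
    pdu->id = h.id;
    pdu->tag = le16_to_cpu(h.tag);
    ring->out_size = le32_to_cpu(h.size);
    ring->out_cons = cons + le32_to_cpu(h.size);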
> > +
> > + qemu_co_queue_init(&pdu->complete);
> > + pdu_submit(pdu);
> > +
> > + return 0;
> > +}
> > +
> > static void xen_9pfs_bh(void *opaque)
> > {
> > + struct Xen9pfsRing *ring = opaque;
> > + xen_9pfs_receive(ring);
> > }
> >
> > static void xen_9pfs_evtchn_event(void *opaque)
> > {
> > + struct Xen9pfsRing *ring = opaque;
> > + evtchn_port_t port;
> > +
> > + port = xenevtchn_pending(ring->evtchndev);
> > + xenevtchn_unmask(ring->evtchndev, port);
> > +
> > + qemu_bh_schedule(ring->bh);
> > }
> >
> > static int xen_9pfs_free(struct XenDevice *xendev)
>
>
Implement xen_9pfs_init_in/out_iov_from_pdu and
xen_9pfs_pdu_vmarshal/vunmarshal by creating a new sg pointing to the
data on the ring.
This is safe as we only handle one request per ring at any given time.
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
CC: Greg Kurz <groug@kaod.org>
---
hw/9pfs/xen-9p-backend.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 89 insertions(+), 2 deletions(-)
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 741dd31..d72a749 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -48,12 +48,77 @@ typedef struct Xen9pfsDev {
struct Xen9pfsRing *rings;
} Xen9pfsDev;
+static void xen_9pfs_in_sg(struct Xen9pfsRing *ring,
+ struct iovec *in_sg,
+ int *num,
+ uint32_t idx,
+ uint32_t size)
+{
+ RING_IDX cons, prod, masked_prod, masked_cons;
+
+ cons = ring->intf->in_cons;
+ prod = ring->intf->in_prod;
+ xen_rmb();
+ masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+ masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
+ if (masked_prod < masked_cons) {
+ in_sg[0].iov_base = ring->ring.in + masked_prod;
+ in_sg[0].iov_len = masked_cons - masked_prod;
+ *num = 1;
+ } else {
+ in_sg[0].iov_base = ring->ring.in + masked_prod;
+ in_sg[0].iov_len = XEN_9PFS_RING_SIZE - masked_prod;
+ in_sg[1].iov_base = ring->ring.in;
+ in_sg[1].iov_len = masked_cons;
+ *num = 2;
+ }
+}
+
+static void xen_9pfs_out_sg(struct Xen9pfsRing *ring,
+ struct iovec *out_sg,
+ int *num,
+ uint32_t idx)
+{
+ RING_IDX cons, prod, masked_prod, masked_cons;
+
+ cons = ring->intf->out_cons;
+ prod = ring->intf->out_prod;
+ xen_rmb();
+ masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+ masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
+ if (masked_cons < masked_prod) {
+ out_sg[0].iov_base = ring->ring.out + masked_cons;
+ out_sg[0].iov_len = ring->out_size;
+ *num = 1;
+ } else {
+ if (ring->out_size > (XEN_9PFS_RING_SIZE - masked_cons)) {
+ out_sg[0].iov_base = ring->ring.out + masked_cons;
+ out_sg[0].iov_len = XEN_9PFS_RING_SIZE - masked_cons;
+ out_sg[1].iov_base = ring->ring.out;
+ out_sg[1].iov_len = ring->out_size - (XEN_9PFS_RING_SIZE - masked_cons);
+ *num = 2;
+ } else {
+ out_sg[0].iov_base = ring->ring.out + masked_cons;
+ out_sg[0].iov_len = ring->out_size;
+ *num = 1;
+ }
+ }
+}
+
static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
size_t offset,
const char *fmt,
va_list ap)
{
- return 0;
+ struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
+ struct iovec in_sg[2];
+ int num;
+
+ xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
+ in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));
+ return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
}
static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
@@ -61,13 +126,27 @@ static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
const char *fmt,
va_list ap)
{
- return 0;
+ struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
+ struct iovec out_sg[2];
+ int num;
+
+ xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
+ out_sg, &num, pdu->idx);
+ return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
}
static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
struct iovec **piov,
unsigned int *pniov)
{
+ struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
+ struct Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
+ struct iovec *sg = g_malloc0(sizeof(*sg)*2);
+ int num;
+
+ xen_9pfs_out_sg(ring, sg, &num, pdu->idx);
+ *piov = sg;
+ *pniov = num;
}
static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
@@ -75,6 +154,14 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
unsigned int *pniov,
size_t size)
{
+ struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
+ struct Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
+ struct iovec *sg = g_malloc0(sizeof(*sg)*2);
+ int num;
+
+ xen_9pfs_in_sg(ring, sg, &num, pdu->idx, size);
+ *piov = sg;
+ *pniov = num;
}
static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
--
1.9.1
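The sg construction in this patch leans on the masked ring indexes;
conceptually the helpers reduce to something like the following (a sketch
for orientation, not the actual definitions generated by the Xen
flexible-ring macros):

    /* indexes grow monotonically; mask them into the buffer on access */
    static inline RING_IDX xen_9pfs_mask(RING_IDX idx, RING_IDX ring_size)
    {
        return idx & (ring_size - 1);   /* ring_size is a power of two */
    }

    /* bytes queued between producer and consumer; unsigned wrap-around
     * keeps this correct even after the indexes overflow */
    static inline RING_IDX xen_9pfs_queued(RING_IDX prod, RING_IDX cons,
                                           RING_IDX ring_size)
    {
        return prod - cons;
    }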
On Mon, 13 Mar 2017 16:55:58 -0700
Stefano Stabellini <sstabellini@kernel.org> wrote:
> Implement xen_9pfs_init_in/out_iov_from_pdu and
> xen_9pfs_pdu_vmarshal/vunmarshal by creating a new sg pointing to the
> data on the ring.
>
> This is safe as we only handle one request per ring at any given time.
>
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> CC: anthony.perard@citrix.com
> CC: jgross@suse.com
> CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> CC: Greg Kurz <groug@kaod.org>
> ---
> hw/9pfs/xen-9p-backend.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 89 insertions(+), 2 deletions(-)
>
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> index 741dd31..d72a749 100644
> --- a/hw/9pfs/xen-9p-backend.c
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -48,12 +48,77 @@ typedef struct Xen9pfsDev {
> struct Xen9pfsRing *rings;
> } Xen9pfsDev;
>
> +static void xen_9pfs_in_sg(struct Xen9pfsRing *ring,
Coding style for structured types.
> + struct iovec *in_sg,
> + int *num,
> + uint32_t idx,
> + uint32_t size)
> +{
> + RING_IDX cons, prod, masked_prod, masked_cons;
> +
> + cons = ring->intf->in_cons;
> + prod = ring->intf->in_prod;
> + xen_rmb();
> + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> +
> + if (masked_prod < masked_cons) {
> + in_sg[0].iov_base = ring->ring.in + masked_prod;
> + in_sg[0].iov_len = masked_cons - masked_prod;
> + *num = 1;
> + } else {
> + in_sg[0].iov_base = ring->ring.in + masked_prod;
> + in_sg[0].iov_len = XEN_9PFS_RING_SIZE - masked_prod;
> + in_sg[1].iov_base = ring->ring.in;
> + in_sg[1].iov_len = masked_cons;
> + *num = 2;
> + }
> +}
> +
> +static void xen_9pfs_out_sg(struct Xen9pfsRing *ring,
Coding style for structured types.
> + struct iovec *out_sg,
> + int *num,
> + uint32_t idx)
> +{
> + RING_IDX cons, prod, masked_prod, masked_cons;
> +
> + cons = ring->intf->out_cons;
> + prod = ring->intf->out_prod;
> + xen_rmb();
> + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> +
> + if (masked_cons < masked_prod) {
> + out_sg[0].iov_base = ring->ring.out + masked_cons;
> + out_sg[0].iov_len = ring->out_size;
> + *num = 1;
> + } else {
> + if (ring->out_size > (XEN_9PFS_RING_SIZE - masked_cons)) {
> + out_sg[0].iov_base = ring->ring.out + masked_cons;
> + out_sg[0].iov_len = XEN_9PFS_RING_SIZE - masked_cons;
> + out_sg[1].iov_base = ring->ring.out;
> + out_sg[1].iov_len = ring->out_size - (XEN_9PFS_RING_SIZE - masked_cons);
> + *num = 2;
> + } else {
> + out_sg[0].iov_base = ring->ring.out + masked_cons;
> + out_sg[0].iov_len = ring->out_size;
> + *num = 1;
> + }
> + }
> +}
> +
> static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
> size_t offset,
> const char *fmt,
> va_list ap)
> {
> - return 0;
> + struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
Coding style for structured types.
> + struct iovec in_sg[2];
> + int num;
> +
> + xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
> + in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));
> + return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
> }
>
> static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
> @@ -61,13 +126,27 @@ static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
> const char *fmt,
> va_list ap)
> {
> - return 0;
> + struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
Coding style for structured types.
> + struct iovec out_sg[2];
> + int num;
> +
> + xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
> + out_sg, &num, pdu->idx);
> + return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
> }
>
> static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
> struct iovec **piov,
> unsigned int *pniov)
> {
> + struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
> + struct Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
Coding style for structured types.
> + struct iovec *sg = g_malloc0(sizeof(*sg)*2);
> + int num;
> +
> + xen_9pfs_out_sg(ring, sg, &num, pdu->idx);
> + *piov = sg;
> + *pniov = num;
> }
>
> static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
> @@ -75,6 +154,14 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
> unsigned int *pniov,
> size_t size)
> {
> + struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
> + struct Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
Coding style for structured types.
> + struct iovec *sg = g_malloc0(sizeof(*sg)*2);
> + int num;
> +
> + xen_9pfs_in_sg(ring, sg, &num, pdu->idx, size);
> + *piov = sg;
> + *pniov = num;
> }
>
> static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
On Wed, 15 Mar 2017, Greg Kurz wrote:
> On Mon, 13 Mar 2017 16:55:58 -0700
> Stefano Stabellini <sstabellini@kernel.org> wrote:
>
> > Implement xen_9pfs_init_in/out_iov_from_pdu and
> > xen_9pfs_pdu_vmarshal/vunmarshal by creating a new sg pointing to the
> > data on the ring.
> >
> > This is safe as we only handle one request per ring at any given time.
> >
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > CC: anthony.perard@citrix.com
> > CC: jgross@suse.com
> > CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> > CC: Greg Kurz <groug@kaod.org>
> > ---
> > hw/9pfs/xen-9p-backend.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++--
> > 1 file changed, 89 insertions(+), 2 deletions(-)
> >
> > diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> > index 741dd31..d72a749 100644
> > --- a/hw/9pfs/xen-9p-backend.c
> > +++ b/hw/9pfs/xen-9p-backend.c
> > @@ -48,12 +48,77 @@ typedef struct Xen9pfsDev {
> > struct Xen9pfsRing *rings;
> > } Xen9pfsDev;
> >
> > +static void xen_9pfs_in_sg(struct Xen9pfsRing *ring,
>
> Coding style for structured types.
OK
> > + struct iovec *in_sg,
> > + int *num,
> > + uint32_t idx,
> > + uint32_t size)
> > +{
> > + RING_IDX cons, prod, masked_prod, masked_cons;
> > +
> > + cons = ring->intf->in_cons;
> > + prod = ring->intf->in_prod;
> > + xen_rmb();
> > + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> > + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> > +
> > + if (masked_prod < masked_cons) {
> > + in_sg[0].iov_base = ring->ring.in + masked_prod;
> > + in_sg[0].iov_len = masked_cons - masked_prod;
> > + *num = 1;
> > + } else {
> > + in_sg[0].iov_base = ring->ring.in + masked_prod;
> > + in_sg[0].iov_len = XEN_9PFS_RING_SIZE - masked_prod;
> > + in_sg[1].iov_base = ring->ring.in;
> > + in_sg[1].iov_len = masked_cons;
> > + *num = 2;
> > + }
> > +}
> > +
> > +static void xen_9pfs_out_sg(struct Xen9pfsRing *ring,
>
> Coding style for structured types.
OK
> > + struct iovec *out_sg,
> > + int *num,
> > + uint32_t idx)
> > +{
> > + RING_IDX cons, prod, masked_prod, masked_cons;
> > +
> > + cons = ring->intf->out_cons;
> > + prod = ring->intf->out_prod;
> > + xen_rmb();
> > + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> > + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> > +
> > + if (masked_cons < masked_prod) {
> > + out_sg[0].iov_base = ring->ring.out + masked_cons;
> > + out_sg[0].iov_len = ring->out_size;
> > + *num = 1;
> > + } else {
> > + if (ring->out_size > (XEN_9PFS_RING_SIZE - masked_cons)) {
> > + out_sg[0].iov_base = ring->ring.out + masked_cons;
> > + out_sg[0].iov_len = XEN_9PFS_RING_SIZE - masked_cons;
> > + out_sg[1].iov_base = ring->ring.out;
> > + out_sg[1].iov_len = ring->out_size - (XEN_9PFS_RING_SIZE - masked_cons);
> > + *num = 2;
> > + } else {
> > + out_sg[0].iov_base = ring->ring.out + masked_cons;
> > + out_sg[0].iov_len = ring->out_size;
> > + *num = 1;
> > + }
> > + }
> > +}
> > +
> > static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
> > size_t offset,
> > const char *fmt,
> > va_list ap)
> > {
> > - return 0;
> > + struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
>
> Coding style for structured types.
OK
> > + struct iovec in_sg[2];
> > + int num;
> > +
> > + xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
> > + in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));
> > + return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
> > }
> >
> > static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
> > @@ -61,13 +126,27 @@ static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
> > const char *fmt,
> > va_list ap)
> > {
> > - return 0;
> > + struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
>
> Coding style for structured types.
OK
> > + struct iovec out_sg[2];
> > + int num;
> > +
> > + xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
> > + out_sg, &num, pdu->idx);
> > + return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
> > }
> >
> > static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
> > struct iovec **piov,
> > unsigned int *pniov)
> > {
> > + struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
> > + struct Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
>
> Coding style for structured types.
OK
> > + struct iovec *sg = g_malloc0(sizeof(*sg)*2);
> > + int num;
> > +
> > + xen_9pfs_out_sg(ring, sg, &num, pdu->idx);
> > + *piov = sg;
> > + *pniov = num;
> > }
> >
> > static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
> > @@ -75,6 +154,14 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
> > unsigned int *pniov,
> > size_t size)
> > {
> > + struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev, state);
> > + struct Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
>
> Coding style for structured types.
OK :-)
> > + struct iovec *sg = g_malloc0(sizeof(*sg)*2);
> > + int num;
> > +
> > + xen_9pfs_in_sg(ring, sg, &num, pdu->idx, size);
> > + *piov = sg;
> > + *pniov = num;
> > }
> >
> > static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
>
>
Once a request is completed, xen_9pfs_push_and_notify gets called. In
xen_9pfs_push_and_notify, update the indexes (data has already been
copied to the sg by the common code) and send a notification to the
frontend.
Schedule the bottom-half to check if we already have any other requests
pending.
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
CC: Greg Kurz <groug@kaod.org>
---
hw/9pfs/xen-9p-backend.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index d72a749..fed369f 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -166,6 +166,22 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
{
+ RING_IDX prod;
+ struct Xen9pfsDev *priv = container_of(pdu->s, struct Xen9pfsDev, state);
+ struct Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];
+
+ ring->intf->out_cons = ring->out_cons;
+ xen_wmb();
+
+ prod = ring->intf->in_prod;
+ xen_rmb();
+ ring->intf->in_prod = prod + pdu->size;
+ xen_wmb();
+
+ ring->inprogress = false;
+ xenevtchn_notify(ring->evtchndev, ring->local_port);
+
+ qemu_bh_schedule(ring->bh);
}
static const struct V9fsTransport xen_9p_transport = {
--
1.9.1
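An annotated view of the index update in this patch, spelling out why each
barrier is there (the code is unchanged; the comments are editorial):

    ring->intf->out_cons = ring->out_cons;  /* retire the consumed request */
    xen_wmb();                              /* publish out_cons first */

    prod = ring->intf->in_prod;             /* backend is the only in_prod writer */
    xen_rmb();
    ring->intf->in_prod = prod + pdu->size; /* expose the response payload */
    xen_wmb();                              /* order the update before the notify */

    ring->inprogress = false;
    xenevtchn_notify(ring->evtchndev, ring->local_port);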
On Mon, 13 Mar 2017 16:55:59 -0700
Stefano Stabellini <sstabellini@kernel.org> wrote:
> Once a request is completed, xen_9pfs_push_and_notify gets called. In
> xen_9pfs_push_and_notify, update the indexes (data has already been
> copied to the sg by the common code) and send a notification to the
> frontend.
>
> Schedule the bottom-half to check if we already have any other requests
> pending.
>
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> CC: anthony.perard@citrix.com
> CC: jgross@suse.com
> CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> CC: Greg Kurz <groug@kaod.org>
> ---
> hw/9pfs/xen-9p-backend.c | 16 ++++++++++++++++
> 1 file changed, 16 insertions(+)
>
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> index d72a749..fed369f 100644
> --- a/hw/9pfs/xen-9p-backend.c
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -166,6 +166,22 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
>
> static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
> {
> + RING_IDX prod;
> + struct Xen9pfsDev *priv = container_of(pdu->s, struct Xen9pfsDev, state);
> + struct Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];
> +
Coding style for structured types.
> + ring->intf->out_cons = ring->out_cons;
> + xen_wmb();
> +
> + prod = ring->intf->in_prod;
> + xen_rmb();
> + ring->intf->in_prod = prod + pdu->size;
> + xen_wmb();
> +
> + ring->inprogress = false;
> + xenevtchn_notify(ring->evtchndev, ring->local_port);
> +
> + qemu_bh_schedule(ring->bh);
> }
>
> static const struct V9fsTransport xen_9p_transport = {
On Wed, 15 Mar 2017, Greg Kurz wrote:
> On Mon, 13 Mar 2017 16:55:59 -0700
> Stefano Stabellini <sstabellini@kernel.org> wrote:
>
> > Once a request is completed, xen_9pfs_push_and_notify gets called. In
> > xen_9pfs_push_and_notify, update the indexes (data has already been
> > copied to the sg by the common code) and send a notification to the
> > frontend.
> >
> > Schedule the bottom-half to check if we already have any other requests
> > pending.
> >
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > CC: anthony.perard@citrix.com
> > CC: jgross@suse.com
> > CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> > CC: Greg Kurz <groug@kaod.org>
> > ---
> > hw/9pfs/xen-9p-backend.c | 16 ++++++++++++++++
> > 1 file changed, 16 insertions(+)
> >
> > diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> > index d72a749..fed369f 100644
> > --- a/hw/9pfs/xen-9p-backend.c
> > +++ b/hw/9pfs/xen-9p-backend.c
> > @@ -166,6 +166,22 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
> >
> > static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
> > {
> > + RING_IDX prod;
> > + struct Xen9pfsDev *priv = container_of(pdu->s, struct Xen9pfsDev, state);
> > + struct Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];
> > +
>
> Coding style for structured types.
Yep
> > + ring->intf->out_cons = ring->out_cons;
> > + xen_wmb();
> > +
> > + prod = ring->intf->in_prod;
> > + xen_rmb();
> > + ring->intf->in_prod = prod + pdu->size;
> > + xen_wmb();
> > +
> > + ring->inprogress = false;
> > + xenevtchn_notify(ring->evtchndev, ring->local_port);
> > +
> > + qemu_bh_schedule(ring->bh);
> > }
> >
> > static const struct V9fsTransport xen_9p_transport = {
>
>
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
Reviewed-by: Greg Kurz <groug@kaod.org>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
CC: Greg Kurz <groug@kaod.org>
---
hw/9pfs/Makefile.objs | 1 +
hw/xen/xen_backend.c | 3 +++
include/hw/xen/xen_backend.h | 3 +++
3 files changed, 7 insertions(+)
diff --git a/hw/9pfs/Makefile.objs b/hw/9pfs/Makefile.objs
index da0ae0c..673f9f9 100644
--- a/hw/9pfs/Makefile.objs
+++ b/hw/9pfs/Makefile.objs
@@ -5,5 +5,6 @@ common-obj-y += coth.o cofs.o codir.o cofile.o
common-obj-y += coxattr.o 9p-synth.o
common-obj-$(CONFIG_OPEN_BY_HANDLE) += 9p-handle.o
common-obj-y += 9p-proxy.o
+common-obj-$(CONFIG_XEN_BACKEND) += xen-9p-backend.o
obj-y += virtio-9p-device.o
diff --git a/hw/xen/xen_backend.c b/hw/xen/xen_backend.c
index 6c21c37..fe737e9 100644
--- a/hw/xen/xen_backend.c
+++ b/hw/xen/xen_backend.c
@@ -585,6 +585,9 @@ void xen_be_register_common(void)
xen_be_register("console", &xen_console_ops);
xen_be_register("vkbd", &xen_kbdmouse_ops);
xen_be_register("qdisk", &xen_blkdev_ops);
+#ifdef CONFIG_VIRTFS
+ xen_be_register("9pfs", &xen_9pfs_ops);
+#endif
#ifdef CONFIG_USB_LIBUSB
xen_be_register("qusb", &xen_usb_ops);
#endif
diff --git a/include/hw/xen/xen_backend.h b/include/hw/xen/xen_backend.h
index 4f4799a..7b72f1a 100644
--- a/include/hw/xen/xen_backend.h
+++ b/include/hw/xen/xen_backend.h
@@ -49,6 +49,9 @@ extern struct XenDevOps xen_console_ops; /* xen_console.c */
extern struct XenDevOps xen_kbdmouse_ops; /* xen_framebuffer.c */
extern struct XenDevOps xen_framebuffer_ops; /* xen_framebuffer.c */
extern struct XenDevOps xen_blkdev_ops; /* xen_disk.c */
+#ifdef CONFIG_VIRTFS
+extern struct XenDevOps xen_9pfs_ops; /* xen-9p-backend.c */
+#endif
extern struct XenDevOps xen_netdev_ops; /* xen_nic.c */
#ifdef CONFIG_USB_LIBUSB
extern struct XenDevOps xen_usb_ops; /* xen-usb.c */
--
1.9.1