MAINTAINERS | 1 + tests/functional/x86_64/meson.build | 1 + .../x86_64/test_vfio_user_client.py | 197 ++++++++++++++++++ 3 files changed, 199 insertions(+) create mode 100755 tests/functional/x86_64/test_vfio_user_client.py
From: Mark Cave-Ayland <mark.caveayland@nutanix.com>
Add a basic test of the vfio-user PCI client implementation.
Co-authored-by: John Levon <john.levon@nutanix.com>
Signed-off-by: Mark Cave-Ayland <mark.caveayland@nutanix.com>
Signed-off-by: John Levon <john.levon@nutanix.com>
---
MAINTAINERS | 1 +
tests/functional/x86_64/meson.build | 1 +
.../x86_64/test_vfio_user_client.py | 197 ++++++++++++++++++
3 files changed, 199 insertions(+)
create mode 100755 tests/functional/x86_64/test_vfio_user_client.py
diff --git a/MAINTAINERS b/MAINTAINERS
index fb045388b9..738db4f83b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4307,6 +4307,7 @@ F: docs/system/devices/vfio-user.rst
F: hw/vfio-user/*
F: include/hw/vfio-user/*
F: subprojects/libvfio-user
+F: tests/functional/x86_64/test_vfio_user_client.py
EBPF:
M: Jason Wang <jasowang@redhat.com>
diff --git a/tests/functional/x86_64/meson.build b/tests/functional/x86_64/meson.build
index d0b4667bb8..eed1936976 100644
--- a/tests/functional/x86_64/meson.build
+++ b/tests/functional/x86_64/meson.build
@@ -31,6 +31,7 @@ tests_x86_64_system_thorough = [
'replay',
'reverse_debug',
'tuxrun',
+ 'vfio_user_client',
'virtio_balloon',
'virtio_gpu',
]
diff --git a/tests/functional/x86_64/test_vfio_user_client.py b/tests/functional/x86_64/test_vfio_user_client.py
new file mode 100755
index 0000000000..1e4c5bc875
--- /dev/null
+++ b/tests/functional/x86_64/test_vfio_user_client.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2025 Nutanix, Inc.
+#
+# Author:
+# Mark Cave-Ayland <mark.caveayland@nutanix.com>
+# John Levon <john.levon@nutanix.com>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+"""
+Check basic vfio-user-pci client functionality. The test starts two VMs:
+
+ - the server VM runs the libvfio-user "gpio" example server inside it,
+ piping vfio-user traffic between a local UNIX socket and a virtio-serial
+ port. On the host, the virtio-serial port is backed by a local socket.
+
+ - the client VM loads the gpio-pci-idio-16 kernel module, with the
+ vfio-user client connecting to the above local UNIX socket.
+
+This way, we don't depend on trying to run a vfio-user server on the host
+itself.
+
+Once both VMs are running, we run some basic configuration on the gpio device
+and verify that the server is logging the expected out. As this is consistent
+given the same VM images, we just do a simple direct comparison.
+"""
+
+import difflib
+import logging
+import os
+import select
+import shutil
+import socket
+import subprocess
+import time
+
+from qemu_test import Asset
+from qemu_test import QemuSystemTest
+from qemu_test import exec_command
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+
# Lines we expect to find in the gpio server's debug log.  The exact
# output can vary from run to run, so instead of a full diff we sample
# for these lines with a simple membership check (so duplicated entries
# below, e.g. "I/O space enabled", are redundant for the check itself
# and only document the expected trace).
EXPECTED_SERVER_LINES = [
    "gpio: adding DMA region [0, 0xc0000) offset=0 flags=0x3",
    "gpio: devinfo flags 0x3, num_regions 9, num_irqs 5",
    "gpio: region_info[0] offset 0 flags 0 size 0 argsz 32",
    "gpio: region_info[1] offset 0 flags 0 size 0 argsz 32",
    "gpio: region_info[2] offset 0 flags 0x3 size 256 argsz 32",
    "gpio: region_info[3] offset 0 flags 0 size 0 argsz 32",
    "gpio: region_info[4] offset 0 flags 0 size 0 argsz 32",
    "gpio: region_info[5] offset 0 flags 0 size 0 argsz 32",
    "gpio: region_info[7] offset 0 flags 0x3 size 256 argsz 32",
    "gpio: region7: read 256 bytes at 0",
    "gpio: region7: read 0 from (0x30:4)",
    "gpio: cleared EROM",
    "gpio: I/O space enabled",
    "gpio: memory space enabled",
    "gpio: SERR# enabled",
    "gpio: region7: wrote 0x103 to (0x4:2)",
    "gpio: I/O space enabled",
    "gpio: memory space enabled",
]
+
class VfioUserClient(QemuSystemTest):
    """Functional test of the vfio-user-pci client device."""

    ASSET_REPO = 'https://github.com/mcayland-ntx/libvfio-user-test'

    ASSET_KERNEL = Asset(
        f'{ASSET_REPO}/raw/refs/heads/main/images/bzImage',
        '40292fa6ce95d516e26bccf5974e138d0db65a6de0bc540cabae060fe9dea605'
    )

    ASSET_ROOTFS = Asset(
        f'{ASSET_REPO}/raw/refs/heads/main/images/rootfs.ext2',
        'e1e3abae8aebb8e6e77f08b1c531caeacf46250c94c815655c6bbea59fc3d1c1'
    )

    def prepare_images(self):
        """Download (or reuse cached copies of) the kernel and rootfs."""
        self.kernel_path = self.ASSET_KERNEL.fetch()
        self.rootfs_path = self.ASSET_ROOTFS.fetch()

    def configure_server_vm_args(self, server_vm, sock_path):
        """
        Configuration for the server VM. Set up a virtio-serial device backed
        by a UNIX socket chardev at the given socket path.
        """
        server_vm.add_args('-kernel', self.kernel_path)
        server_vm.add_args('-append', 'console=ttyS0 root=/dev/sda')
        server_vm.add_args('-drive',
            f"file={self.rootfs_path},if=ide,format=raw,id=drv0")
        # -snapshot: never write to the (shared, cached) rootfs image
        server_vm.add_args('-snapshot')
        server_vm.add_args('-chardev',
            f"socket,id=sock0,path={sock_path},telnet=off,server=on,wait=off")
        server_vm.add_args('-device', 'virtio-serial')
        server_vm.add_args('-device',
            'virtserialport,chardev=sock0,name=org.fedoraproject.port.0')

    def configure_client_vm_args(self, client_vm, sock_path):
        """
        Configuration for the client VM. Point the vfio-user-pci device to the
        socket path configured above.
        """
        client_vm.add_args('-kernel', self.kernel_path)
        client_vm.add_args('-append', 'console=ttyS0 root=/dev/sda')
        client_vm.add_args('-drive',
            f'file={self.rootfs_path},if=ide,format=raw,id=drv0')
        # -snapshot: never write to the (shared, cached) rootfs image
        client_vm.add_args('-snapshot')
        # JSON device syntax is needed for the nested "socket" option
        client_vm.add_args('-device',
            '{"driver":"vfio-user-pci",' +
            '"socket":{"path": "%s", "type": "unix"}}' % sock_path)

    def setup_vfio_user_pci_server(self, server_vm):
        """
        Start the libvfio-user server within the server VM, and arrange
        for data to shuttle between its socket and the virtio serial port.
        """
        wait_for_console_pattern(self, 'login:', None, server_vm)
        exec_command_and_wait_for_pattern(self, 'root', '#', None, server_vm)

        exec_command_and_wait_for_pattern(self,
            'gpio-pci-idio-16 -v /tmp/vfio-user.sock >/var/tmp/gpio.out 2>&1 &',
            '#', None, server_vm)
        # Poll for the server's listening socket instead of sleeping for a
        # fixed interval: the fixed sleep was both slow and still racy.
        exec_command_and_wait_for_pattern(self,
            'while ! test -S /tmp/vfio-user.sock; do sleep 0.1; done',
            '#', None, server_vm)
        exec_command_and_wait_for_pattern(self,
            'socat UNIX-CONNECT:/tmp/vfio-user.sock /dev/vport0p1,ignoreeof &',
            '#', None, server_vm)

    def test_vfio_user_pci(self):
        """Boot server and client VMs and verify the gpio server output."""
        # Check machine/device availability first: these may skip the
        # test, in which case we must not download the assets at all.
        self.set_machine('pc')
        self.require_device('virtio-serial')
        self.require_device('vfio-user-pci')

        self.prepare_images()

        sock_dir = self.socket_dir()
        socket_path = os.path.join(sock_dir.name, 'vfio-user.sock')

        server_vm = self.get_vm(name='server')
        server_vm.set_console()
        self.configure_server_vm_args(server_vm, socket_path)

        server_vm.launch()

        self.log.debug('starting libvfio-user server')

        self.setup_vfio_user_pci_server(server_vm)

        client_vm = self.get_vm(name='client')
        client_vm.set_console()
        self.configure_client_vm_args(client_vm, socket_path)

        try:
            client_vm.launch()
        except Exception:
            # Dump the server-side log to aid diagnosis, then re-raise.
            self.log.error('client VM failed to start, dumping server logs')
            exec_command_and_wait_for_pattern(self, 'cat /var/tmp/gpio.out',
                                              '#', None, server_vm)
            raise

        self.log.debug('waiting for client VM boot')

        wait_for_console_pattern(self, 'login:', None, client_vm)
        exec_command_and_wait_for_pattern(self, 'root', '#', None, client_vm)

        #
        # Here, we'd like to actually interact with the gpio device a little
        # more as described at:
        #
        # https://github.com/nutanix/libvfio-user/blob/master/docs/qemu.md
        #
        # Unfortunately, the buildroot Linux kernel has some undiagnosed issue
        # so we don't get /sys/class/gpio. Nonetheless just the basic
        # initialization and setup is enough for basic testing of vfio-user.
        #

        self.log.debug('collecting libvfio-user server output')

        out = exec_command_and_wait_for_pattern(self,
            'cat /var/tmp/gpio.out',
            'gpio: region2: wrote 0 to (0x1:1)',
            None, server_vm)

        gpio_server_out = [s for s in out.decode().splitlines()
                           if s.startswith("gpio:")]

        # Report every missing line before failing (the previous code
        # called self.fail(False), losing the message and stopping at
        # the first miss).
        missing = [line for line in EXPECTED_SERVER_LINES
                   if line not in gpio_server_out]
        for line in missing:
            self.log.error('Missing server debug line: %s', line)
        if missing:
            self.fail(f'{len(missing)} expected server debug line(s) missing')


if __name__ == '__main__':
    QemuSystemTest.main()
--
2.43.0
Hi! On 11/09/2025 16.22, John Levon wrote: > From: Mark Cave-Ayland <mark.caveayland@nutanix.com> > > Add a basic test of the vfio-user PCI client implementation. > > Co-authored-by: John Levon <john.levon@nutanix.com> > Signed-off-by: Mark Cave-Ayland <mark.caveayland@nutanix.com> > Signed-off-by: John Levon <john.levon@nutanix.com> > --- ... > diff --git a/tests/functional/x86_64/test_vfio_user_client.py b/tests/functional/x86_64/test_vfio_user_client.py > new file mode 100755 > index 0000000000..1e4c5bc875 > --- /dev/null > +++ b/tests/functional/x86_64/test_vfio_user_client.py > @@ -0,0 +1,197 @@ > +#!/usr/bin/env python3 > +# > +# Copyright (c) 2025 Nutanix, Inc. > +# > +# Author: > +# Mark Cave-Ayland <mark.caveayland@nutanix.com> > +# John Levon <john.levon@nutanix.com> > +# > +# SPDX-License-Identifier: GPL-2.0-or-later > +""" > +Check basic vfio-user-pci client functionality. The test starts two VMs: > + > + - the server VM runs the libvfio-user "gpio" example server inside it, > + piping vfio-user traffic between a local UNIX socket and a virtio-serial > + port. On the host, the virtio-serial port is backed by a local socket. > + > + - the client VM loads the gpio-pci-idio-16 kernel module, with the > + vfio-user client connecting to the above local UNIX socket. > + > +This way, we don't depend on trying to run a vfio-user server on the host > +itself. > + > +Once both VMs are running, we run some basic configuration on the gpio device > +and verify that the server is logging the expected out. As this is consistent > +given the same VM images, we just do a simple direct comparison. > +""" I'm not a python expert, but I guess it would make sense to move that description block next to the "class VfioUserClient(QemuSystemTest):" line so that it's the description for the class? 
(that would fix the "Missing class docstring" that you get when using "pylint" on your code) > +import difflib > +import logging > +import os > +import select > +import shutil > +import socket > +import subprocess > +import time pylint complains: tests/functional/x86_64/test_vfio_user_client.py:28:0: W0611: Unused import difflib (unused-import) tests/functional/x86_64/test_vfio_user_client.py:29:0: W0611: Unused import logging (unused-import) tests/functional/x86_64/test_vfio_user_client.py:30:0: W0611: Unused import os (unused-import) tests/functional/x86_64/test_vfio_user_client.py:31:0: W0611: Unused import select (unused-import) tests/functional/x86_64/test_vfio_user_client.py:32:0: W0611: Unused import shutil (unused-import) tests/functional/x86_64/test_vfio_user_client.py:33:0: W0611: Unused import socket (unused-import) tests/functional/x86_64/test_vfio_user_client.py:34:0: W0611: Unused import subprocess (unused-import) tests/functional/x86_64/test_vfio_user_client.py:35:0: W0611: Unused import time (unused-import) ... so I think you can remove those. > +from qemu_test import Asset > +from qemu_test import QemuSystemTest > +from qemu_test import exec_command Same for "exec_command" ... you don't use it in your test here. > +from qemu_test import exec_command_and_wait_for_pattern > +from qemu_test import wait_for_console_pattern > + > +# Exact output can vary, so we just sample for some expected lines. 
> +EXPECTED_SERVER_LINES = [ > + "gpio: adding DMA region [0, 0xc0000) offset=0 flags=0x3", > + "gpio: devinfo flags 0x3, num_regions 9, num_irqs 5", > + "gpio: region_info[0] offset 0 flags 0 size 0 argsz 32", > + "gpio: region_info[1] offset 0 flags 0 size 0 argsz 32", > + "gpio: region_info[2] offset 0 flags 0x3 size 256 argsz 32", > + "gpio: region_info[3] offset 0 flags 0 size 0 argsz 32", > + "gpio: region_info[4] offset 0 flags 0 size 0 argsz 32", > + "gpio: region_info[5] offset 0 flags 0 size 0 argsz 32", > + "gpio: region_info[7] offset 0 flags 0x3 size 256 argsz 32", > + "gpio: region7: read 256 bytes at 0", > + "gpio: region7: read 0 from (0x30:4)", > + "gpio: cleared EROM", > + "gpio: I/O space enabled", > + "gpio: memory space enabled", > + "gpio: SERR# enabled", > + "gpio: region7: wrote 0x103 to (0x4:2)", > + "gpio: I/O space enabled", > + "gpio: memory space enabled", > +] > + > +class VfioUserClient(QemuSystemTest): > + > + ASSET_REPO = 'https://github.com/mcayland-ntx/libvfio-user-test' Not sure whether that indirection works with the asset pre-caching mechanism? Daniel, could you comment on that? > + ASSET_KERNEL = Asset( > + f'{ASSET_REPO}/raw/refs/heads/main/images/bzImage', > + '40292fa6ce95d516e26bccf5974e138d0db65a6de0bc540cabae060fe9dea605' > + ) > + > + ASSET_ROOTFS = Asset( > + f'{ASSET_REPO}/raw/refs/heads/main/images/rootfs.ext2', > + 'e1e3abae8aebb8e6e77f08b1c531caeacf46250c94c815655c6bbea59fc3d1c1' > + ) > + > + > + def prepare_images(self): > + """Download the images for the VMs.""" > + self.kernel_path = self.ASSET_KERNEL.fetch() > + self.rootfs_path = self.ASSET_ROOTFS.fetch() > + > + def configure_server_vm_args(self, server_vm, sock_path): > + """ > + Configuration for the server VM. Set up virtio-serial device backed by > + the given socket path. 
> + """ > + server_vm.add_args('-kernel', self.kernel_path) > + server_vm.add_args('-append', 'console=ttyS0 root=/dev/sda') > + server_vm.add_args('-drive', > + f"file={self.rootfs_path},if=ide,format=raw,id=drv0") > + server_vm.add_args('-snapshot') > + server_vm.add_args('-chardev', > + f"socket,id=sock0,path={sock_path},telnet=off,server=on,wait=off") > + server_vm.add_args('-device', 'virtio-serial') > + server_vm.add_args('-device', > + 'virtserialport,chardev=sock0,name=org.fedoraproject.port.0') > + > + def configure_client_vm_args(self, client_vm, sock_path): > + """ > + Configuration for the client VM. Point the vfio-user-pci device to the > + socket path configured above. > + """ > + > + client_vm.add_args('-kernel', self.kernel_path) > + client_vm.add_args('-append', 'console=ttyS0 root=/dev/sda') > + client_vm.add_args('-drive', > + f'file={self.rootfs_path},if=ide,format=raw,id=drv0') > + client_vm.add_args('-snapshot') > + client_vm.add_args('-device', > + '{"driver":"vfio-user-pci",' + > + '"socket":{"path": "%s", "type": "unix"}}' % sock_path) > + > + def setup_vfio_user_pci_server(self, server_vm): > + """ > + Start the libvfio-user server within the server VM, and arrange > + for data to shuttle between its socket and the virtio serial port. > + """ > + wait_for_console_pattern(self, 'login:', None, server_vm) > + exec_command_and_wait_for_pattern(self, 'root', '#', None, server_vm) > + > + exec_command_and_wait_for_pattern(self, > + 'gpio-pci-idio-16 -v /tmp/vfio-user.sock >/var/tmp/gpio.out 2>&1 &', > + '#', None, server_vm) > + # wait for libvfio-user to initialize properly > + exec_command_and_wait_for_pattern(self, 'sleep 5', '#', None, server_vm) Could the sleep be avoided? ... it's still a race condition (even if it's unlikely when you wait for 5 seconds), and always sleeping 5 seconds slows down the test quite a bit ... Could you maybe poll something instead, e.g. output of "dmesg" or something in the file system? 
(sorry, I don't have any clue about vfio-user, so I don't know any better suggestions) > + exec_command_and_wait_for_pattern(self, > + 'socat UNIX-CONNECT:/tmp/vfio-user.sock /dev/vport0p1,ignoreeof ' + > + ' &', '#', None, server_vm) > + > + def test_vfio_user_pci(self): > + self.prepare_images() Please move the "prepare_images" after the set_machine() and require_device() calls. Reason: set_machine() and require_device() could skip the test if it's not available in the qemu binary, so in that case you don't want to try to fetch the assets first. > + self.set_machine('pc') > + self.require_device('virtio-serial') > + self.require_device('vfio-user-pci') > + > + sock_dir = self.socket_dir() > + socket_path = sock_dir.name + '/vfio-user.sock' Better use os.path.join() instead of hard-coding slashes. Thanks, Thomas
On Thu, Sep 11, 2025 at 05:27:24PM +0200, Thomas Huth wrote: > I'm not a python expert, but I guess it would make sense to move that > description block next to the "class VfioUserClient(QemuSystemTest):" line > so that it's the description for the class? (that would fix the "Missing > class docstring" that you get when using "pylint" on your code) Then pylint complains about the module missing docs, so I think the current location is a bit better, and I'll add a single line for the class, and made the file pylint clean generally. > > + # wait for libvfio-user to initialize properly > > + exec_command_and_wait_for_pattern(self, 'sleep 5', '#', None, server_vm) > > Could the sleep be avoided? ... it's still a race condition (even if it's > unlikely when you wait for 5 seconds), and always sleeping 5 seconds slows > down the test quite a bit ... Currently stress testing a loop that just waits for the socket to appear, so far so good. thanks john
On Thu, Sep 11, 2025 at 05:27:24PM +0200, Thomas Huth wrote: > Hi! > > On 11/09/2025 16.22, John Levon wrote: > > From: Mark Cave-Ayland <mark.caveayland@nutanix.com> > > > > Add a basic test of the vfio-user PCI client implementation. > > > > Co-authored-by: John Levon <john.levon@nutanix.com> > > Signed-off-by: Mark Cave-Ayland <mark.caveayland@nutanix.com> > > Signed-off-by: John Levon <john.levon@nutanix.com> > > --- > ... > > diff --git a/tests/functional/x86_64/test_vfio_user_client.py b/tests/functional/x86_64/test_vfio_user_client.py > > new file mode 100755 > > index 0000000000..1e4c5bc875 > > --- /dev/null > > +++ b/tests/functional/x86_64/test_vfio_user_client.py > > +class VfioUserClient(QemuSystemTest): > > + > > + ASSET_REPO = 'https://github.com/mcayland-ntx/libvfio-user-test' > > Not sure whether that indirection works with the asset pre-caching > mechanism? Daniel, could you comment on that? It should be fine - the asset caching loads the class and at that time python will have done the substitution. > > > + ASSET_KERNEL = Asset( > > + f'{ASSET_REPO}/raw/refs/heads/main/images/bzImage', > > + '40292fa6ce95d516e26bccf5974e138d0db65a6de0bc540cabae060fe9dea605' > > + ) > > + > > + ASSET_ROOTFS = Asset( > > + f'{ASSET_REPO}/raw/refs/heads/main/images/rootfs.ext2', > > + 'e1e3abae8aebb8e6e77f08b1c531caeacf46250c94c815655c6bbea59fc3d1c1' > > + ) > > + def prepare_images(self): > > + """Download the images for the VMs.""" > > + self.kernel_path = self.ASSET_KERNEL.fetch() > > + self.rootfs_path = self.ASSET_ROOTFS.fetch() Just put this inline, it doesn't seem this method now we removed the extra copying logic > > + > > + def configure_server_vm_args(self, server_vm, sock_path): > > + """ > > + Configuration for the server VM. Set up virtio-serial device backed by > > + the given socket path. 
> > + """ > > + server_vm.add_args('-kernel', self.kernel_path) > > + server_vm.add_args('-append', 'console=ttyS0 root=/dev/sda') > > + server_vm.add_args('-drive', > > + f"file={self.rootfs_path},if=ide,format=raw,id=drv0") > > + server_vm.add_args('-snapshot') > > + server_vm.add_args('-chardev', > > + f"socket,id=sock0,path={sock_path},telnet=off,server=on,wait=off") > > + server_vm.add_args('-device', 'virtio-serial') > > + server_vm.add_args('-device', > > + 'virtserialport,chardev=sock0,name=org.fedoraproject.port.0') > > + > > + def configure_client_vm_args(self, client_vm, sock_path): > > + """ > > + Configuration for the client VM. Point the vfio-user-pci device to the > > + socket path configured above. > > + """ > > + > > + client_vm.add_args('-kernel', self.kernel_path) > > + client_vm.add_args('-append', 'console=ttyS0 root=/dev/sda') > > + client_vm.add_args('-drive', > > + f'file={self.rootfs_path},if=ide,format=raw,id=drv0') > > + client_vm.add_args('-snapshot') > > + client_vm.add_args('-device', > > + '{"driver":"vfio-user-pci",' + > > + '"socket":{"path": "%s", "type": "unix"}}' % sock_path) > > + > > + def setup_vfio_user_pci_server(self, server_vm): > > + """ > > + Start the libvfio-user server within the server VM, and arrange > > + for data to shuttle between its socket and the virtio serial port. > > + """ > > + wait_for_console_pattern(self, 'login:', None, server_vm) > > + exec_command_and_wait_for_pattern(self, 'root', '#', None, server_vm) > > + > > + exec_command_and_wait_for_pattern(self, > > + 'gpio-pci-idio-16 -v /tmp/vfio-user.sock >/var/tmp/gpio.out 2>&1 &', > > + '#', None, server_vm) > > + # wait for libvfio-user to initialize properly > > + exec_command_and_wait_for_pattern(self, 'sleep 5', '#', None, server_vm) > > Could the sleep be avoided? ... it's still a race condition (even if it's > unlikely when you wait for 5 seconds), and always sleeping 5 seconds slows > down the test quite a bit ... 
> > Could you maybe poll something instead, e.g. output of "dmesg" or something > in the file system? (sorry, I don't have any clue about vfio-user, so I > don't know any better suggestions) > > > + exec_command_and_wait_for_pattern(self, > > + 'socat UNIX-CONNECT:/tmp/vfio-user.sock /dev/vport0p1,ignoreeof ' + > > + ' &', '#', None, server_vm) > > + > > + def test_vfio_user_pci(self): > > + self.prepare_images() > > Please move the "prepare_images" after the set_machine() and > require_device() calls. Reason: set_machine() and require_device() could > skip the test if it's not available in the qemu binary, so in that case you > don't want to try to fetch the assets first. > > > + self.set_machine('pc') > > + self.require_device('virtio-serial') > > + self.require_device('vfio-user-pci') > > + > > + sock_dir = self.socket_dir() > > + socket_path = sock_dir.name + '/vfio-user.sock' > > Better use os.path.join() instead of hard-coding slashes. > > Thanks, > Thomas > With regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|
© 2016 - 2025 Red Hat, Inc.