[PATCH v7 6/8] tests/qtest: Add GET_SHMEM validation test

Maintainers: "Michael S. Tsirkin" <mst@redhat.com>, Stefano Garzarella <sgarzare@redhat.com>, "Alex Bennée" <alex.bennee@linaro.org>, Fabiano Rosas <farosas@suse.de>, Laurent Vivier <lvivier@redhat.com>, Paolo Bonzini <pbonzini@redhat.com>
Posted by Albert Esteve 2 months, 4 weeks ago
Improve vhost-user-test to properly validate
VHOST_USER_GET_SHMEM_CONFIG message handling by
directly simulating the message exchange.

The test manually triggers the
VHOST_USER_GET_SHMEM_CONFIG message by calling
chr_read() with a crafted VhostUserMsg, allowing direct
validation of the shmem configuration response handler.

Add a TestServerShmem structure to track shmem
configuration state, including the nregions_sent and
sizes_sent fields used to validate the reply.
The test verifies that the response contains the expected
number of shared memory regions and their corresponding
sizes.

Signed-off-by: Albert Esteve <aesteve@redhat.com>
---
 tests/qtest/vhost-user-test.c | 91 +++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)

diff --git a/tests/qtest/vhost-user-test.c b/tests/qtest/vhost-user-test.c
index 75cb3e44b2..44a5e90b2e 100644
--- a/tests/qtest/vhost-user-test.c
+++ b/tests/qtest/vhost-user-test.c
@@ -88,6 +88,7 @@ typedef enum VhostUserRequest {
     VHOST_USER_SET_VRING_ENABLE = 18,
     VHOST_USER_GET_CONFIG = 24,
     VHOST_USER_SET_CONFIG = 25,
+    VHOST_USER_GET_SHMEM_CONFIG = 44,
     VHOST_USER_MAX
 } VhostUserRequest;
 
@@ -109,6 +110,20 @@ typedef struct VhostUserLog {
     uint64_t mmap_offset;
 } VhostUserLog;
 
+#define VIRTIO_MAX_SHMEM_REGIONS 256
+
+typedef struct VhostUserShMemConfig {
+    uint32_t nregions;
+    uint32_t padding;
+    uint64_t memory_sizes[VIRTIO_MAX_SHMEM_REGIONS];
+} VhostUserShMemConfig;
+
+typedef struct TestServerShmem {
+    bool test_enabled;
+    uint32_t nregions_sent;
+    uint64_t sizes_sent[VIRTIO_MAX_SHMEM_REGIONS];
+} TestServerShmem;
+
 typedef struct VhostUserMsg {
     VhostUserRequest request;
 
@@ -124,6 +139,7 @@ typedef struct VhostUserMsg {
         struct vhost_vring_addr addr;
         VhostUserMemory memory;
         VhostUserLog log;
+        VhostUserShMemConfig shmem;
     } payload;
 } QEMU_PACKED VhostUserMsg;
 
@@ -170,6 +186,7 @@ typedef struct TestServer {
     bool test_fail;
     int test_flags;
     int queues;
+    TestServerShmem shmem;
     struct vhost_user_ops *vu_ops;
 } TestServer;
 
@@ -513,6 +530,31 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
         qos_printf("set_vring(%d)=%s\n", msg.payload.state.index,
                    msg.payload.state.num ? "enabled" : "disabled");
         break;
+
+    case VHOST_USER_GET_SHMEM_CONFIG:
+        if (!s->shmem.test_enabled) {
+            /* Reply with error if shmem feature not enabled */
+            msg.flags |= VHOST_USER_REPLY_MASK;
+            msg.size = sizeof(uint64_t);
+            msg.payload.u64 = -1; /* Error */
+            qemu_chr_fe_write_all(chr, (uint8_t *) &msg, VHOST_USER_HDR_SIZE + msg.size);
+        } else {
+            /* Reply with test shmem configuration */
+            msg.flags |= VHOST_USER_REPLY_MASK;
+            msg.size = sizeof(VhostUserShMemConfig);
+            msg.payload.shmem.nregions = 2; /* Test with 2 regions */
+            msg.payload.shmem.padding = 0;
+            msg.payload.shmem.memory_sizes[0] = 0x100000; /* 1MB */
+            msg.payload.shmem.memory_sizes[1] = 0x200000; /* 2MB */
+
+            /* Record what we're sending for test validation */
+            s->shmem.nregions_sent = msg.payload.shmem.nregions;
+            s->shmem.sizes_sent[0] = msg.payload.shmem.memory_sizes[0];
+            s->shmem.sizes_sent[1] = msg.payload.shmem.memory_sizes[1];
+
+            qemu_chr_fe_write_all(chr, (uint8_t *) &msg, VHOST_USER_HDR_SIZE + msg.size);
+        }
+        break;
 
     default:
         qos_printf("vhost-user: un-handled message: %d\n", msg.request);
@@ -809,6 +851,22 @@ static void *vhost_user_test_setup_shm(GString *cmd_line, void *arg)
     return server;
 }
 
+static void *vhost_user_test_setup_shmem_config(GString *cmd_line, void *arg)
+{
+    TestServer *server = test_server_new("vhost-user-test", arg);
+    test_server_listen(server);
+
+    /* Enable shmem testing for this server */
+    server->shmem.test_enabled = true;
+
+    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_SHM);
+    server->vu_ops->append_opts(server, cmd_line, "");
+
+    g_test_queue_destroy(vhost_user_test_cleanup, server);
+
+    return server;
+}
+
 static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
 {
     TestServer *server = arg;
@@ -1089,6 +1147,33 @@ static struct vhost_user_ops g_vu_net_ops = {
     .get_protocol_features = vu_net_get_protocol_features,
 };
 
+/* Test function for VHOST_USER_GET_SHMEM_CONFIG message */
+static void test_shmem_config(void *obj, void *arg, QGuestAllocator *alloc)
+{
+    TestServer *s = arg;
+
+    g_assert_true(s->shmem.test_enabled);
+
+    g_mutex_lock(&s->data_mutex);
+    s->shmem.nregions_sent = 0;
+    s->shmem.sizes_sent[0] = 0;
+    s->shmem.sizes_sent[1] = 0;
+    g_mutex_unlock(&s->data_mutex);
+
+    VhostUserMsg msg = {
+        .request = VHOST_USER_GET_SHMEM_CONFIG,
+        .flags = VHOST_USER_VERSION,
+        .size = 0,
+    };
+    chr_read(s, (uint8_t *) &msg, VHOST_USER_HDR_SIZE);
+
+    g_mutex_lock(&s->data_mutex);
+    g_assert_cmpint(s->shmem.nregions_sent, ==, 2);
+    g_assert_cmpint(s->shmem.sizes_sent[0], ==, 0x100000); /* 1MB */
+    g_assert_cmpint(s->shmem.sizes_sent[1], ==, 0x200000); /* 2MB */
+    g_mutex_unlock(&s->data_mutex);
+}
+
 static void register_vhost_user_test(void)
 {
     QOSGraphTestOptions opts = {
@@ -1136,6 +1221,12 @@ static void register_vhost_user_test(void)
     qos_add_test("vhost-user/multiqueue",
                  "virtio-net",
                  test_multiqueue, &opts);
+
+    opts.before = vhost_user_test_setup_shmem_config;
+    opts.edge.extra_device_opts = "";
+    qos_add_test("vhost-user/shmem-config",
+                 "virtio-net",
+                 test_shmem_config, &opts);
 }
 libqos_init(register_vhost_user_test);
 
-- 
2.49.0
Re: [PATCH v7 6/8] tests/qtest: Add GET_SHMEM validation test
Posted by Stefan Hajnoczi 2 months, 4 weeks ago
On Mon, Aug 18, 2025 at 12:03:51PM +0200, Albert Esteve wrote:
> Improve vhost-user-test to properly validate
> VHOST_USER_GET_SHMEM_CONFIG message handling by
> directly simulating the message exchange.
> 
> The test manually triggers the
> VHOST_USER_GET_SHMEM_CONFIG message by calling
> chr_read() with a crafted VhostUserMsg, allowing direct
> validation of the shmem configuration response handler.

It looks like this test case invokes its own chr_read() function without
going through QEMU, so I don't understand what this is testing?

Re: [PATCH v7 6/8] tests/qtest: Add GET_SHMEM validation test
Posted by Albert Esteve 2 months, 3 weeks ago
On Tue, Aug 19, 2025 at 12:42 PM Stefan Hajnoczi <stefanha@redhat.com> wrote:
>
> It looks like this test case invokes its own chr_read() function without
> going through QEMU, so I don't understand what this is testing?

I spent some time trying to test it, but in the end I could not
instantiate vhost-user-device because it is not user_creatable. I did
not find any test for vhost-user-device anywhere else either. But I
had already added most of the infrastructure here, so I fell back to
chr_read() communication to avoid having to delete everything. My
thought was that once we have other devices that use shared memory,
they could tweak the test to instantiate the proper device and test
this and the map/unmap operations.

Although after writing this, I think other devices will actually need a
specific layout for their shared memory. So
VHOST_USER_GET_SHMEM_CONFIG is only ever going to be used by
vhost-user-device.

In general, trying to test this patch series has been a headache,
short of trying it with the external device code I have. If you have
an idea for how I could test this, I can try. Otherwise, it is
probably best to remove this commit from the series and wait for
another vhost-user device that uses map/unmap to land so it can be
tested.
Re: [PATCH v7 6/8] tests/qtest: Add GET_SHMEM validation test
Posted by Stefan Hajnoczi 2 months, 3 weeks ago
On Tue, Aug 19, 2025 at 02:16:47PM +0200, Albert Esteve wrote:
> I spent some time trying to test it, but in the end I could not
> instantiate vhost-user-device because it is not user_creatable. I did
> not find any test for vhost-user-device anywhere else either. But I
> had already added most of the infrastructure here, so I fell back to
> chr_read() communication to avoid having to delete everything. My
> thought was that once we have other devices that use shared memory,
> they could tweak the test to instantiate the proper device and test
> this and the map/unmap operations.
>
> Although after writing this, I think other devices will actually need a
> specific layout for their shared memory. So
> VHOST_USER_GET_SHMEM_CONFIG is only ever going to be used by
> vhost-user-device.
>
> In general, trying to test this patch series has been a headache,
> short of trying it with the external device code I have. If you have
> an idea for how I could test this, I can try. Otherwise, it is
> probably best to remove this commit from the series and wait for
> another vhost-user device that uses map/unmap to land so it can be
> tested.

Alex Bennee has renamed vhost-user-device to vhost-user-test-device and
set user_creatable = true:
https://lore.kernel.org/qemu-devel/20250820195632.1956795-1-alex.bennee@linaro.org/T/#t

Re: [PATCH v7 6/8] tests/qtest: Add GET_SHMEM validation test
Posted by Albert Esteve 2 months, 3 weeks ago
On Wed, Aug 20, 2025 at 10:33 PM Stefan Hajnoczi <stefanha@redhat.com> wrote:
> Alex Bennee has renamed vhost-user-device to vhost-user-test-device and
> set user_creatable = true:
> https://lore.kernel.org/qemu-devel/20250820195632.1956795-1-alex.bennee@linaro.org/T/#t

Oh, great! Thanks for letting me know.

That allows having a QTest with the vhost-user-test-device available
and running it in pipelines if necessary, without manually
changing/recompiling. I'll try to add it to the test again in this
commit.

Thank you, Stefan and Alyssa, for the hints.

Re: [PATCH v7 6/8] tests/qtest: Add GET_SHMEM validation test
Posted by Albert Esteve 2 months ago
On Thu, Aug 21, 2025 at 8:50 AM Albert Esteve <aesteve@redhat.com> wrote:
> Oh, great! Thanks for letting me know.
>
> That allows having a QTest with the vhost-user-test-device available
> and running it in pipelines if necessary, without manually
> changing/recompiling. I'll try to add it to the test again in this
> commit.
>
> Thank you, Stefan and Alyssa, for the hints.

Hi,

I wanted to make a note before sending the next version. I have been
trying to test it by forcing user_creatable to true locally until the
other patch lands. But it will need more work, and I do not want to
delay the new version much further. Thus, I will remove this commit
from the next version and keep working locally.

Re: [PATCH v7 6/8] tests/qtest: Add GET_SHMEM validation test
Posted by Stefan Hajnoczi 2 months, 3 weeks ago
On Tue, Aug 19, 2025 at 02:16:47PM +0200, Albert Esteve wrote:
> I spent some time trying to test it, but in the end I could not
> instantiate vhost-user-device because it is not user_creatable. I did
> not find any test for vhost-user-device anywhere else either. But I

My understanding is that you need to manually comment out user_creatable
= false in the QEMU source code and recompile. This choice was made
because users were confused by the vhost-user-device, and this is an
attempt to hide the device. However, it is a useful device for testing.
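
(As an aside, a sketch of the kind of local hack meant here; the
function name is approximated, but the QEMU pattern is a class_init
that clears user_creatable on the DeviceClass:)

    static void vud_class_init(ObjectClass *klass, void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        /* Comment this out locally so -device can instantiate it: */
        /* dc->user_creatable = false; */
    }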

> had already added most of the infrastructure here, so I fell back to
> chr_read() communication to avoid having to delete everything. My
> thought was that once we have other devices that use shared memory,
> they could tweak the test to instantiate the proper device and test
> this and the map/unmap operations.
>
> Although after writing this, I think other devices will actually need a
> specific layout for their shared memory. So
> VHOST_USER_GET_SHMEM_CONFIG is only ever going to be used by
> vhost-user-device.
> 
> In general, trying to test this patch series has been a headache,
> short of trying it with the external device code I have. If you have
> an idea for how I could test this, I can try. Otherwise, it is
> probably best to remove this commit from the series and wait for
> another vhost-user device that uses map/unmap to land so it can be
> tested.

Here is an idea:
- Extend vhost-user-test.c's chr_read() to handle GET_SHMEM_CONFIG.
- Add -device vhost-user-test-device (which inherits from
  vhost-user-device but sets user_creatable to true) to QEMU.
- Add a shmem test case to vhost-user-test.c that instantiates
  vhost-user-test-device, sends MAP/UNMAP messages, and verifies that
  the qtest memory accessors qtest_readb()/qtest_writeb() can modify
  the shared memory as intended (see the sketch below).

That means vhost-user-test.c provides the vhost-user backend and QEMU
uses vhost-user-test-device to connect. The test case uses qtest to
control QEMU and check that the VIRTIO Shared Memory Regions behave as
intended.
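
As a rough sketch (illustrative only: send_map_request(),
send_unmap_request() and the shmem_base guest address are assumptions
the test server would have to provide, not existing APIs), such a test
case could look like:

    static void test_shmem_map_unmap(void *obj, void *arg,
                                     QGuestAllocator *alloc)
    {
        TestServer *s = arg;
        /* Assumed guest-visible base address of the shared region */
        uint64_t shmem_base = 0x100000000ull;

        /* Hypothetical helper: backend maps an fd into region 0 */
        send_map_request(s, 0 /* shmid */, 0 /* offset */, 0x1000 /* len */);

        /* Guest-side accesses should now hit the shared mapping */
        qtest_writeb(global_qtest, shmem_base, 0xaa);
        g_assert_cmphex(qtest_readb(global_qtest, shmem_base), ==, 0xaa);

        /* Hypothetical helper: backend unmaps the range again */
        send_unmap_request(s, 0 /* shmid */, 0 /* offset */, 0x1000 /* len */);
    }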

Stefan

Re: [PATCH v7 6/8] tests/qtest: Add GET_SHMEM validation test
Posted by Alyssa Ross 2 months, 3 weeks ago
Albert Esteve <aesteve@redhat.com> writes:

> Although after writing this, I think other devices will actually need a
> specific layout for their shared memory. So
> VHOST_USER_GET_SHMEM_CONFIG is only ever going to be used by
> vhost-user-device.

FWIW: I'm not so sure — my non-upstream Cloud Hypervisor frontend for
the crosvm vhost-user GPU device[1] uses the equivalent of
VHOST_USER_GET_SHMEM_CONFIG to allow the backend to choose the size of
the shared memory region, and I could imagine that being something other
devices might want to do too?

[1]: https://spectrum-os.org/software/cloud-hypervisor/
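
A hedged illustration of that, reusing the VhostUserShMemConfig layout
from this patch (the function name and the 64 MiB figure are made up
for the example): the backend, not the frontend, picks the region size
at reply time.

    static void reply_with_backend_chosen_size(CharBackend *chr,
                                               VhostUserMsg *msg)
    {
        /* Size picked by the backend's own resource accounting */
        uint64_t sz = 64 * 1024 * 1024;

        msg->flags |= VHOST_USER_REPLY_MASK;
        msg->size = sizeof(VhostUserShMemConfig);
        msg->payload.shmem.nregions = 1;
        msg->payload.shmem.padding = 0;
        msg->payload.shmem.memory_sizes[0] = sz;
        qemu_chr_fe_write_all(chr, (uint8_t *) msg,
                              VHOST_USER_HDR_SIZE + msg->size);
    }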
