[PATCH v7 6/6] tests/tcg/multiarch: add test for plugin memory access

Pierrick Bouvier posted 6 patches 1 month, 3 weeks ago
There is a newer version of this series
[PATCH v7 6/6] tests/tcg/multiarch: add test for plugin memory access
Posted by Pierrick Bouvier 1 month, 3 weeks ago
Add an explicit test to check expected memory values are read/written.
8,16,32 load/store are tested for all arch.
64,128 load/store are tested for aarch64/x64.
atomic operations (8,16,32,64) are tested for x64 only.

By default, atomic accesses are non atomic if a single cpu is running,
so we force creation of a second one by creating a new thread first.

load/store helpers code path can't be triggered easily in user mode (no
softmmu), so we can't test it here.

Output of test-plugin-mem-access.c is the list of expected patterns in
plugin output. By reading stdout, we can compare to plugins output and
have a multiarch test.

Can be run with:
make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so

Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
 tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
 tests/tcg/multiarch/Makefile.target           |   7 +
 .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
 3 files changed, 212 insertions(+)
 create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
 create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh

diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
new file mode 100644
index 00000000000..09d1fa22e35
--- /dev/null
+++ b/tests/tcg/multiarch/test-plugin-mem-access.c
@@ -0,0 +1,175 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Check if we detect all memory accesses expected using plugin API.
+ * Used in conjunction with ./check-plugin-mem-access.sh check script.
+ * Output of this program is the list of patterns expected in plugin output.
+ *
+ * 8,16,32 load/store are tested for all arch.
+ * 64,128 load/store are tested for aarch64/x64.
+ * atomic operations (8,16,32,64) are tested for x64 only.
+ */
+
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#if defined(__x86_64__)
+#include <emmintrin.h>
+#elif defined(__aarch64__)
+#include <arm_neon.h>
+#endif /* __x86_64__ */
+
+static void *data;
+
+/* ,store_u8,.*,8,store,0xf1 */
+#define PRINT_EXPECTED(function, type, value, action)                 \
+do {                                                                  \
+    printf(",%s,.*,%d,%s,%s\n",                                       \
+           #function, (int) sizeof(type) * 8, action, value);         \
+}                                                                     \
+while (0)
+
+#define DEFINE_STORE(name, type, value)                  \
+                                                         \
+static void print_expected_store_##name(void)            \
+{                                                        \
+    PRINT_EXPECTED(store_##name, type, #value, "store"); \
+}                                                        \
+                                                         \
+static void store_##name(void)                           \
+{                                                        \
+    *((type *)data) = value;                             \
+    print_expected_store_##name();                       \
+}
+
+#define DEFINE_ATOMIC_OP(name, type, value)                    \
+                                                               \
+static void print_expected_atomic_op_##name(void)              \
+{                                                              \
+    PRINT_EXPECTED(atomic_op_##name, type, "0x0*42", "load");  \
+    PRINT_EXPECTED(atomic_op_##name, type, #value, "store");   \
+}                                                              \
+                                                               \
+static void atomic_op_##name(void)                             \
+{                                                              \
+    *((type *)data) = 0x42;                                    \
+    __sync_val_compare_and_swap((type *)data, 0x42, value);    \
+    print_expected_atomic_op_##name();                         \
+}
+
+#define DEFINE_LOAD(name, type, value)                  \
+                                                        \
+static void print_expected_load_##name(void)            \
+{                                                       \
+    PRINT_EXPECTED(load_##name, type, #value, "load");  \
+}                                                       \
+                                                        \
+static void load_##name(void)                           \
+{                                                       \
+    type src = *((type *) data);                        \
+    type dest = src;                                    \
+    (void)src, (void)dest;                              \
+    print_expected_load_##name();                       \
+}
+
+DEFINE_STORE(u8, uint8_t, 0xf1)
+DEFINE_LOAD(u8, uint8_t, 0xf1)
+DEFINE_STORE(u16, uint16_t, 0xf123)
+DEFINE_LOAD(u16, uint16_t, 0xf123)
+DEFINE_STORE(u32, uint32_t, 0xff112233)
+DEFINE_LOAD(u32, uint32_t, 0xff112233)
+
+#if defined(__x86_64__) || defined(__aarch64__)
+DEFINE_STORE(u64, uint64_t, 0xf123456789abcdef)
+DEFINE_LOAD(u64, uint64_t, 0xf123456789abcdef)
+
+static void print_expected_store_u128(void)
+{
+    PRINT_EXPECTED(store_u128, __int128,
+                   "0xf122334455667788f123456789abcdef", "store");
+}
+
+static void store_u128(void)
+{
+#ifdef __x86_64__
+    _mm_store_si128(data, _mm_set_epi32(0xf1223344, 0x55667788,
+                                        0xf1234567, 0x89abcdef));
+#else
+    const uint32_t init[4] = {0x89abcdef, 0xf1234567, 0x55667788, 0xf1223344};
+    uint32x4_t vec = vld1q_u32(init);
+    vst1q_u32(data, vec);
+#endif /* __x86_64__ */
+    print_expected_store_u128();
+}
+
+static void print_expected_load_u128(void)
+{
+    PRINT_EXPECTED(load_u128, __int128,
+                   "0xf122334455667788f123456789abcdef", "load");
+}
+
+static void load_u128(void)
+{
+#ifdef __x86_64__
+    __m128i var = _mm_load_si128(data);
+#else
+    uint32x4_t var = vld1q_u32(data);
+#endif
+    (void) var;
+    print_expected_load_u128();
+}
+#endif /* __x86_64__ || __aarch64__ */
+
+#if defined(__x86_64__)
+DEFINE_ATOMIC_OP(u8, uint8_t, 0xf1)
+DEFINE_ATOMIC_OP(u16, uint16_t, 0xf123)
+DEFINE_ATOMIC_OP(u32, uint32_t, 0xff112233)
+DEFINE_ATOMIC_OP(u64, uint64_t, 0xf123456789abcdef)
+#endif /* __x86_64__ */
+
+static void *f(void *p)
+{
+    return NULL;
+}
+
+int main(void)
+{
+    /*
+     * We force creation of a second thread to enable cpu flag CF_PARALLEL.
+     * This will generate atomic operations when needed.
+     */
+    pthread_t thread;
+    pthread_create(&thread, NULL, &f, NULL);
+    pthread_join(thread, NULL);
+
+    /* allocate storage up to 128 bits */
+    data = malloc(16);
+
+    store_u8();
+    load_u8();
+
+    store_u16();
+    load_u16();
+
+    store_u32();
+    load_u32();
+
+#if defined(__x86_64__) || defined(__aarch64__)
+    store_u64();
+    load_u64();
+
+    store_u128();
+    load_u128();
+#endif /* __x86_64__ || __aarch64__ */
+
+#if defined(__x86_64__)
+    atomic_op_u8();
+    atomic_op_u16();
+    atomic_op_u32();
+    atomic_op_u64();
+#endif /* __x86_64__ */
+
+    free(data);
+}
diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
index 5e3391ec9d2..d90cbd3e521 100644
--- a/tests/tcg/multiarch/Makefile.target
+++ b/tests/tcg/multiarch/Makefile.target
@@ -170,5 +170,12 @@ run-plugin-semiconsole-with-%:
 TESTS += semihosting semiconsole
 endif
 
+# Test plugin memory access instrumentation
+run-plugin-test-plugin-mem-access-with-libmem.so: \
+	PLUGIN_ARGS=$(COMMA)print-accesses=true
+run-plugin-test-plugin-mem-access-with-libmem.so: \
+	CHECK_PLUGIN_OUTPUT_COMMAND= \
+	$(SRC_PATH)/tests/tcg/multiarch/check-plugin-mem-access.sh
+
 # Update TESTS
 TESTS += $(MULTIARCH_TESTS)
diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
new file mode 100755
index 00000000000..909606943bb
--- /dev/null
+++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+die()
+{
+    echo "$@" 1>&2
+    exit 1
+}
+
+check()
+{
+    file=$1
+    pattern=$2
+    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
+}
+
+[ $# -eq 1 ] || die "usage: plugin_out_file"
+
+plugin_out=$1
+
+expected()
+{
+    ./test-plugin-mem-access ||
+        die "running test-plugin-mem-access executable failed"
+}
+
+expected | while read line; do
+    check "$plugin_out" "$line"
+done
-- 
2.39.2
Re: [PATCH v7 6/6] tests/tcg/multiarch: add test for plugin memory access
Posted by Alex Bennée 1 week, 5 days ago
Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:

> Add an explicit test to check expected memory values are read/written.
> 8,16,32 load/store are tested for all arch.
> 64,128 load/store are tested for aarch64/x64.
> atomic operations (8,16,32,64) are tested for x64 only.
>
> By default, atomic accesses are non atomic if a single cpu is running,
> so we force creation of a second one by creating a new thread first.
>
> load/store helpers code path can't be triggered easily in user mode (no
> softmmu), so we can't test it here.
>
> Output of test-plugin-mem-access.c is the list of expected patterns in
> plugin output. By reading stdout, we can compare to plugins output and
> have a multiarch test.
>
> Can be run with:
> make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so
>
> Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> ---
>  tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
>  tests/tcg/multiarch/Makefile.target           |   7 +
>  .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
>  3 files changed, 212 insertions(+)
>  create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
>  create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh
>
> diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
> new file mode 100644
> index 00000000000..09d1fa22e35
<snip>
> diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
> index 5e3391ec9d2..d90cbd3e521 100644
> --- a/tests/tcg/multiarch/Makefile.target
> +++ b/tests/tcg/multiarch/Makefile.target
> @@ -170,5 +170,12 @@ run-plugin-semiconsole-with-%:
>  TESTS += semihosting semiconsole
>  endif
>

Also you need:

test-plugin-mem-access: CFLAGS+=-pthread
test-plugin-mem-access: LDFLAGS+=-pthread

So less tolerant gcc's include pthread (otherwise the alpha-linux-user
fails), with that fix I get:

   TEST    check plugin libmem.so output with test-plugin-mem-access
  ",store_u8,.*,8,store,0xf1" not found in test-plugin-mem-access-with-libmem.so.pout
  make[1]: *** [Makefile:181: run-plugin-test-plugin-mem-access-with-libmem.so] Error 1
  make: *** [/home/alex/lsrc/qemu.git/tests/Makefile.include:56: run-tcg-tests-alpha-linux-user] Error 2

> +# Test plugin memory access instrumentation
> +run-plugin-test-plugin-mem-access-with-libmem.so: \
> +	PLUGIN_ARGS=$(COMMA)print-accesses=true
> +run-plugin-test-plugin-mem-access-with-libmem.so: \
> +	CHECK_PLUGIN_OUTPUT_COMMAND= \
> +	$(SRC_PATH)/tests/tcg/multiarch/check-plugin-mem-access.sh
> +
>  # Update TESTS
>  TESTS += $(MULTIARCH_TESTS)
> diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
> new file mode 100755
> index 00000000000..909606943bb
> --- /dev/null
> +++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
> @@ -0,0 +1,30 @@
> +#!/usr/bin/env bash
> +
> +set -euo pipefail
> +
> +die()
> +{
> +    echo "$@" 1>&2
> +    exit 1
> +}
> +
> +check()
> +{
> +    file=$1
> +    pattern=$2
> +    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
> +}
> +
> +[ $# -eq 1 ] || die "usage: plugin_out_file"
> +
> +plugin_out=$1
> +
> +expected()
> +{
> +    ./test-plugin-mem-access ||
> +        die "running test-plugin-mem-access executable failed"
> +}
> +
> +expected | while read line; do
> +    check "$plugin_out" "$line"
> +done

-- 
Alex Bennée
Virtualisation Tech Lead @ Linaro
Re: [PATCH v7 6/6] tests/tcg/multiarch: add test for plugin memory access
Posted by Alex Bennée 1 week, 5 days ago
Alex Bennée <alex.bennee@linaro.org> writes:

> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
>
>> Add an explicit test to check expected memory values are read/written.
>> 8,16,32 load/store are tested for all arch.
>> 64,128 load/store are tested for aarch64/x64.
>> atomic operations (8,16,32,64) are tested for x64 only.
>>
>> By default, atomic accesses are non atomic if a single cpu is running,
>> so we force creation of a second one by creating a new thread first.
>>
>> load/store helpers code path can't be triggered easily in user mode (no
>> softmmu), so we can't test it here.
>>
>> Output of test-plugin-mem-access.c is the list of expected patterns in
>> plugin output. By reading stdout, we can compare to plugins output and
>> have a multiarch test.
>>
>> Can be run with:
>> make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so
>>
>> Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
>> ---
>>  tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
>>  tests/tcg/multiarch/Makefile.target           |   7 +
>>  .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
>>  3 files changed, 212 insertions(+)
>>  create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
>>  create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh
>>
>> diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
>> new file mode 100644
>> index 00000000000..09d1fa22e35
> <snip>
>> diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
>> index 5e3391ec9d2..d90cbd3e521 100644
>> --- a/tests/tcg/multiarch/Makefile.target
>> +++ b/tests/tcg/multiarch/Makefile.target
>> @@ -170,5 +170,12 @@ run-plugin-semiconsole-with-%:
>>  TESTS += semihosting semiconsole
>>  endif
>>
>
> Also you need:
>
> test-plugin-mem-access: CFLAGS+=-pthread
> test-plugin-mem-access: LDFLAGS+=-pthread
>
> So less tolerant gcc's include pthread (otherwise the alpha-linux-user
> fails), with that fix I get:
>
>    TEST    check plugin libmem.so output with test-plugin-mem-access
>   ",store_u8,.*,8,store,0xf1" not found in test-plugin-mem-access-with-libmem.so.pout
>   make[1]: *** [Makefile:181: run-plugin-test-plugin-mem-access-with-libmem.so] Error 1
>   make: *** [/home/alex/lsrc/qemu.git/tests/Makefile.include:56: run-tcg-tests-alpha-linux-user] Error 2

And ensure we enable BWX for alpha so it emits bytes stores instead of
faking it with masking:

modified   tests/tcg/alpha/Makefile.target
@@ -13,3 +13,5 @@ test-cmov: test-cond.c
 	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
 
 run-test-cmov: test-cmov
+
+test-plugin-mem-access: CFLAGS+=-mbwx

-- 
Alex Bennée
Virtualisation Tech Lead @ Linaro
Re: [PATCH v7 6/6] tests/tcg/multiarch: add test for plugin memory access
Posted by Alex Bennée 2 weeks, 4 days ago
Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:

> Add an explicit test to check expected memory values are read/written.
> 8,16,32 load/store are tested for all arch.
> 64,128 load/store are tested for aarch64/x64.
> atomic operations (8,16,32,64) are tested for x64 only.
>
> By default, atomic accesses are non atomic if a single cpu is running,
> so we force creation of a second one by creating a new thread first.
>
> load/store helpers code path can't be triggered easily in user mode (no
> softmmu), so we can't test it here.
>
> Output of test-plugin-mem-access.c is the list of expected patterns in
> plugin output. By reading stdout, we can compare to plugins output and
> have a multiarch test.
>
> Can be run with:
> make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so
>
> Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> ---
>  tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
>  tests/tcg/multiarch/Makefile.target           |   7 +
>  .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
>  3 files changed, 212 insertions(+)
>  create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
>  create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh
>
> diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
> new file mode 100644
> index 00000000000..09d1fa22e35
> --- /dev/null
> +++ b/tests/tcg/multiarch/test-plugin-mem-access.c
> @@ -0,0 +1,175 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + *
> + * Check if we detect all memory accesses expected using plugin API.
> + * Used in conjunction with ./check-plugin-mem-access.sh check script.
> + * Output of this program is the list of patterns expected in plugin output.
> + *
> + * 8,16,32 load/store are tested for all arch.
> + * 64,128 load/store are tested for aarch64/x64.
> + * atomic operations (8,16,32,64) are tested for x64 only.
> + */

It would be nice to build this for the softmmu path as well. I'm not
sure if this can be done with as single source or we need a second test.
I shall have a play.

> +
> +#include <pthread.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +
> +#if defined(__x86_64__)
> +#include <emmintrin.h>
> +#elif defined(__aarch64__)
> +#include <arm_neon.h>
> +#endif /* __x86_64__ */
> +
> +static void *data;
> +
> +/* ,store_u8,.*,8,store,0xf1 */
> +#define PRINT_EXPECTED(function, type, value, action)                 \
> +do {                                                                  \
> +    printf(",%s,.*,%d,%s,%s\n",                                       \
> +           #function, (int) sizeof(type) * 8, action, value);         \
> +}                                                                     \
> +while (0)
> +
> +#define DEFINE_STORE(name, type, value)                  \
> +                                                         \
> +static void print_expected_store_##name(void)            \
> +{                                                        \
> +    PRINT_EXPECTED(store_##name, type, #value, "store"); \
> +}                                                        \
> +                                                         \
> +static void store_##name(void)                           \
> +{                                                        \
> +    *((type *)data) = value;                             \
> +    print_expected_store_##name();                       \
> +}
> +
> +#define DEFINE_ATOMIC_OP(name, type, value)                    \
> +                                                               \
> +static void print_expected_atomic_op_##name(void)              \
> +{                                                              \
> +    PRINT_EXPECTED(atomic_op_##name, type, "0x0*42", "load");  \
> +    PRINT_EXPECTED(atomic_op_##name, type, #value, "store");   \
> +}                                                              \
> +                                                               \
> +static void atomic_op_##name(void)                             \
> +{                                                              \
> +    *((type *)data) = 0x42;                                    \
> +    __sync_val_compare_and_swap((type *)data, 0x42, value);    \
> +    print_expected_atomic_op_##name();                         \
> +}
> +
> +#define DEFINE_LOAD(name, type, value)                  \
> +                                                        \
> +static void print_expected_load_##name(void)            \
> +{                                                       \
> +    PRINT_EXPECTED(load_##name, type, #value, "load");  \
> +}                                                       \
> +                                                        \
> +static void load_##name(void)                           \
> +{                                                       \
> +    type src = *((type *) data);                        \
> +    type dest = src;                                    \
> +    (void)src, (void)dest;                              \
> +    print_expected_load_##name();                       \
> +}
> +
> +DEFINE_STORE(u8, uint8_t, 0xf1)
> +DEFINE_LOAD(u8, uint8_t, 0xf1)
> +DEFINE_STORE(u16, uint16_t, 0xf123)
> +DEFINE_LOAD(u16, uint16_t, 0xf123)
> +DEFINE_STORE(u32, uint32_t, 0xff112233)
> +DEFINE_LOAD(u32, uint32_t, 0xff112233)
> +
> +#if defined(__x86_64__) || defined(__aarch64__)
> +DEFINE_STORE(u64, uint64_t, 0xf123456789abcdef)
> +DEFINE_LOAD(u64, uint64_t, 0xf123456789abcdef)
> +
> +static void print_expected_store_u128(void)
> +{
> +    PRINT_EXPECTED(store_u128, __int128,
> +                   "0xf122334455667788f123456789abcdef", "store");
> +}
> +
> +static void store_u128(void)
> +{
> +#ifdef __x86_64__
> +    _mm_store_si128(data, _mm_set_epi32(0xf1223344, 0x55667788,
> +                                        0xf1234567, 0x89abcdef));
> +#else
> +    const uint32_t init[4] = {0x89abcdef, 0xf1234567, 0x55667788, 0xf1223344};
> +    uint32x4_t vec = vld1q_u32(init);
> +    vst1q_u32(data, vec);
> +#endif /* __x86_64__ */
> +    print_expected_store_u128();
> +}
> +
> +static void print_expected_load_u128(void)
> +{
> +    PRINT_EXPECTED(load_u128, __int128,
> +                   "0xf122334455667788f123456789abcdef", "load");
> +}
> +
> +static void load_u128(void)
> +{
> +#ifdef __x86_64__
> +    __m128i var = _mm_load_si128(data);
> +#else
> +    uint32x4_t var = vld1q_u32(data);
> +#endif
> +    (void) var;
> +    print_expected_load_u128();
> +}
> +#endif /* __x86_64__ || __aarch64__ */
> +
> +#if defined(__x86_64__)
> +DEFINE_ATOMIC_OP(u8, uint8_t, 0xf1)
> +DEFINE_ATOMIC_OP(u16, uint16_t, 0xf123)
> +DEFINE_ATOMIC_OP(u32, uint32_t, 0xff112233)
> +DEFINE_ATOMIC_OP(u64, uint64_t, 0xf123456789abcdef)
> +#endif /* __x86_64__ */
> +
> +static void *f(void *p)
> +{
> +    return NULL;
> +}
> +
> +int main(void)
> +{
> +    /*
> +     * We force creation of a second thread to enable cpu flag CF_PARALLEL.
> +     * This will generate atomic operations when needed.
> +     */
> +    pthread_t thread;
> +    pthread_create(&thread, NULL, &f, NULL);
> +    pthread_join(thread, NULL);
> +
> +    /* allocate storage up to 128 bits */
> +    data = malloc(16);
> +
> +    store_u8();
> +    load_u8();
> +
> +    store_u16();
> +    load_u16();
> +
> +    store_u32();
> +    load_u32();
> +
> +#if defined(__x86_64__) || defined(__aarch64__)
> +    store_u64();
> +    load_u64();
> +
> +    store_u128();
> +    load_u128();
> +#endif /* __x86_64__ || __aarch64__ */
> +
> +#if defined(__x86_64__)
> +    atomic_op_u8();
> +    atomic_op_u16();
> +    atomic_op_u32();
> +    atomic_op_u64();
> +#endif /* __x86_64__ */
> +
> +    free(data);
> +}
> diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
> index 5e3391ec9d2..d90cbd3e521 100644
> --- a/tests/tcg/multiarch/Makefile.target
> +++ b/tests/tcg/multiarch/Makefile.target
> @@ -170,5 +170,12 @@ run-plugin-semiconsole-with-%:
>  TESTS += semihosting semiconsole
>  endif
>  
> +# Test plugin memory access instrumentation
> +run-plugin-test-plugin-mem-access-with-libmem.so: \
> +	PLUGIN_ARGS=$(COMMA)print-accesses=true
> +run-plugin-test-plugin-mem-access-with-libmem.so: \
> +	CHECK_PLUGIN_OUTPUT_COMMAND= \
> +	$(SRC_PATH)/tests/tcg/multiarch/check-plugin-mem-access.sh
> +
>  # Update TESTS
>  TESTS += $(MULTIARCH_TESTS)
> diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
> new file mode 100755
> index 00000000000..909606943bb
> --- /dev/null
> +++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
> @@ -0,0 +1,30 @@
> +#!/usr/bin/env bash
> +
> +set -euo pipefail
> +
> +die()
> +{
> +    echo "$@" 1>&2
> +    exit 1
> +}
> +
> +check()
> +{
> +    file=$1
> +    pattern=$2
> +    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
> +}
> +
> +[ $# -eq 1 ] || die "usage: plugin_out_file"
> +
> +plugin_out=$1
> +
> +expected()
> +{
> +    ./test-plugin-mem-access ||
> +        die "running test-plugin-mem-access executable failed"

I'm confused by this. We seem to be running the test again and this is
going to fail if binfmt_misc isn't setup (which we don't assume for
running the TCG tests).

> +}
> +
> +expected | while read line; do
> +    check "$plugin_out" "$line"
> +done

-- 
Alex Bennée
Virtualisation Tech Lead @ Linaro
Re: [PATCH v7 6/6] tests/tcg/multiarch: add test for plugin memory access
Posted by Pierrick Bouvier 2 weeks, 2 days ago
On 8/29/24 02:03, Alex Bennée wrote:
> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
> 
>> Add an explicit test to check expected memory values are read/written.
>> 8,16,32 load/store are tested for all arch.
>> 64,128 load/store are tested for aarch64/x64.
>> atomic operations (8,16,32,64) are tested for x64 only.
>>
>> By default, atomic accesses are non atomic if a single cpu is running,
>> so we force creation of a second one by creating a new thread first.
>>
>> load/store helpers code path can't be triggered easily in user mode (no
>> softmmu), so we can't test it here.
>>
>> Output of test-plugin-mem-access.c is the list of expected patterns in
>> plugin output. By reading stdout, we can compare to plugins output and
>> have a multiarch test.
>>
>> Can be run with:
>> make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so
>>
>> Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
>> ---
>>   tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
>>   tests/tcg/multiarch/Makefile.target           |   7 +
>>   .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
>>   3 files changed, 212 insertions(+)
>>   create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
>>   create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh
>>
>> diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
>> new file mode 100644
>> index 00000000000..09d1fa22e35
>> --- /dev/null
>> +++ b/tests/tcg/multiarch/test-plugin-mem-access.c
>> @@ -0,0 +1,175 @@
>> +/*
>> + * SPDX-License-Identifier: GPL-2.0-or-later
>> + *
>> + * Check if we detect all memory accesses expected using plugin API.
>> + * Used in conjunction with ./check-plugin-mem-access.sh check script.
>> + * Output of this program is the list of patterns expected in plugin output.
>> + *
>> + * 8,16,32 load/store are tested for all arch.
>> + * 64,128 load/store are tested for aarch64/x64.
>> + * atomic operations (8,16,32,64) are tested for x64 only.
>> + */
> 
> It would be nice to build this for the softmmu path as well. I'm not
> sure if this can be done with as single source or we need a second test.
> I shall have a play.
> 

Ok, thanks.

>> +
>> +#include <pthread.h>
>> +#include <stdint.h>
>> +#include <stdio.h>
>> +#include <stdlib.h>
>> +
>> +#if defined(__x86_64__)
>> +#include <emmintrin.h>
>> +#elif defined(__aarch64__)
>> +#include <arm_neon.h>
>> +#endif /* __x86_64__ */
>> +
>> +static void *data;
>> +
>> +/* ,store_u8,.*,8,store,0xf1 */
>> +#define PRINT_EXPECTED(function, type, value, action)                 \
>> +do {                                                                  \
>> +    printf(",%s,.*,%d,%s,%s\n",                                       \
>> +           #function, (int) sizeof(type) * 8, action, value);         \
>> +}                                                                     \
>> +while (0)
>> +
>> +#define DEFINE_STORE(name, type, value)                  \
>> +                                                         \
>> +static void print_expected_store_##name(void)            \
>> +{                                                        \
>> +    PRINT_EXPECTED(store_##name, type, #value, "store"); \
>> +}                                                        \
>> +                                                         \
>> +static void store_##name(void)                           \
>> +{                                                        \
>> +    *((type *)data) = value;                             \
>> +    print_expected_store_##name();                       \
>> +}
>> +
>> +#define DEFINE_ATOMIC_OP(name, type, value)                    \
>> +                                                               \
>> +static void print_expected_atomic_op_##name(void)              \
>> +{                                                              \
>> +    PRINT_EXPECTED(atomic_op_##name, type, "0x0*42", "load");  \
>> +    PRINT_EXPECTED(atomic_op_##name, type, #value, "store");   \
>> +}                                                              \
>> +                                                               \
>> +static void atomic_op_##name(void)                             \
>> +{                                                              \
>> +    *((type *)data) = 0x42;                                    \
>> +    __sync_val_compare_and_swap((type *)data, 0x42, value);    \
>> +    print_expected_atomic_op_##name();                         \
>> +}
>> +
>> +#define DEFINE_LOAD(name, type, value)                  \
>> +                                                        \
>> +static void print_expected_load_##name(void)            \
>> +{                                                       \
>> +    PRINT_EXPECTED(load_##name, type, #value, "load");  \
>> +}                                                       \
>> +                                                        \
>> +static void load_##name(void)                           \
>> +{                                                       \
>> +    type src = *((type *) data);                        \
>> +    type dest = src;                                    \
>> +    (void)src, (void)dest;                              \
>> +    print_expected_load_##name();                       \
>> +}
>> +
>> +DEFINE_STORE(u8, uint8_t, 0xf1)
>> +DEFINE_LOAD(u8, uint8_t, 0xf1)
>> +DEFINE_STORE(u16, uint16_t, 0xf123)
>> +DEFINE_LOAD(u16, uint16_t, 0xf123)
>> +DEFINE_STORE(u32, uint32_t, 0xff112233)
>> +DEFINE_LOAD(u32, uint32_t, 0xff112233)
>> +
>> +#if defined(__x86_64__) || defined(__aarch64__)
>> +DEFINE_STORE(u64, uint64_t, 0xf123456789abcdef)
>> +DEFINE_LOAD(u64, uint64_t, 0xf123456789abcdef)
>> +
>> +static void print_expected_store_u128(void)
>> +{
>> +    PRINT_EXPECTED(store_u128, __int128,
>> +                   "0xf122334455667788f123456789abcdef", "store");
>> +}
>> +
>> +static void store_u128(void)
>> +{
>> +#ifdef __x86_64__
>> +    _mm_store_si128(data, _mm_set_epi32(0xf1223344, 0x55667788,
>> +                                        0xf1234567, 0x89abcdef));
>> +#else
>> +    const uint32_t init[4] = {0x89abcdef, 0xf1234567, 0x55667788, 0xf1223344};
>> +    uint32x4_t vec = vld1q_u32(init);
>> +    vst1q_u32(data, vec);
>> +#endif /* __x86_64__ */
>> +    print_expected_store_u128();
>> +}
>> +
>> +static void print_expected_load_u128(void)
>> +{
>> +    PRINT_EXPECTED(load_u128, __int128,
>> +                   "0xf122334455667788f123456789abcdef", "load");
>> +}
>> +
>> +static void load_u128(void)
>> +{
>> +#ifdef __x86_64__
>> +    __m128i var = _mm_load_si128(data);
>> +#else
>> +    uint32x4_t var = vld1q_u32(data);
>> +#endif
>> +    (void) var;
>> +    print_expected_load_u128();
>> +}
>> +#endif /* __x86_64__ || __aarch64__ */
>> +
>> +#if defined(__x86_64__)
>> +DEFINE_ATOMIC_OP(u8, uint8_t, 0xf1)
>> +DEFINE_ATOMIC_OP(u16, uint16_t, 0xf123)
>> +DEFINE_ATOMIC_OP(u32, uint32_t, 0xff112233)
>> +DEFINE_ATOMIC_OP(u64, uint64_t, 0xf123456789abcdef)
>> +#endif /* __x86_64__ */
>> +
>> +static void *f(void *p)
>> +{
>> +    return NULL;
>> +}
>> +
>> +int main(void)
>> +{
>> +    /*
>> +     * We force creation of a second thread to enable cpu flag CF_PARALLEL.
>> +     * This will generate atomic operations when needed.
>> +     */
>> +    pthread_t thread;
>> +    pthread_create(&thread, NULL, &f, NULL);
>> +    pthread_join(thread, NULL);
>> +
>> +    /* allocate storage up to 128 bits */
>> +    data = malloc(16);
>> +
>> +    store_u8();
>> +    load_u8();
>> +
>> +    store_u16();
>> +    load_u16();
>> +
>> +    store_u32();
>> +    load_u32();
>> +
>> +#if defined(__x86_64__) || defined(__aarch64__)
>> +    store_u64();
>> +    load_u64();
>> +
>> +    store_u128();
>> +    load_u128();
>> +#endif /* __x86_64__ || __aarch64__ */
>> +
>> +#if defined(__x86_64__)
>> +    atomic_op_u8();
>> +    atomic_op_u16();
>> +    atomic_op_u32();
>> +    atomic_op_u64();
>> +#endif /* __x86_64__ */
>> +
>> +    free(data);
>> +}
>> diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
>> index 5e3391ec9d2..d90cbd3e521 100644
>> --- a/tests/tcg/multiarch/Makefile.target
>> +++ b/tests/tcg/multiarch/Makefile.target
>> @@ -170,5 +170,12 @@ run-plugin-semiconsole-with-%:
>>   TESTS += semihosting semiconsole
>>   endif
>>   
>> +# Test plugin memory access instrumentation
>> +run-plugin-test-plugin-mem-access-with-libmem.so: \
>> +	PLUGIN_ARGS=$(COMMA)print-accesses=true
>> +run-plugin-test-plugin-mem-access-with-libmem.so: \
>> +	CHECK_PLUGIN_OUTPUT_COMMAND= \
>> +	$(SRC_PATH)/tests/tcg/multiarch/check-plugin-mem-access.sh
>> +
>>   # Update TESTS
>>   TESTS += $(MULTIARCH_TESTS)
>> diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
>> new file mode 100755
>> index 00000000000..909606943bb
>> --- /dev/null
>> +++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
>> @@ -0,0 +1,30 @@
>> +#!/usr/bin/env bash
>> +
>> +set -euo pipefail
>> +
>> +die()
>> +{
>> +    echo "$@" 1>&2
>> +    exit 1
>> +}
>> +
>> +check()
>> +{
>> +    file=$1
>> +    pattern=$2
>> +    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
>> +}
>> +
>> +[ $# -eq 1 ] || die "usage: plugin_out_file"
>> +
>> +plugin_out=$1
>> +
>> +expected()
>> +{
>> +    ./test-plugin-mem-access ||
>> +        die "running test-plugin-mem-access executable failed"
> 
> I'm confused by this. We seem to be running the test again and this is
> going to fail if binfmt_misc isn't setup (which we don't assume for
> running the TCG tests).
> 

The test stdout is the expected output to grep. This is to avoid avoid 
an "expected file" and a "source file" somewhere else.
Could we use compiled qemu-user to run it instead?

I'm trying to find a solution where "expected" is not duplicated between 
several files.

>> +}
>> +
>> +expected | while read line; do
>> +    check "$plugin_out" "$line"
>> +done
> 
Re: [PATCH v7 6/6] tests/tcg/multiarch: add test for plugin memory access
Posted by Alex Bennée 1 week, 5 days ago
Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:

> On 8/29/24 02:03, Alex Bennée wrote:
>> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
>> 
<snip>
>>> diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
>>> new file mode 100755
>>> index 00000000000..909606943bb
>>> --- /dev/null
>>> +++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
>>> @@ -0,0 +1,30 @@
>>> +#!/usr/bin/env bash
>>> +
>>> +set -euo pipefail
>>> +
>>> +die()
>>> +{
>>> +    echo "$@" 1>&2
>>> +    exit 1
>>> +}
>>> +
>>> +check()
>>> +{
>>> +    file=$1
>>> +    pattern=$2
>>> +    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
>>> +}
>>> +
>>> +[ $# -eq 1 ] || die "usage: plugin_out_file"
>>> +
>>> +plugin_out=$1
>>> +
>>> +expected()
>>> +{
>>> +    ./test-plugin-mem-access ||
>>> +        die "running test-plugin-mem-access executable failed"
>> I'm confused by this. We seem to be running the test again and this
>> is
>> going to fail if binfmt_misc isn't setup (which we don't assume for
>> running the TCG tests).
>> 
>
> The test stdout is the expected output to grep. This is to avoid avoid
> an "expected file" and a "source file" somewhere else.

Is this really such an issue. For the system mode test I just did:

  run-plugin-memory-with-libmem.so: 		\
          CHECK_PLUGIN_OUTPUT_COMMAND=$(MULTIARCH_SYSTEM_SRC)/validate-memory-counts.py $@.out

> Could we use compiled qemu-user to run it instead?

Yes - although that would be inefficient (and you need to pass that path
in somehow anyway)

> I'm trying to find a solution where "expected" is not duplicated
> between several files.

Move it all into python?

-- 
Alex Bennée
Virtualisation Tech Lead @ Linaro
[RFC PATCH] tests/tcg: add a system test to check memory instrumentation
Posted by Alex Bennée 2 weeks, 3 days ago
At first I thought I could compile the user-mode test for system mode
however we already have a fairly comprehensive test case for system
mode in "memory" so lets use that.

First we extend the test to report where the test_data region is. Then
we expand the pdot() function to track the total number of reads and
writes to the region. We have to add some addition pdot() calls to
take into account multiple reads/writes in the test loops.

As tracking every access will quickly build up with "print-access" we
add a new mode to track groups of reads and writes to pages. Because
the test_data is page aligned we can be sure all accesses to it are
ones we can count.

Finally we add a python script to integrate the data from the plugin
and the output of the test and validate they both agree on the total
counts.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
 tests/tcg/multiarch/system/memory.c           |  48 +++++---
 tests/tcg/plugins/mem.c                       |  79 ++++++++++++-
 .../multiarch/system/Makefile.softmmu-target  |   6 +
 .../system/validate-memory-counts.py          | 108 ++++++++++++++++++
 4 files changed, 224 insertions(+), 17 deletions(-)
 create mode 100755 tests/tcg/multiarch/system/validate-memory-counts.py

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index 6eb2eb16f7..335ecbd7f0 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -14,12 +14,16 @@
 
 #include <stdint.h>
 #include <stdbool.h>
+#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
 # error "Target does not specify CHECK_UNALIGNED"
 #endif
 
+uint32_t test_read_count;
+uint32_t test_write_count;
+
 #define MEM_PAGE_SIZE 4096             /* nominal 4k "pages" */
 #define TEST_SIZE (MEM_PAGE_SIZE * 4)  /* 4 pages */
 
@@ -32,8 +36,13 @@ typedef void (*init_ufn) (int offset);
 typedef bool (*read_ufn) (int offset);
 typedef bool (*read_sfn) (int offset, bool nf);
 
-static void pdot(int count)
+static void pdot(int count, bool write)
 {
+    if (write) {
+        test_write_count++;
+    } else {
+        test_read_count++;
+    }
     if (count % 128 == 0) {
         ml_printf(".");
     }
@@ -66,7 +75,7 @@ static void init_test_data_u8(int unused_offset)
     ml_printf("Filling test area with u8:");
     for (i = 0; i < TEST_SIZE; i++) {
         *ptr++ = BYTE_NEXT(count);
-        pdot(i);
+        pdot(i, true);
     }
     ml_printf("done\n");
 }
@@ -91,8 +100,9 @@ static void init_test_data_s8(bool neg_first)
               neg_first ? "neg first" : "pos first");
     for (i = 0; i < TEST_SIZE / 2; i++) {
         *ptr++ = get_byte(i, neg_first);
+        pdot(i, true);
         *ptr++ = get_byte(i, !neg_first);
-        pdot(i);
+        pdot(i, true);
     }
     ml_printf("done\n");
 }
@@ -107,6 +117,7 @@ static void reset_start_data(int offset)
     int i;
     for (i = 0; i < offset; i++) {
         *ptr++ = 0;
+        pdot(i, true);
     }
 }
 
@@ -125,7 +136,7 @@ static void init_test_data_u16(int offset)
         uint16_t low = BYTE_NEXT(count), high = BYTE_NEXT(count);
         word = BYTE_SHIFT(high, 1) | BYTE_SHIFT(low, 0);
         *ptr++ = word;
-        pdot(i);
+        pdot(i, true);
     }
     ml_printf("done @ %p\n", ptr);
 }
@@ -147,7 +158,7 @@ static void init_test_data_u32(int offset)
         word = BYTE_SHIFT(b1, 3) | BYTE_SHIFT(b2, 2) | BYTE_SHIFT(b3, 1) |
                BYTE_SHIFT(b4, 0);
         *ptr++ = word;
-        pdot(i);
+        pdot(i, true);
     }
     ml_printf("done @ %p\n", ptr);
 }
@@ -172,7 +183,7 @@ static void init_test_data_u64(int offset)
                BYTE_SHIFT(b4, 4) | BYTE_SHIFT(b5, 3) | BYTE_SHIFT(b6, 2) |
                BYTE_SHIFT(b7, 1) | BYTE_SHIFT(b8, 0);
         *ptr++ = word;
-        pdot(i);
+        pdot(i, true);
     }
     ml_printf("done @ %p\n", ptr);
 }
@@ -194,7 +205,7 @@ static bool read_test_data_u16(int offset)
             ml_printf("Error %d < %d\n", high, low);
             return false;
         } else {
-            pdot(i);
+            pdot(i, false);
         }
 
     }
@@ -236,7 +247,7 @@ static bool read_test_data_u32(int offset)
             ml_printf("Error %d, %d, %d, %d", b1, b2, b3, b4);
             return false;
         } else {
-            pdot(i);
+            pdot(i, false);
         }
     }
     ml_printf("done @ %p\n", ptr);
@@ -290,7 +301,7 @@ static bool read_test_data_u64(int offset)
                       b1, b2, b3, b4, b5, b6, b7, b8);
             return false;
         } else {
-            pdot(i);
+            pdot(i, false);
         }
     }
     ml_printf("done @ %p\n", ptr);
@@ -357,9 +368,11 @@ static bool read_test_data_s8(int offset, bool neg_first)
         second = *ptr++;
 
         if (neg_first && first < 0 && second > 0) {
-            pdot(i);
+            pdot(i, false);
+            pdot(i, false);
         } else if (!neg_first && first > 0 && second < 0) {
-            pdot(i);
+            pdot(i, false);
+            pdot(i, false);
         } else {
             ml_printf("Error %d %c %d\n", first, neg_first ? '<' : '>', second);
             return false;
@@ -390,9 +403,9 @@ static bool read_test_data_s16(int offset, bool neg_first)
         int32_t data = *ptr++;
 
         if (neg_first && data < 0) {
-            pdot(i);
+            pdot(i, false);
         } else if (!neg_first && data > 0) {
-            pdot(i);
+            pdot(i, false);
         } else {
             ml_printf("Error %d %c 0\n", data, neg_first ? '<' : '>');
             return false;
@@ -423,9 +436,9 @@ static bool read_test_data_s32(int offset, bool neg_first)
         int64_t data = *ptr++;
 
         if (neg_first && data < 0) {
-            pdot(i);
+            pdot(i, false);
         } else if (!neg_first && data > 0) {
-            pdot(i);
+            pdot(i, false);
         } else {
             ml_printf("Error %d %c 0\n", data, neg_first ? '<' : '>');
             return false;
@@ -475,6 +488,9 @@ int main(void)
     int i;
     bool ok = true;
 
+    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
+    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+
     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
         ok = do_unsigned_test(init_ufns[i]);
@@ -490,6 +506,8 @@ int main(void)
         ok = do_signed_reads(true);
     }
 
+    ml_printf("Test data read: %"PRId32"\n", test_read_count);
+    ml_printf("Test data write: %"PRId32"\n", test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
diff --git a/tests/tcg/plugins/mem.c b/tests/tcg/plugins/mem.c
index 086e6f5bdf..f9a2ab4c13 100644
--- a/tests/tcg/plugins/mem.c
+++ b/tests/tcg/plugins/mem.c
@@ -26,13 +26,27 @@ typedef struct {
     const char *sym;
 } InsnInfo;
 
+typedef struct {
+    uint64_t page_address;
+    uint64_t reads;
+    uint64_t read_bytes;
+    uint64_t writes;
+    uint64_t written_bytes;
+} PageInfo;
+
 static struct qemu_plugin_scoreboard *counts;
 static qemu_plugin_u64 mem_count;
 static qemu_plugin_u64 io_count;
-static bool do_inline, do_callback, do_print_accesses;
+static bool do_inline, do_callback, do_print_accesses, do_page_summary;
 static bool do_haddr;
 static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;
 
+static uint64_t page_size = 4096;
+static uint64_t page_mask;
+
+static GMutex lock;
+static GHashTable *pages;
+
 static void plugin_exit(qemu_plugin_id_t id, void *p)
 {
     g_autoptr(GString) out = g_string_new("");
@@ -46,6 +60,31 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
                                qemu_plugin_u64_sum(io_count));
     }
     qemu_plugin_outs(out->str);
+
+
+    if (do_page_summary) {
+        GList *counts = g_hash_table_get_values(pages);
+
+        g_string_printf(out, "PageAddr, Reads, Read Bytes, Writes, Write Bytes\n");
+
+        if (counts && g_list_next(counts)) {
+            for (/* counts */; counts->next; counts = counts->next) {
+                PageInfo *pi = (PageInfo *) counts->data;
+
+                g_string_append_printf(out,
+                                       "0x%016"PRIx64", "
+                                       "%"PRId64", %"PRId64", "
+                                       "%"PRId64", %"PRId64"\n",
+                                       pi->page_address,
+                                       pi->reads,
+                                       pi->read_bytes,
+                                       pi->writes,
+                                       pi->written_bytes);
+            }
+        }
+        qemu_plugin_outs(out->str);
+    }
+
     qemu_plugin_scoreboard_free(counts);
 }
 
@@ -63,6 +102,31 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
     } else {
         qemu_plugin_u64_add(mem_count, cpu_index, 1);
     }
+
+    if (do_page_summary) {
+        uint64_t page = vaddr & ~page_mask;
+        PageInfo *pi;
+        unsigned size = 8 << qemu_plugin_mem_size_shift(meminfo);
+
+        g_mutex_lock(&lock);
+        pi = (PageInfo *) g_hash_table_lookup(pages, GUINT_TO_POINTER(page));
+
+        if (!pi) {
+            pi = g_new0(PageInfo, 1);
+            pi->page_address = page;
+            g_hash_table_insert(pages, GUINT_TO_POINTER(page), (gpointer) pi);
+        }
+
+        if (qemu_plugin_mem_is_store(meminfo)) {
+            pi->writes++;
+            pi->written_bytes += size;
+        } else {
+            pi->reads++;
+            pi->read_bytes += size;
+        }
+
+        g_mutex_unlock(&lock);
+    }
 }
 
 static void print_access(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
@@ -117,7 +181,7 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
                 QEMU_PLUGIN_INLINE_ADD_U64,
                 mem_count, 1);
         }
-        if (do_callback) {
+        if (do_callback || do_page_summary) {
             qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
                                              QEMU_PLUGIN_CB_NO_REGS,
                                              rw, NULL);
@@ -176,6 +240,12 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                 fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
                 return -1;
             }
+        } else if (g_strcmp0(tokens[0], "page-summary") == 0) {
+            if (!qemu_plugin_bool_parse(tokens[0], tokens[1],
+                                        &do_page_summary)) {
+                fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
+                return -1;
+            }
         } else {
             fprintf(stderr, "option parsing failed: %s\n", opt);
             return -1;
@@ -196,6 +266,11 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
         qemu_plugin_outs(out->str);
     }
 
+    if (do_page_summary) {
+        page_mask = (page_size - 1);
+        pages = g_hash_table_new(NULL, g_direct_equal);
+    }
+
     counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
     mem_count = qemu_plugin_scoreboard_u64_in_struct(
         counts, CPUCount, mem_count);
diff --git a/tests/tcg/multiarch/system/Makefile.softmmu-target b/tests/tcg/multiarch/system/Makefile.softmmu-target
index 32dc0f9830..a1b33a6973 100644
--- a/tests/tcg/multiarch/system/Makefile.softmmu-target
+++ b/tests/tcg/multiarch/system/Makefile.softmmu-target
@@ -65,3 +65,9 @@ endif
 
 MULTIARCH_RUNS += run-gdbstub-memory run-gdbstub-interrupt \
 	run-gdbstub-untimely-packet run-gdbstub-registers
+
+# Test plugin memory access instrumentation
+run-plugin-memory-with-libmem.so: 		\
+	PLUGIN_ARGS=$(COMMA)page-summary=true
+run-plugin-memory-with-libmem.so: 		\
+	CHECK_PLUGIN_OUTPUT_COMMAND=$(MULTIARCH_SYSTEM_SRC)/validate-memory-counts.py $@.out
diff --git a/tests/tcg/multiarch/system/validate-memory-counts.py b/tests/tcg/multiarch/system/validate-memory-counts.py
new file mode 100755
index 0000000000..8c18bff066
--- /dev/null
+++ b/tests/tcg/multiarch/system/validate-memory-counts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python3
+#
+# validate-memory-counts.py: check we instrumented memory properly
+#
+# This program takes two inputs:
+#   - the mem plugin output
+#   - the memory binary output
+#
+# Copyright (C) 2024 Linaro Ltd
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import sys
+
+def extract_counts(path):
+    """
+    Load the output from path and extract the lines containing:
+
+      Test data start: 0x40214000
+      Test data end: 0x40218001
+      Test data read: 2522280
+      Test data write: 262111
+
+    From the stream of data. Extract the values for use in the
+    validation function.
+    """
+    start_address = None
+    end_address = None
+    read_count = 0
+    write_count = 0
+    with open(path, 'r') as f:
+        for line in f:
+            if line.startswith("Test data start:"):
+                start_address = int(line.split(':')[1].strip(), 16)
+            elif line.startswith("Test data end:"):
+                end_address = int(line.split(':')[1].strip(), 16)
+            elif line.startswith("Test data read:"):
+                read_count = int(line.split(':')[1].strip())
+            elif line.startswith("Test data write:"):
+                write_count = int(line.split(':')[1].strip())
+    return start_address, end_address, read_count, write_count
+
+
+def parse_plugin_output(path, start, end):
+    """
+    Load the plugin output from path in the form of:
+
+      PageAddr, Reads, Read Bytes, Writes, Write Bytes
+      0x0000000040214000, 630296, 15719488, 69700, 1116480
+      0x0000000040201000, 0, 0, 2, 128
+      0x0000000040215000, 630784, 15728640, 69632, 1114112
+
+    And extract the ranges that match test data start and end and
+    return the results.
+    """
+    total_reads = 0
+    total_read_bytes = 0
+    total_writes = 0
+    total_written_bytes = 0
+
+    with open(path, 'r') as f:
+        next(f)  # Skip the header
+        for line in f:
+            parts = line.strip().split(', ')
+            if len(parts) != 5:
+                continue
+            page_addr = int(parts[0], 16)
+            reads = int(parts[1])
+            read_bytes = int(parts[2])
+            writes = int(parts[3])
+            written_bytes = int(parts[4])
+            if start <= page_addr < end: # Checking if within range
+                total_reads += reads
+                total_read_bytes += read_bytes
+                total_writes += writes
+                total_written_bytes += written_bytes
+
+    return total_reads, total_read_bytes, total_writes, total_written_bytes
+
+def main():
+    if len(sys.argv) != 3:
+        print("Usage: <script_name>.py <memory_binary_output_path> <mem_plugin_output_path>")
+        sys.exit(1)
+
+    memory_binary_output_path = sys.argv[1]
+    mem_plugin_output_path = sys.argv[2]
+
+    # Extract counts from memory binary
+    start, end, expected_reads, expected_writes = extract_counts(memory_binary_output_path)
+
+    if start is None or end is None:
+        print("Failed to extract start or end address from memory binary output.")
+        sys.exit(1)
+
+    # Parse plugin output
+    actual_reads, actual_read_bytes, actual_writes, actual_written_bytes = parse_plugin_output(mem_plugin_output_path, start, end)
+
+    # Compare and report
+    if actual_reads == expected_reads and actual_writes == expected_writes:
+        sys.exit(0)
+    else:
+        print("Fail: The memory reads and writes count does not match.")
+        print(f"Expected Reads: {expected_reads}, Actual Reads: {actual_reads}")
+        print(f"Expected Writes: {expected_writes}, Actual Writes: {actual_writes}")
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
-- 
2.39.2


Re: [RFC PATCH] tests/tcg: add a system test to check memory instrumentation
Posted by Pierrick Bouvier 2 weeks, 2 days ago
On 8/30/24 08:25, Alex Bennée wrote:
> At first I thought I could compile the user-mode test for system mode
> however we already have a fairly comprehensive test case for system
> mode in "memory" so lets use that.
> 
> First we extend the test to report where the test_data region is. Then
> we expand the pdot() function to track the total number of reads and
> writes to the region. We have to add some addition pdot() calls to
> take into account multiple reads/writes in the test loops.
> 
> As tracking every access will quickly build up with "print-access" we
> add a new mode to track groups of reads and writes to pages. Because
> the test_data is page aligned we can be sure all accesses to it are
> ones we can count.
> 
> Finally we add a python script to integrate the data from the plugin
> and the output of the test and validate they both agree on the total
> counts.
> 
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> ---
>   tests/tcg/multiarch/system/memory.c           |  48 +++++---
>   tests/tcg/plugins/mem.c                       |  79 ++++++++++++-
>   .../multiarch/system/Makefile.softmmu-target  |   6 +
>   .../system/validate-memory-counts.py          | 108 ++++++++++++++++++
>   4 files changed, 224 insertions(+), 17 deletions(-)
>   create mode 100755 tests/tcg/multiarch/system/validate-memory-counts.py
> 
> diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
> index 6eb2eb16f7..335ecbd7f0 100644
> --- a/tests/tcg/multiarch/system/memory.c
> +++ b/tests/tcg/multiarch/system/memory.c
> @@ -14,12 +14,16 @@
>   
>   #include <stdint.h>
>   #include <stdbool.h>
> +#include <inttypes.h>
>   #include <minilib.h>
>   
>   #ifndef CHECK_UNALIGNED
>   # error "Target does not specify CHECK_UNALIGNED"
>   #endif
>   
> +uint32_t test_read_count;
> +uint32_t test_write_count;
> +
>   #define MEM_PAGE_SIZE 4096             /* nominal 4k "pages" */
>   #define TEST_SIZE (MEM_PAGE_SIZE * 4)  /* 4 pages */
>   
> @@ -32,8 +36,13 @@ typedef void (*init_ufn) (int offset);
>   typedef bool (*read_ufn) (int offset);
>   typedef bool (*read_sfn) (int offset, bool nf);
>   
> -static void pdot(int count)
> +static void pdot(int count, bool write)
>   {
> +    if (write) {
> +        test_write_count++;
> +    } else {
> +        test_read_count++;
> +    }
>       if (count % 128 == 0) {
>           ml_printf(".");
>       }
> @@ -66,7 +75,7 @@ static void init_test_data_u8(int unused_offset)
>       ml_printf("Filling test area with u8:");
>       for (i = 0; i < TEST_SIZE; i++) {
>           *ptr++ = BYTE_NEXT(count);
> -        pdot(i);
> +        pdot(i, true);
>       }
>       ml_printf("done\n");
>   }
> @@ -91,8 +100,9 @@ static void init_test_data_s8(bool neg_first)
>                 neg_first ? "neg first" : "pos first");
>       for (i = 0; i < TEST_SIZE / 2; i++) {
>           *ptr++ = get_byte(i, neg_first);
> +        pdot(i, true);
>           *ptr++ = get_byte(i, !neg_first);
> -        pdot(i);
> +        pdot(i, true);
>       }
>       ml_printf("done\n");
>   }
> @@ -107,6 +117,7 @@ static void reset_start_data(int offset)
>       int i;
>       for (i = 0; i < offset; i++) {
>           *ptr++ = 0;
> +        pdot(i, true);
>       }
>   }
>   
> @@ -125,7 +136,7 @@ static void init_test_data_u16(int offset)
>           uint16_t low = BYTE_NEXT(count), high = BYTE_NEXT(count);
>           word = BYTE_SHIFT(high, 1) | BYTE_SHIFT(low, 0);
>           *ptr++ = word;
> -        pdot(i);
> +        pdot(i, true);
>       }
>       ml_printf("done @ %p\n", ptr);
>   }
> @@ -147,7 +158,7 @@ static void init_test_data_u32(int offset)
>           word = BYTE_SHIFT(b1, 3) | BYTE_SHIFT(b2, 2) | BYTE_SHIFT(b3, 1) |
>                  BYTE_SHIFT(b4, 0);
>           *ptr++ = word;
> -        pdot(i);
> +        pdot(i, true);
>       }
>       ml_printf("done @ %p\n", ptr);
>   }
> @@ -172,7 +183,7 @@ static void init_test_data_u64(int offset)
>                  BYTE_SHIFT(b4, 4) | BYTE_SHIFT(b5, 3) | BYTE_SHIFT(b6, 2) |
>                  BYTE_SHIFT(b7, 1) | BYTE_SHIFT(b8, 0);
>           *ptr++ = word;
> -        pdot(i);
> +        pdot(i, true);
>       }
>       ml_printf("done @ %p\n", ptr);
>   }
> @@ -194,7 +205,7 @@ static bool read_test_data_u16(int offset)
>               ml_printf("Error %d < %d\n", high, low);
>               return false;
>           } else {
> -            pdot(i);
> +            pdot(i, false);
>           }
>   
>       }
> @@ -236,7 +247,7 @@ static bool read_test_data_u32(int offset)
>               ml_printf("Error %d, %d, %d, %d", b1, b2, b3, b4);
>               return false;
>           } else {
> -            pdot(i);
> +            pdot(i, false);
>           }
>       }
>       ml_printf("done @ %p\n", ptr);
> @@ -290,7 +301,7 @@ static bool read_test_data_u64(int offset)
>                         b1, b2, b3, b4, b5, b6, b7, b8);
>               return false;
>           } else {
> -            pdot(i);
> +            pdot(i, false);
>           }
>       }
>       ml_printf("done @ %p\n", ptr);
> @@ -357,9 +368,11 @@ static bool read_test_data_s8(int offset, bool neg_first)
>           second = *ptr++;
>   
>           if (neg_first && first < 0 && second > 0) {
> -            pdot(i);
> +            pdot(i, false);
> +            pdot(i, false);
>           } else if (!neg_first && first > 0 && second < 0) {
> -            pdot(i);
> +            pdot(i, false);
> +            pdot(i, false);
>           } else {
>               ml_printf("Error %d %c %d\n", first, neg_first ? '<' : '>', second);
>               return false;
> @@ -390,9 +403,9 @@ static bool read_test_data_s16(int offset, bool neg_first)
>           int32_t data = *ptr++;
>   
>           if (neg_first && data < 0) {
> -            pdot(i);
> +            pdot(i, false);
>           } else if (!neg_first && data > 0) {
> -            pdot(i);
> +            pdot(i, false);
>           } else {
>               ml_printf("Error %d %c 0\n", data, neg_first ? '<' : '>');
>               return false;
> @@ -423,9 +436,9 @@ static bool read_test_data_s32(int offset, bool neg_first)
>           int64_t data = *ptr++;
>   
>           if (neg_first && data < 0) {
> -            pdot(i);
> +            pdot(i, false);
>           } else if (!neg_first && data > 0) {
> -            pdot(i);
> +            pdot(i, false);
>           } else {
>               ml_printf("Error %d %c 0\n", data, neg_first ? '<' : '>');
>               return false;
> @@ -475,6 +488,9 @@ int main(void)
>       int i;
>       bool ok = true;
>   
> +    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
> +    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
> +
>       /* Run through the unsigned tests first */
>       for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
>           ok = do_unsigned_test(init_ufns[i]);
> @@ -490,6 +506,8 @@ int main(void)
>           ok = do_signed_reads(true);
>       }
>   
> +    ml_printf("Test data read: %"PRId32"\n", test_read_count);
> +    ml_printf("Test data write: %"PRId32"\n", test_write_count);
>       ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
>       return ok ? 0 : -1;
>   }
> diff --git a/tests/tcg/plugins/mem.c b/tests/tcg/plugins/mem.c
> index 086e6f5bdf..f9a2ab4c13 100644
> --- a/tests/tcg/plugins/mem.c
> +++ b/tests/tcg/plugins/mem.c
> @@ -26,13 +26,27 @@ typedef struct {
>       const char *sym;
>   } InsnInfo;
>   
> +typedef struct {
> +    uint64_t page_address;
> +    uint64_t reads;
> +    uint64_t read_bytes;
> +    uint64_t writes;
> +    uint64_t written_bytes;
> +} PageInfo;
> +
>   static struct qemu_plugin_scoreboard *counts;
>   static qemu_plugin_u64 mem_count;
>   static qemu_plugin_u64 io_count;
> -static bool do_inline, do_callback, do_print_accesses;
> +static bool do_inline, do_callback, do_print_accesses, do_page_summary;
>   static bool do_haddr;
>   static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;
>   
> +static uint64_t page_size = 4096;
> +static uint64_t page_mask;
> +
> +static GMutex lock;
> +static GHashTable *pages;
> +
>   static void plugin_exit(qemu_plugin_id_t id, void *p)
>   {
>       g_autoptr(GString) out = g_string_new("");
> @@ -46,6 +60,31 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
>                                  qemu_plugin_u64_sum(io_count));
>       }
>       qemu_plugin_outs(out->str);
> +
> +
> +    if (do_page_summary) {
> +        GList *counts = g_hash_table_get_values(pages);
> +
> +        g_string_printf(out, "PageAddr, Reads, Read Bytes, Writes, Write Bytes\n");
> +
> +        if (counts && g_list_next(counts)) {
> +            for (/* counts */; counts->next; counts = counts->next) {
> +                PageInfo *pi = (PageInfo *) counts->data;
> +
> +                g_string_append_printf(out,
> +                                       "0x%016"PRIx64", "
> +                                       "%"PRId64", %"PRId64", "
> +                                       "%"PRId64", %"PRId64"\n",
> +                                       pi->page_address,
> +                                       pi->reads,
> +                                       pi->read_bytes,
> +                                       pi->writes,
> +                                       pi->written_bytes);
> +            }
> +        }
> +        qemu_plugin_outs(out->str);
> +    }
> +
>       qemu_plugin_scoreboard_free(counts);
>   }
>   
> @@ -63,6 +102,31 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
>       } else {
>           qemu_plugin_u64_add(mem_count, cpu_index, 1);
>       }
> +
> +    if (do_page_summary) {
> +        uint64_t page = vaddr & ~page_mask;
> +        PageInfo *pi;
> +        unsigned size = 8 << qemu_plugin_mem_size_shift(meminfo);
> +
> +        g_mutex_lock(&lock);
> +        pi = (PageInfo *) g_hash_table_lookup(pages, GUINT_TO_POINTER(page));
> +
> +        if (!pi) {
> +            pi = g_new0(PageInfo, 1);
> +            pi->page_address = page;
> +            g_hash_table_insert(pages, GUINT_TO_POINTER(page), (gpointer) pi);
> +        }
> +
> +        if (qemu_plugin_mem_is_store(meminfo)) {
> +            pi->writes++;
> +            pi->written_bytes += size;
> +        } else {
> +            pi->reads++;
> +            pi->read_bytes += size;
> +        }
> +
> +        g_mutex_unlock(&lock);
> +    }
>   }
>   
>   static void print_access(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
> @@ -117,7 +181,7 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
>                   QEMU_PLUGIN_INLINE_ADD_U64,
>                   mem_count, 1);
>           }
> -        if (do_callback) {
> +        if (do_callback || do_page_summary) {
>               qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
>                                                QEMU_PLUGIN_CB_NO_REGS,
>                                                rw, NULL);
> @@ -176,6 +240,12 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
>                   fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
>                   return -1;
>               }
> +        } else if (g_strcmp0(tokens[0], "page-summary") == 0) {
> +            if (!qemu_plugin_bool_parse(tokens[0], tokens[1],
> +                                        &do_page_summary)) {
> +                fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
> +                return -1;
> +            }
>           } else {
>               fprintf(stderr, "option parsing failed: %s\n", opt);
>               return -1;
> @@ -196,6 +266,11 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
>           qemu_plugin_outs(out->str);
>       }
>   
> +    if (do_page_summary) {
> +        page_mask = (page_size - 1);
> +        pages = g_hash_table_new(NULL, g_direct_equal);
> +    }
> +
>       counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
>       mem_count = qemu_plugin_scoreboard_u64_in_struct(
>           counts, CPUCount, mem_count);
> diff --git a/tests/tcg/multiarch/system/Makefile.softmmu-target b/tests/tcg/multiarch/system/Makefile.softmmu-target
> index 32dc0f9830..a1b33a6973 100644
> --- a/tests/tcg/multiarch/system/Makefile.softmmu-target
> +++ b/tests/tcg/multiarch/system/Makefile.softmmu-target
> @@ -65,3 +65,9 @@ endif
>   
>   MULTIARCH_RUNS += run-gdbstub-memory run-gdbstub-interrupt \
>   	run-gdbstub-untimely-packet run-gdbstub-registers
> +
> +# Test plugin memory access instrumentation
> +run-plugin-memory-with-libmem.so: 		\
> +	PLUGIN_ARGS=$(COMMA)page-summary=true
> +run-plugin-memory-with-libmem.so: 		\
> +	CHECK_PLUGIN_OUTPUT_COMMAND=$(MULTIARCH_SYSTEM_SRC)/validate-memory-counts.py $@.out
> diff --git a/tests/tcg/multiarch/system/validate-memory-counts.py b/tests/tcg/multiarch/system/validate-memory-counts.py
> new file mode 100755
> index 0000000000..8c18bff066
> --- /dev/null
> +++ b/tests/tcg/multiarch/system/validate-memory-counts.py
> @@ -0,0 +1,108 @@
> +#!/usr/bin/env python3
> +#
> +# validate-memory-counts.py: check we instrumented memory properly
> +#
> +# This program takes two inputs:
> +#   - the mem plugin output
> +#   - the memory binary output
> +#
> +# Copyright (C) 2024 Linaro Ltd
> +#
> +# SPDX-License-Identifier: GPL-2.0-or-later
> +
> +import sys
> +
> +def extract_counts(path):
> +    """
> +    Load the output from path and extract the lines containing:
> +
> +      Test data start: 0x40214000
> +      Test data end: 0x40218001
> +      Test data read: 2522280
> +      Test data write: 262111
> +
> +    From the stream of data. Extract the values for use in the
> +    validation function.
> +    """
> +    start_address = None
> +    end_address = None
> +    read_count = 0
> +    write_count = 0
> +    with open(path, 'r') as f:
> +        for line in f:
> +            if line.startswith("Test data start:"):
> +                start_address = int(line.split(':')[1].strip(), 16)
> +            elif line.startswith("Test data end:"):
> +                end_address = int(line.split(':')[1].strip(), 16)
> +            elif line.startswith("Test data read:"):
> +                read_count = int(line.split(':')[1].strip())
> +            elif line.startswith("Test data write:"):
> +                write_count = int(line.split(':')[1].strip())
> +    return start_address, end_address, read_count, write_count
> +
> +
> +def parse_plugin_output(path, start, end):
> +    """
> +    Load the plugin output from path in the form of:
> +
> +      PageAddr, Reads, Read Bytes, Writes, Write Bytes
> +      0x0000000040214000, 630296, 15719488, 69700, 1116480
> +      0x0000000040201000, 0, 0, 2, 128
> +      0x0000000040215000, 630784, 15728640, 69632, 1114112
> +
> +    And extract the ranges that match test data start and end and
> +    return the results.
> +    """
> +    total_reads = 0
> +    total_read_bytes = 0
> +    total_writes = 0
> +    total_written_bytes = 0
> +
> +    with open(path, 'r') as f:
> +        next(f)  # Skip the header
> +        for line in f:
> +            parts = line.strip().split(', ')
> +            if len(parts) != 5:
> +                continue
> +            page_addr = int(parts[0], 16)
> +            reads = int(parts[1])
> +            read_bytes = int(parts[2])
> +            writes = int(parts[3])
> +            written_bytes = int(parts[4])
> +            if start <= page_addr < end: # Checking if within range
> +                total_reads += reads
> +                total_read_bytes += read_bytes
> +                total_writes += writes
> +                total_written_bytes += written_bytes
> +
> +    return total_reads, total_read_bytes, total_writes, total_written_bytes
> +
> +def main():
> +    if len(sys.argv) != 3:
> +        print("Usage: <script_name>.py <memory_binary_output_path> <mem_plugin_output_path>")
> +        sys.exit(1)
> +
> +    memory_binary_output_path = sys.argv[1]
> +    mem_plugin_output_path = sys.argv[2]
> +
> +    # Extract counts from memory binary
> +    start, end, expected_reads, expected_writes = extract_counts(memory_binary_output_path)
> +
> +    if start is None or end is None:
> +        print("Failed to extract start or end address from memory binary output.")
> +        sys.exit(1)
> +
> +    # Parse plugin output
> +    actual_reads, actual_read_bytes, actual_writes, actual_written_bytes = parse_plugin_output(mem_plugin_output_path, start, end)
> +
> +    # Compare and report
> +    if actual_reads == expected_reads and actual_writes == expected_writes:
> +        sys.exit(0)
> +    else:
> +        print("Fail: The memory reads and writes count does not match.")
> +        print(f"Expected Reads: {expected_reads}, Actual Reads: {actual_reads}")
> +        print(f"Expected Writes: {expected_writes}, Actual Writes: {actual_writes}")
> +        sys.exit(1)
> +
> +if __name__ == "__main__":
> +    main()

Thanks for investigating this.
Overall, it would be a good thing to have a test like this.

However, I think it misses the point attached to this series.
Indeed, the new API function qemu_plugin_mem_get_value() is never 
called, and we just count expected reads/writes at given addresses.

There is value in this, but I definitely think this should be in a 
different series, after the current one is merged.

What do you think?