Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
contrib/plugins/hotblocks.c | 50 ++++++++++++++++++++++---------------
1 file changed, 30 insertions(+), 20 deletions(-)
diff --git a/contrib/plugins/hotblocks.c b/contrib/plugins/hotblocks.c
index 4de1b134944..02bc5078bdd 100644
--- a/contrib/plugins/hotblocks.c
+++ b/contrib/plugins/hotblocks.c
@@ -34,8 +34,8 @@ static guint64 limit = 20;
  */
 typedef struct {
     uint64_t start_addr;
-    uint64_t exec_count;
-    int      trans_count;
+    struct qemu_plugin_scoreboard *exec_count;
+    int trans_count;
     unsigned long insns;
 } ExecCount;
 
@@ -43,7 +43,17 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b)
 {
     ExecCount *ea = (ExecCount *) a;
     ExecCount *eb = (ExecCount *) b;
-    return ea->exec_count > eb->exec_count ? -1 : 1;
+    uint64_t count_a =
+        qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(ea->exec_count));
+    uint64_t count_b =
+        qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(eb->exec_count));
+    return count_a > count_b ? -1 : 1;
+}
+
+static void exec_count_free(gpointer key, gpointer value, gpointer user_data)
+{
+    ExecCount *cnt = value;
+    qemu_plugin_scoreboard_free(cnt->exec_count);
 }
 
 static void plugin_exit(qemu_plugin_id_t id, void *p)
@@ -52,7 +62,6 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
     GList *counts, *it;
     int i;
 
-    g_mutex_lock(&lock);
     g_string_append_printf(report, "%d entries in the hash table\n",
                            g_hash_table_size(hotblocks));
     counts = g_hash_table_get_values(hotblocks);
@@ -63,16 +72,21 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
 
         for (i = 0; i < limit && it->next; i++, it = it->next) {
             ExecCount *rec = (ExecCount *) it->data;
-            g_string_append_printf(report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n",
-                                   rec->start_addr, rec->trans_count,
-                                   rec->insns, rec->exec_count);
+            g_string_append_printf(
+                report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n",
+                rec->start_addr, rec->trans_count,
+                rec->insns,
+                qemu_plugin_u64_sum(
+                    qemu_plugin_scoreboard_u64(rec->exec_count)));
         }
 
         g_list_free(it);
     }
-    g_mutex_unlock(&lock);
 
     qemu_plugin_outs(report->str);
+
+    g_hash_table_foreach(hotblocks, exec_count_free, NULL);
+    g_hash_table_destroy(hotblocks);
 }
 
 static void plugin_init(void)
@@ -82,15 +96,9 @@ static void plugin_init(void)
 
 static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
 {
-    ExecCount *cnt;
-    uint64_t hash = (uint64_t) udata;
-
-    g_mutex_lock(&lock);
-    cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash);
-    /* should always succeed */
-    g_assert(cnt);
-    cnt->exec_count++;
-    g_mutex_unlock(&lock);
+    ExecCount *cnt = (ExecCount *)udata;
+    qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(cnt->exec_count),
+                        cpu_index, 1);
 }
 
 /*
@@ -114,18 +122,20 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
         cnt->start_addr = pc;
         cnt->trans_count = 1;
         cnt->insns = insns;
+        cnt->exec_count = qemu_plugin_scoreboard_new(sizeof(uint64_t));
         g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt);
     }
 
     g_mutex_unlock(&lock);
 
     if (do_inline) {
-        qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
-                                                 &cnt->exec_count, 1);
+        qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+            tb, QEMU_PLUGIN_INLINE_ADD_U64,
+            qemu_plugin_scoreboard_u64(cnt->exec_count), 1);
     } else {
         qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
                                              QEMU_PLUGIN_CB_NO_REGS,
-                                             (void *)hash);
+                                             (void *)cnt);
     }
 }
--
2.43.0
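
For readers skimming the diff, the conversion reduces to the scoreboard lifecycle sketched below. This is illustration only, not part of the patch; the API calls are the ones introduced earlier in this series, while the helper names (counts_init, counts_hit, counts_total_and_free) are invented for the example.

    #include <stdint.h>
    #include <qemu-plugin.h>

    /* One uint64_t slot per vCPU, allocated per translated block. */
    static struct qemu_plugin_scoreboard *counts;

    static void counts_init(void)
    {
        counts = qemu_plugin_scoreboard_new(sizeof(uint64_t));
    }

    /* Hot path: each vCPU updates only its own slot, so no lock is needed. */
    static void counts_hit(unsigned int cpu_index)
    {
        qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(counts), cpu_index, 1);
    }

    /* Reporting path: fold the per-vCPU slots into one total, then release. */
    static uint64_t counts_total_and_free(void)
    {
        uint64_t total = qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(counts));
        qemu_plugin_scoreboard_free(counts);
        return total;
    }
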
Hi Pierrick,

On 13:14 Mon 26 Feb, Pierrick Bouvier wrote:
> @@ -52,7 +62,6 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
>      GList *counts, *it;
>      int i;
>
> -    g_mutex_lock(&lock);

I encountered cases before where the vCPUs continue executing while
plugin_exit is called. This can happen e.g. when QEMU calls exit(3)
from one CPU thread. Others will continue to run at the same time the
atexit callbacks are called.

This also means that you can't really free the resources as you do at
the end of plugin_exit.
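
To make the scenario above concrete, here is a purely illustrative interleaving (not taken from QEMU sources) of how a late callback could race with the cleanup added at the end of plugin_exit:

    /*
     * Illustration only: hypothetical interleaving of the hazard described
     * above, with one vCPU thread exiting while another keeps running.
     *
     *   vCPU 0 thread                       vCPU 1 thread
     *   -------------                       -------------
     *   guest calls exit(3)
     *   atexit callbacks run                still executing translated code
     *     plugin_exit()                     vcpu_tb_exec(cnt) fires
     *       ... report printed ...            qemu_plugin_u64_add(...)
     *       qemu_plugin_scoreboard_free()       on cnt->exec_count
     *       g_hash_table_destroy()              -> may touch freed memory
     */
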
On 2/27/24 2:54 PM, Luc Michel wrote:
> I encountered cases before where the vCPUs continue executing while
> plugin_exit is called. This can happen e.g. when QEMU calls exit(3)
> from one CPU thread. Others will continue to run at the same time the
> atexit callbacks are called.
>
> This also means that you can't really free the resources as you do at
> the end of plugin_exit.

Interesting...

The current documentation [1] mentions it's the right place to free
resources, and from what I saw, existing plugins assume this too (see
contrib/plugins/cache.c for instance).

There is probably a bug related to the case you mention, and I'll try
to reproduce this and see if we can have a proper fix.

I'm not sure that removing cleanup code from existing plugins is the
right thing to do at the moment, even though there is an existing
issue with some programs.

[1] https://www.qemu.org/docs/master/devel/tcg-plugins.html#c.qemu_plugin_register_atexit_cb
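
For reference, the overall shape the documentation describes, and which hotblocks and cache.c already follow, is roughly the sketch below (argument parsing omitted; this is context for the discussion, not a proposed change):

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    QEMU_PLUGIN_EXPORT
    int qemu_plugin_install(qemu_plugin_id_t id, const qemu_plugin_info_t *info,
                            int argc, char **argv)
    {
        plugin_init();

        /* Instrument each translation block as it is generated. */
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);

        /*
         * The atexit callback (plugin_exit here) is the documented place to
         * emit the report and free plugin resources.
         */
        qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
        return 0;
    }
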
On 15:09 Tue 27 Feb, Pierrick Bouvier wrote:
> The current documentation [1] mentions it's the right place to free
> resources, and from what I saw, existing plugins assume this too (see
> contrib/plugins/cache.c for instance).
>
> There is probably a bug related to the case you mention, and I'll try
> to reproduce this and see if we can have a proper fix.
>
> I'm not sure that removing cleanup code from existing plugins is the
> right thing to do at the moment, even though there is an existing
> issue with some programs.

Yes, absolutely. The problem is on the QEMU side. FWIW I tried to fix
one of those exit cases (the semihosted exit syscall) some time ago:
https://lore.kernel.org/qemu-devel/20220621125916.25257-1-lmichel@kalray.eu/

IIRC Peter was not fundamentally against it. But then I was off for
several months due to medical reasons.

I can take some time to rebase it and address Peter's comments if
there is interest in it.

Luc
Luc Michel <luc.michel@amd.com> writes:

> Yes, absolutely. The problem is on the QEMU side. FWIW I tried to fix
> one of those exit cases (the semihosted exit syscall) some time ago:
> https://lore.kernel.org/qemu-devel/20220621125916.25257-1-lmichel@kalray.eu/

The plugin exit handler should flush all instrumented code:

/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */

void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}

So you shouldn't see any other instrumentation activity after the
end_exclusive. However you may see another thread hit its exit handler
before we complete exiting the system.

--
Alex Bennée
Virtualisation Tech Lead @ Linaro
On 14:33 Thu 29 Feb, Alex Bennée wrote:
> So you shouldn't see any other instrumentation activity after the
> end_exclusive. However you may see another thread hit its exit handler
> before we complete exiting the system.

OK, I was not aware of this. Anyway, I encountered those issues a long
time ago. It's probably not relevant anymore.

Thanks

--
Luc
On 2/27/24 6:26 PM, Luc Michel wrote:
> FWIW I tried to fix one of those exit cases (the semihosted exit
> syscall) some time ago:
> https://lore.kernel.org/qemu-devel/20220621125916.25257-1-lmichel@kalray.eu/
>
> IIRC Peter was not fundamentally against it. But then I was off for
> several months due to medical reasons.
>
> I can take some time to rebase it and address Peter's comments if
> there is interest in it.

That would be much appreciated, and it's definitely worth fixing!