riscv_trigger_init() had been called on reset events, which can happen
several times for a CPU, and it allocated timers for itrigger. If old
timers were already present, they were simply overwritten by the new
ones, resulting in a memory leak.

Divide riscv_trigger_init() into two functions, riscv_trigger_realize()
and riscv_trigger_reset(), and call them at the appropriate times. The
timer allocation now happens only once per CPU, in
riscv_trigger_realize().
Fixes: 5a4ae64cac ("target/riscv: Add itrigger support when icount is enabled")
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
---
target/riscv/debug.h | 3 ++-
target/riscv/cpu.c | 8 +++++++-
target/riscv/debug.c | 15 ++++++++++++---
3 files changed, 21 insertions(+), 5 deletions(-)
diff --git a/target/riscv/debug.h b/target/riscv/debug.h
index c471748d5a..7edc31e7cc 100644
--- a/target/riscv/debug.h
+++ b/target/riscv/debug.h
@@ -143,7 +143,8 @@ void riscv_cpu_debug_excp_handler(CPUState *cs);
 bool riscv_cpu_debug_check_breakpoint(CPUState *cs);
 bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
 
-void riscv_trigger_init(CPURISCVState *env);
+void riscv_trigger_realize(CPURISCVState *env);
+void riscv_trigger_reset(CPURISCVState *env);
 
 bool riscv_itrigger_enabled(CPURISCVState *env);
 void riscv_itrigger_update_priv(CPURISCVState *env);
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index e12b6ef7f6..3bc3f96a58 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -904,7 +904,7 @@ static void riscv_cpu_reset_hold(Object *obj)
 
 #ifndef CONFIG_USER_ONLY
     if (cpu->cfg.debug) {
-        riscv_trigger_init(env);
+        riscv_trigger_reset(env);
     }
 
     if (kvm_enabled()) {
@@ -1475,6 +1475,12 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
 
     riscv_cpu_register_gdb_regs_for_features(cs);
 
+#ifndef CONFIG_USER_ONLY
+    if (cpu->cfg.debug) {
+        riscv_trigger_realize(&cpu->env);
+    }
+#endif
+
     qemu_init_vcpu(cs);
     cpu_reset(cs);
 
diff --git a/target/riscv/debug.c b/target/riscv/debug.c
index 75ee1c4971..1c44403205 100644
--- a/target/riscv/debug.c
+++ b/target/riscv/debug.c
@@ -903,7 +903,17 @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
     return false;
 }
 
-void riscv_trigger_init(CPURISCVState *env)
+void riscv_trigger_realize(CPURISCVState *env)
+{
+    int i;
+
+    for (i = 0; i < RV_MAX_TRIGGERS; i++) {
+        env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                              riscv_itrigger_timer_cb, env);
+    }
+}
+
+void riscv_trigger_reset(CPURISCVState *env)
 {
     target_ulong tdata1 = build_tdata1(env, TRIGGER_TYPE_AD_MATCH, 0, 0);
     int i;
@@ -928,7 +938,6 @@ void riscv_trigger_init(CPURISCVState *env)
         env->tdata3[i] = 0;
         env->cpu_breakpoint[i] = NULL;
         env->cpu_watchpoint[i] = NULL;
-        env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
-                                              riscv_itrigger_timer_cb, env);
+        timer_del(env->itrigger_timer[i]);
     }
 }
--
2.41.0
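
For readers unfamiliar with the QEMU timer API, here is a minimal sketch of
the leak pattern described above and of the realize/reset split that fixes it.
It is not code from the tree: the Demo* names and DEMO_NUM_TIMERS stand-in are
invented for illustration; only timer_new_ns(), timer_del() and
QEMU_CLOCK_VIRTUAL are the real API used by the patch.

#include "qemu/osdep.h"
#include "qemu/timer.h"

#define DEMO_NUM_TIMERS 2               /* stand-in for RV_MAX_TRIGGERS */

typedef struct DemoState {
    QEMUTimer *timer[DEMO_NUM_TIMERS];
} DemoState;

static void demo_timer_cb(void *opaque)
{
    /* runs when the virtual clock reaches the programmed deadline */
}

/*
 * Leaky pattern (what riscv_trigger_init() effectively did): allocating in
 * the reset path creates new QEMUTimer objects on every reset, and the
 * previously allocated ones are never freed.
 */
static void demo_reset_leaky(DemoState *s)
{
    for (int i = 0; i < DEMO_NUM_TIMERS; i++) {
        s->timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL, demo_timer_cb, s);
    }
}

/* Fixed pattern: allocate exactly once, at realize time ... */
static void demo_realize(DemoState *s)
{
    for (int i = 0; i < DEMO_NUM_TIMERS; i++) {
        s->timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL, demo_timer_cb, s);
    }
}

/* ... and on reset only cancel any pending expiration. */
static void demo_reset(DemoState *s)
{
    for (int i = 0; i < DEMO_NUM_TIMERS; i++) {
        timer_del(s->timer[i]);
    }
}

Note that timer_del() only removes a pending timer from the active list; it
does not free the QEMUTimer, so calling it on every reset is safe.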
On Thu, Aug 17, 2023 at 2:28 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/debug.h | 3 ++-
> target/riscv/cpu.c | 8 +++++++-
> target/riscv/debug.c | 15 ++++++++++++---
> 3 files changed, 21 insertions(+), 5 deletions(-)
>
> diff --git a/target/riscv/debug.h b/target/riscv/debug.h
> index c471748d5a..7edc31e7cc 100644
> --- a/target/riscv/debug.h
> +++ b/target/riscv/debug.h
> @@ -143,7 +143,8 @@ void riscv_cpu_debug_excp_handler(CPUState *cs);
> bool riscv_cpu_debug_check_breakpoint(CPUState *cs);
> bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
>
> -void riscv_trigger_init(CPURISCVState *env);
> +void riscv_trigger_realize(CPURISCVState *env);
> +void riscv_trigger_reset(CPURISCVState *env);
>
> bool riscv_itrigger_enabled(CPURISCVState *env);
> void riscv_itrigger_update_priv(CPURISCVState *env);
> diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> index e12b6ef7f6..3bc3f96a58 100644
> --- a/target/riscv/cpu.c
> +++ b/target/riscv/cpu.c
> @@ -904,7 +904,7 @@ static void riscv_cpu_reset_hold(Object *obj)
>
> #ifndef CONFIG_USER_ONLY
> if (cpu->cfg.debug) {
> - riscv_trigger_init(env);
> + riscv_trigger_reset(env);
> }
>
> if (kvm_enabled()) {
> @@ -1475,6 +1475,12 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
>
> riscv_cpu_register_gdb_regs_for_features(cs);
>
> +#ifndef CONFIG_USER_ONLY
> + if (cpu->cfg.debug) {
> + riscv_trigger_realize(&cpu->env);
> + }
> +#endif
> +
> qemu_init_vcpu(cs);
> cpu_reset(cs);
>
> diff --git a/target/riscv/debug.c b/target/riscv/debug.c
> index 75ee1c4971..1c44403205 100644
> --- a/target/riscv/debug.c
> +++ b/target/riscv/debug.c
> @@ -903,7 +903,17 @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
> return false;
> }
>
> -void riscv_trigger_init(CPURISCVState *env)
> +void riscv_trigger_realize(CPURISCVState *env)
> +{
> + int i;
> +
> + for (i = 0; i < RV_MAX_TRIGGERS; i++) {
> + env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
> + riscv_itrigger_timer_cb, env);
> + }
> +}
> +
> +void riscv_trigger_reset(CPURISCVState *env)
> {
> target_ulong tdata1 = build_tdata1(env, TRIGGER_TYPE_AD_MATCH, 0, 0);
> int i;
> @@ -928,7 +938,6 @@ void riscv_trigger_init(CPURISCVState *env)
> env->tdata3[i] = 0;
> env->cpu_breakpoint[i] = NULL;
> env->cpu_watchpoint[i] = NULL;
> - env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
> - riscv_itrigger_timer_cb, env);
> + timer_del(env->itrigger_timer[i]);
> }
> }
> --
> 2.41.0
>
>
On 2023/8/17 0:27, Akihiko Odaki wrote:
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Zhiwei
On 16/8/23 18:27, Akihiko Odaki wrote:
> @@ -904,7 +904,7 @@ static void riscv_cpu_reset_hold(Object *obj)
>
> #ifndef CONFIG_USER_ONLY
> if (cpu->cfg.debug) {
> - riscv_trigger_init(env);
> + riscv_trigger_reset(env);
Maybe name _reset_hold()? Otherwise:
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> }