Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-common.h   |  1 -
 accel/tcg/cpu-exec-common.c | 30 ------------------------------
 2 files changed, 31 deletions(-)

diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 87dc9a752c..41788c0bdd 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -133,7 +133,6 @@ static inline void cpu_physical_memory_write(hwaddr addr,
 {
     cpu_physical_memory_rw(addr, (void *)buf, len, true);
 }
-void cpu_reloading_memory_map(void);
 void *cpu_physical_memory_map(hwaddr addr,
                               hwaddr *plen,
                               bool is_write);
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index 9a5fabf625..7e35d7f4b5 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -33,36 +33,6 @@ void cpu_loop_exit_noexc(CPUState *cpu)
     cpu_loop_exit(cpu);
 }
 
-#if defined(CONFIG_SOFTMMU)
-void cpu_reloading_memory_map(void)
-{
-    if (qemu_in_vcpu_thread() && current_cpu->running) {
-        /* The guest can in theory prolong the RCU critical section as long
-         * as it feels like. The major problem with this is that because it
-         * can do multiple reconfigurations of the memory map within the
-         * critical section, we could potentially accumulate an unbounded
-         * collection of memory data structures awaiting reclamation.
-         *
-         * Because the only thing we're currently protecting with RCU is the
-         * memory data structures, it's sufficient to break the critical section
-         * in this callback, which we know will get called every time the
-         * memory map is rearranged.
-         *
-         * (If we add anything else in the system that uses RCU to protect
-         * its data structures, we will need to implement some other mechanism
-         * to force TCG CPUs to exit the critical section, at which point this
-         * part of this callback might become unnecessary.)
-         *
-         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
-         * only protects cpu->as->dispatch. Since we know our caller is about
-         * to reload it, it's safe to split the critical section.
-         */
-        rcu_read_unlock();
-        rcu_read_lock();
-    }
-}
-#endif
-
 void cpu_loop_exit(CPUState *cpu)
 {
     /* Undo the setting in cpu_tb_exec. */
--
2.34.1
Richard Henderson <richard.henderson@linaro.org> writes:

> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

This is just cleanup, I think it should be merged with 2/3

[...]

-- 
Alex Bennée
Virtualisation Tech Lead @ Linaro
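For context, the mechanism the removed comment describes is the usual way a long-running RCU reader keeps reclamation bounded: at a known-safe point it drops and immediately re-takes its read-side critical section, so that stale memory-map structures waiting on a grace period cannot pile up indefinitely. Below is a minimal, self-contained sketch of that pattern. The rcu_read_lock()/rcu_read_unlock() names mirror QEMU's include/qemu/rcu.h primitives, while run_guest_code() and memory_map_changed() are hypothetical stubs added only so the example compiles; it illustrates the technique, not the code being deleted.

/*
 * Illustrative sketch only, not part of the patch.  The rcu_read_lock()
 * and rcu_read_unlock() stubs stand in for QEMU's primitives from
 * include/qemu/rcu.h; run_guest_code() and memory_map_changed() are
 * hypothetical helpers so the example is self-contained.
 */
#include <stdbool.h>

static void rcu_read_lock(void)   { /* enter read-side critical section */ }
static void rcu_read_unlock(void) { /* leave it; reclamation may now run */ }

static void run_guest_code(void)     { /* uses RCU-protected structures */ }
static bool memory_map_changed(void) { return false; }

static void vcpu_loop(void)
{
    rcu_read_lock();
    for (;;) {
        run_guest_code();

        if (memory_map_changed()) {
            /*
             * Nothing RCU-protected may be held live across this gap.
             * Splitting the critical section here bounds the amount of
             * memory waiting for reclamation, which is what
             * cpu_reloading_memory_map() did for TCG vCPU threads.
             */
            rcu_read_unlock();
            rcu_read_lock();
        }
    }
}

The removal itself is straightforward: after the earlier patches in the series the callback has no remaining callers, so both the declaration and the definition can go.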