From: "Emilio G. Cota" <c...@braap.org> Suggested-by: Alex Bennée <alex.ben...@linaro.org> Reviewed-by: Alex Bennée <alex.ben...@linaro.org> Signed-off-by: Emilio G. Cota <c...@braap.org> [AJB: moved inside start/end_exclusive fns + cleanup] Signed-off-by: Alex Bennée <alex.ben...@linaro.org>
---
v4
  - -> cpu_in_exclusive_context
  - moved inside start/end exclusive
  - fixed up cpu_exec_step_atomic
---
 accel/tcg/cpu-exec.c |  5 +----
 cpus-common.c        |  4 ++++
 include/qom/cpu.h    | 13 +++++++++++++
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 6c85c3ee1e9..ab9dfd4f908 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -239,8 +239,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
     uint32_t flags;
     uint32_t cflags = 1;
     uint32_t cf_mask = cflags & CF_HASH_MASK;
-    /* volatile because we modify it between setjmp and longjmp */
-    volatile bool in_exclusive_region = false;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
         tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
@@ -254,7 +252,6 @@
 
         /* Since we got here, we know that parallel_cpus must be true.  */
         parallel_cpus = false;
-        in_exclusive_region = true;
         cc->cpu_exec_enter(cpu);
         /* execute the generated code */
         trace_exec_tb(tb, pc);
@@ -274,7 +271,7 @@
         assert_no_pages_locked();
     }
 
-    if (in_exclusive_region) {
+    if (cpu_in_exclusive_context(cpu)) {
         /* We might longjump out of either the codegen or the
          * execution, so must make sure we only end the exclusive
          * region if we started it.
diff --git a/cpus-common.c b/cpus-common.c
index 3ca58c64e80..e87400834be 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -208,11 +208,15 @@
      * section until end_exclusive resets pending_cpus to 0.
      */
     qemu_mutex_unlock(&qemu_cpu_list_lock);
+
+    current_cpu->in_exclusive_context = true;
 }
 
 /* Finish an exclusive operation.  */
 void end_exclusive(void)
 {
+    current_cpu->in_exclusive_context = false;
+
     qemu_mutex_lock(&qemu_cpu_list_lock);
     atomic_set(&pending_cpus, 0);
     qemu_cond_broadcast(&exclusive_resume);
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 5ee0046b629..65a0926c49a 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -373,6 +373,7 @@ struct CPUState {
     bool unplug;
     bool crash_occurred;
     bool exit_request;
+    bool in_exclusive_context;
     uint32_t cflags_next_tb;
     /* updates protected by BQL */
     uint32_t interrupt_request;
@@ -785,6 +786,18 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data
  */
 void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 
+/**
+ * cpu_in_exclusive_context()
+ * @cpu: The vCPU to check
+ *
+ * Returns true if @cpu is an exclusive context, for example running
+ * something which has previously been queued via async_safe_run_on_cpu().
+ */
+static inline bool cpu_in_exclusive_context(const CPUState *cpu)
+{
+    return cpu->in_exclusive_context;
+}
+
 /**
  * qemu_get_cpu:
  * @index: The CPUState@cpu_index value of the CPU to obtain.
--
2.20.1
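For readers following along, a minimal sketch of how the new predicate pairs with
async_safe_run_on_cpu(): work queued that way is dispatched between
start_exclusive() and end_exclusive(), so cpu_in_exclusive_context() holds inside
the callback. The do_safe_work/queue_safe_work names and the asserts below are
illustrative only, not part of this patch:

#include "qemu/osdep.h"
#include "qom/cpu.h"

/* Illustrative worker: async_safe_run_on_cpu() runs it inside
 * start_exclusive()/end_exclusive(), so the predicate is true here. */
static void do_safe_work(CPUState *cpu, run_on_cpu_data data)
{
    g_assert(cpu_in_exclusive_context(cpu));
    /* ... touch state that must not race with other vCPUs ... */
}

/* Illustrative queueing site, assuming the caller is not itself
 * running inside an exclusive section. */
static void queue_safe_work(CPUState *cpu)
{
    g_assert(!cpu_in_exclusive_context(cpu));
    async_safe_run_on_cpu(cpu, do_safe_work, RUN_ON_CPU_NULL);
}

Note the callback's cpu argument is the vCPU that processes the queued work, i.e.
current_cpu at dispatch time, which is why the flag set in start_exclusive() is
visible through it.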