Missing description:

  Similarly to commit 1d78a3c3ab8 for KVM, wrap hv_vcpu_run() with
  cpu_exec_start/end(), so that the accelerator can perform
  pending operations while all vCPUs are quiescent. See also the
  explanation in commit c265e976f46 ("cpus-common: lock-free
  fast path for cpu_exec_start/end").
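For reference, here is a minimal sketch of the quiescence protocol that
cpu_exec_start/end() participates in. It is a simplified pthread model, not
QEMU's actual lock-free fast path from cpus-common.c; the plain mutex/condvar
scheme and the names n_running and exclusive_pending are illustrative only:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t cpu_list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;   /* last runner left */
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER; /* exclusive work done */
static int n_running;          /* vCPUs currently between cpu_exec_start/end */
static bool exclusive_pending; /* an exclusive section is waiting or running */

/* Called by a vCPU thread right before entering the guest. */
static void cpu_exec_start(void)
{
    pthread_mutex_lock(&cpu_list_lock);
    while (exclusive_pending) {
        /* Stay quiescent until the exclusive work has finished. */
        pthread_cond_wait(&exclusive_resume, &cpu_list_lock);
    }
    n_running++;
    pthread_mutex_unlock(&cpu_list_lock);
}

/* Called by a vCPU thread right after leaving the guest. */
static void cpu_exec_end(void)
{
    pthread_mutex_lock(&cpu_list_lock);
    if (--n_running == 0 && exclusive_pending) {
        /* Last runner out: wake the thread blocked in start_exclusive(). */
        pthread_cond_signal(&exclusive_cond);
    }
    pthread_mutex_unlock(&cpu_list_lock);
}

/* Called by whoever needs all vCPUs quiescent (e.g. the accelerator). */
static void start_exclusive(void)
{
    pthread_mutex_lock(&cpu_list_lock);
    exclusive_pending = true;
    while (n_running > 0) {
        pthread_cond_wait(&exclusive_cond, &cpu_list_lock);
    }
    /* On return, no vCPU is inside the guest and none can re-enter
     * until end_exclusive() is called. */
    pthread_mutex_unlock(&cpu_list_lock);
}

static void end_exclusive(void)
{
    pthread_mutex_lock(&cpu_list_lock);
    exclusive_pending = false;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&cpu_list_lock);
}

With this in place, a thread calling start_exclusive() returns only once every
vCPU has left hv_vcpu_run(), and a vCPU re-entering the guest blocks in
cpu_exec_start() until end_exclusive() runs, which is exactly the window the
patch below opens around the HVF run loop.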

On 3/9/25 12:06, Philippe Mathieu-Daudé wrote:
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
---
  target/arm/hvf/hvf.c  | 2 ++
  target/i386/hvf/hvf.c | 4 ++++
  2 files changed, 6 insertions(+)

diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index b60efdc1769..40ec930d244 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -2154,7 +2154,9 @@ int hvf_arch_vcpu_exec(CPUState *cpu)
          flush_cpu_state(cpu);
         bql_unlock();
+        cpu_exec_start(cpu);
          r = hv_vcpu_run(cpu->accel->fd);
+        cpu_exec_end(cpu);
          bql_lock();
          switch (r) {
          case HV_SUCCESS:
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index 73c50175048..cb4af70e91d 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -749,9 +749,13 @@ int hvf_arch_vcpu_exec(CPUState *cpu)
              return EXCP_HLT;
          }
+        cpu_exec_start(cpu);
+
         hv_return_t r = hv_vcpu_run_until(cpu->accel->fd, HV_DEADLINE_FOREVER);
          assert_hvf_ok(r);
+        cpu_exec_end(cpu);
+
          /* handle VMEXIT */
          uint64_t exit_reason = rvmcs(cpu->accel->fd, VMCS_EXIT_REASON);
          uint64_t exit_qual = rvmcs(cpu->accel->fd, VMCS_EXIT_QUALIFICATION);

