From: alloc <[email protected]>
Timeout functions like usleep can return early when a signal is delivered,
so the vcpu sleeps for less time than intended and dirties more pages than
the limit allows. In the dirtylimit case, the dirtyrate meter thread needs
to kick all vcpus out of the guest to sync the dirty ring. The call chain:
vcpu_calculate_dirtyrate
    global_dirty_log_sync
        memory_global_dirty_log_sync
            kvm_log_sync_global
                kvm_dirty_ring_flush
                    kvm_cpu_synchronize_kick_all <---- send vcpu signal
For long sleeps, use qemu_cond_timedwait_iothread so that the wait also
handles the cpu stop event.
Signed-off-by: alloc <[email protected]>
---
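[Not part of the patch: a minimal standalone POSIX sketch of the failure
mode described above, using only libc and no QEMU symbols. A 500ms
nanosleep() is interrupted by a SIGALRM that lands after ~100ms and
returns -1/EINTR with the unslept time left in 'rem'; a bare usleep()
in the vcpu throttle path gets cut short the same way.]

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>

static void on_alarm(int sig)
{
    (void)sig;    /* the handler only needs to interrupt the sleep */
}

int main(void)
{
    struct sigaction sa;
    struct itimerval it = { .it_value = { .tv_usec = 100 * 1000 } };
    struct timespec req = { .tv_nsec = 500 * 1000 * 1000 };
    struct timespec rem = { 0 };

    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = on_alarm;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGALRM, &sa, NULL);

    setitimer(ITIMER_REAL, &it, NULL);  /* SIGALRM after ~100ms */

    if (nanosleep(&req, &rem) == -1 && errno == EINTR) {
        printf("sleep interrupted, %ld ms never slept\n",
               (long)(rem.tv_sec * 1000 + rem.tv_nsec / 1000000));
    }
    return 0;
}
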
softmmu/dirtylimit.c | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
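
[Also not part of the patch: a rough pthread analogue of the wait loop in
the hunk below, to show why a condition-variable timed wait can be woken
by a stop request while a bare usleep() cannot. throttle_wait(),
request_stop() and stop_requested are made-up names for this sketch only;
the patch itself relies on cpu->halt_cond, cpu->stop and
qemu_cond_timedwait_iothread().]

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool stop_requested;

static int64_t now_us(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Sleep for sleep_us, but return early once stop_requested is set. */
static void throttle_wait(int64_t sleep_us)
{
    int64_t end_us = now_us() + sleep_us;

    pthread_mutex_lock(&lock);
    while (sleep_us > 0 && !stop_requested) {
        struct timespec dl = {
            .tv_sec = end_us / 1000000,
            .tv_nsec = (end_us % 1000000) * 1000,
        };
        /* Woken early by request_stop(), otherwise times out. */
        if (pthread_cond_timedwait(&wake, &lock, &dl) == ETIMEDOUT) {
            break;
        }
        sleep_us = end_us - now_us();
    }
    pthread_mutex_unlock(&lock);
}

/* The analogue of a cpu stop request coming from another thread. */
static void request_stop(void)
{
    pthread_mutex_lock(&lock);
    stop_requested = true;
    pthread_cond_broadcast(&wake);
    pthread_mutex_unlock(&lock);
}

static void *stopper(void *arg)
{
    (void)arg;
    usleep(100 * 1000);         /* stop request arrives after ~100ms */
    request_stop();
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, stopper, NULL);
    throttle_wait(500 * 1000);  /* wakes after ~100ms, not 500ms */
    pthread_join(t, NULL);
    return 0;
}
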
diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index fa959d7743..ee938c636d 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -411,13 +411,28 @@ void dirtylimit_set_all(uint64_t quota,
 
 void dirtylimit_vcpu_execute(CPUState *cpu)
 {
+    int64_t sleep_us, endtime_us;
+
+    dirtylimit_state_lock();
     if (dirtylimit_in_service() &&
         dirtylimit_vcpu_get_state(cpu->cpu_index)->enabled &&
         cpu->throttle_us_per_full) {
         trace_dirtylimit_vcpu_execute(cpu->cpu_index,
                                       cpu->throttle_us_per_full);
-        usleep(cpu->throttle_us_per_full);
-    }
+        sleep_us = cpu->throttle_us_per_full;
+        dirtylimit_state_unlock();
+        endtime_us = qemu_clock_get_us(QEMU_CLOCK_REALTIME) + sleep_us;
+        while (sleep_us > 0 && !cpu->stop) {
+            if (sleep_us > SCALE_US) {
+                qemu_mutex_lock_iothread();
+                qemu_cond_timedwait_iothread(cpu->halt_cond, sleep_us / SCALE_US);
+                qemu_mutex_unlock_iothread();
+            } else
+                g_usleep(sleep_us);
+            sleep_us = endtime_us - qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+        }
+    } else
+        dirtylimit_state_unlock();
 }
 
 static void dirtylimit_init(void)
--
2.39.3