Right now downtime_start is stored in MigrationState. In preparation for having more downtime timestamps during switchover, move downtime_start to an array, namely @timestamp.
Add a setter/getter around these timestamps, to make it easier to spread their recording to various migration functions. Signed-off-by: Joao Martins <[email protected]> --- qapi/migration.json | 14 ++++++++++++++ migration/migration.h | 7 +++++-- migration/migration.c | 24 ++++++++++++++++++++---- 3 files changed, 39 insertions(+), 6 deletions(-) diff --git a/qapi/migration.json b/qapi/migration.json index 8843e74b59c7..b836cc881d33 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -190,6 +190,20 @@ { 'struct': 'VfioStats', 'data': {'transferred': 'int' } } +## +# @MigrationDowntime: +# +# An enumeration of downtime timestamps for all +# steps of the switchover. +# +# @start: Timestamp taken at the start of the switchover right before +# we stop the VM. +# +# Since: 8.2 +## +{ 'enum': 'MigrationDowntime', + 'data': [ 'start' ] } + ## # @MigrationInfo: # diff --git a/migration/migration.h b/migration/migration.h index c390500604b6..180dc31c5306 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -319,8 +319,8 @@ struct MigrationState { int64_t start_time; /* Total time used by latest migration (ms) */ int64_t total_time; - /* Timestamp when VM is down (ms) to migrate the last stuff */ - int64_t downtime_start; + /* Timestamps e.g. when VM is down (ms) to migrate the last stuff */ + int64_t timestamp[MIGRATION_DOWNTIME__MAX]; int64_t downtime; int64_t expected_downtime; bool capabilities[MIGRATION_CAPABILITY__MAX]; @@ -516,4 +516,7 @@ void migration_populate_vfio_info(MigrationInfo *info); void migration_reset_vfio_bytes_transferred(void); void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page); +void migration_set_timestamp(MigrationDowntime tm); +int64_t migration_get_timestamp(MigrationDowntime tm); + #endif diff --git a/migration/migration.c b/migration/migration.c index d61e5727429a..dd955c61acc7 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -2312,6 +2312,21 @@ static int migration_maybe_pause(MigrationState *s, return s->state == new_state ? 0 : -EINVAL; } +void migration_set_timestamp(MigrationDowntime type) +{ + MigrationState *s = migrate_get_current(); + + s->timestamp[type] = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); +} + +int64_t migration_get_timestamp(MigrationDowntime type) +{ + MigrationState *s = migrate_get_current(); + + return s->timestamp[type]; +} + + /** * migration_completion: Used by migration_thread when there's not much left. * The caller 'breaks' the loop when this returns. @@ -2325,7 +2340,7 @@ static void migration_completion(MigrationState *s) if (s->state == MIGRATION_STATUS_ACTIVE) { qemu_mutex_lock_iothread(); - s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + migration_set_timestamp(MIGRATION_DOWNTIME_START); qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); s->vm_old_state = runstate_get(); @@ -2670,7 +2685,7 @@ static void migration_calculate_complete(MigrationState *s) * It's still not set, so we are precopy migration. For * postcopy, downtime is calculated during postcopy_start(). 
*/ - s->downtime = end_time - s->downtime_start; + s->downtime = end_time - migration_get_timestamp(MIGRATION_DOWNTIME_START); } transfer_time = s->total_time - s->setup_time; @@ -3069,7 +3084,8 @@ static void bg_migration_vm_start_bh(void *opaque) s->vm_start_bh = NULL; vm_start(); - s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start; + s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - + migration_get_timestamp(MIGRATION_DOWNTIME_START); } /** @@ -3134,7 +3150,7 @@ static void *bg_migration_thread(void *opaque) s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; trace_migration_thread_setup_complete(); - s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + migration_set_timestamp(MIGRATION_DOWNTIME_START); qemu_mutex_lock_iothread(); -- 2.39.3
