* Peter Xu ([email protected]) wrote:
> To prepare for thread-safety on page accounting, at least the following
> counters need to be accessed atomically:
> 
>         ram_counters.transferred
>         ram_counters.duplicate
>         ram_counters.normal
>         ram_counters.postcopy_bytes
> 
> There are a lot of other counters, but they won't be accessed outside the
> migration thread, so they are still safe to access without atomic ops.
> 
> Signed-off-by: Peter Xu <[email protected]>

I think this is OK; I'm not sure whether the memset 0's of ram_counters
technically need changing.
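If I remember right, that's the reset in migrate_init(); a minimal sketch
of what I mean (exact location from memory, so take it with a pinch of salt):

    /* Today's reset path: a plain memset, which is not an atomic store */
    memset(&ram_counters, 0, sizeof(ram_counters));

    /* Strictly, the four shared fields would want atomic stores, e.g. */
    qatomic_set(&ram_counters.transferred, 0);
    qatomic_set(&ram_counters.duplicate, 0);
    qatomic_set(&ram_counters.normal, 0);
    qatomic_set(&ram_counters.postcopy_bytes, 0);

Since that runs before the migration thread is spawned it's probably
harmless, hence the 'technically'.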
I'd love to put a comment somewhere saying these fields need to be
read atomically, but they're qapi-defined so I don't think we can.
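The closest I can get is a comment next to the extern declaration; a
sketch, assuming it still lives in migration/ram.h:

    /*
     * Updated from multiple threads during migration.  transferred,
     * duplicate, normal and postcopy_bytes must be accessed with
     * qatomic_read()/qatomic_add(); the rest are migration-thread only.
     */
    extern MigrationStats ram_counters;

which at least gives readers a pointer, even though the struct itself is
generated.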

Finally, we probably need to check that these are happy on 32-bit builds;
it can sometimes be a bit funny with 64-bit atomic adds there.
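The check I'm thinking of is the width assert in include/qemu/atomic.h,
which from memory is roughly:

    #define qatomic_read(ptr)                                    \
        ({                                                       \
        qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);      \
        qatomic_read__nocheck(ptr);                              \
        })

ATOMIC_REG_SIZE is only 4 on most 32-bit hosts, so a qatomic_read() of a
64-bit counter can fail to build there (and a 64-bit qatomic_add() may
need libatomic); worth a quick i386 cross build to be sure.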


Reviewed-by: Dr. David Alan Gilbert <[email protected]>

> ---
>  migration/migration.c | 10 +++++-----
>  migration/multifd.c   |  2 +-
>  migration/ram.c       | 29 +++++++++++++++--------------
>  3 files changed, 21 insertions(+), 20 deletions(-)
> 
> diff --git a/migration/migration.c b/migration/migration.c
> index 07c74a79a2..0eacc0c99b 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -1048,13 +1048,13 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
>  
>      info->has_ram = true;
>      info->ram = g_malloc0(sizeof(*info->ram));
> -    info->ram->transferred = ram_counters.transferred;
> +    info->ram->transferred = qatomic_read(&ram_counters.transferred);
>      info->ram->total = ram_bytes_total();
> -    info->ram->duplicate = ram_counters.duplicate;
> +    info->ram->duplicate = qatomic_read(&ram_counters.duplicate);
>      /* legacy value.  It is not used anymore */
>      info->ram->skipped = 0;
> -    info->ram->normal = ram_counters.normal;
> -    info->ram->normal_bytes = ram_counters.normal * page_size;
> +    info->ram->normal = qatomic_read(&ram_counters.normal);
> +    info->ram->normal_bytes = info->ram->normal * page_size;
>      info->ram->mbps = s->mbps;
>      info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
>      info->ram->dirty_sync_missed_zero_copy =
> @@ -1065,7 +1065,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
>      info->ram->pages_per_second = s->pages_per_second;
>      info->ram->precopy_bytes = ram_counters.precopy_bytes;
>      info->ram->downtime_bytes = ram_counters.downtime_bytes;
> -    info->ram->postcopy_bytes = ram_counters.postcopy_bytes;
> +    info->ram->postcopy_bytes = qatomic_read(&ram_counters.postcopy_bytes);
>  
>      if (migrate_use_xbzrle()) {
>          info->has_xbzrle_cache = true;
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 586ddc9d65..460326acd4 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -437,7 +437,7 @@ static int multifd_send_pages(QEMUFile *f)
>                  + p->packet_len;
>      qemu_file_acct_rate_limit(f, transferred);
>      ram_counters.multifd_bytes += transferred;
> -    ram_counters.transferred += transferred;
> +    qatomic_add(&ram_counters.transferred, transferred);
>      qemu_mutex_unlock(&p->mutex);
>      qemu_sem_post(&p->sem);
>  
> diff --git a/migration/ram.c b/migration/ram.c
> index 6e7de6087a..5bd3d76bf0 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -432,11 +432,11 @@ static void ram_transferred_add(uint64_t bytes)
>      if (runstate_is_running()) {
>          ram_counters.precopy_bytes += bytes;
>      } else if (migration_in_postcopy()) {
> -        ram_counters.postcopy_bytes += bytes;
> +        qatomic_add(&ram_counters.postcopy_bytes, bytes);
>      } else {
>          ram_counters.downtime_bytes += bytes;
>      }
> -    ram_counters.transferred += bytes;
> +    qatomic_add(&ram_counters.transferred, bytes);
>  }
>  
>  void dirty_sync_missed_zero_copy(void)
> @@ -725,7 +725,7 @@ void mig_throttle_counter_reset(void)
>  
>      rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
>      rs->num_dirty_pages_period = 0;
> -    rs->bytes_xfer_prev = ram_counters.transferred;
> +    rs->bytes_xfer_prev = qatomic_read(&ram_counters.transferred);
>  }
>  
>  /**
> @@ -1085,8 +1085,9 @@ uint64_t ram_pagesize_summary(void)
>  
>  uint64_t ram_get_total_transferred_pages(void)
>  {
> -    return  ram_counters.normal + ram_counters.duplicate +
> -                compression_counters.pages + xbzrle_counters.pages;
> +    return  qatomic_read(&ram_counters.normal) +
> +        qatomic_read(&ram_counters.duplicate) +
> +        compression_counters.pages + xbzrle_counters.pages;
>  }
>  
>  static void migration_update_rates(RAMState *rs, int64_t end_time)
> @@ -1145,8 +1146,8 @@ static void migration_trigger_throttle(RAMState *rs)
>  {
>      MigrationState *s = migrate_get_current();
>      uint64_t threshold = s->parameters.throttle_trigger_threshold;
> -
> -    uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
> +    uint64_t bytes_xfer_period =
> +        qatomic_read(&ram_counters.transferred) - rs->bytes_xfer_prev;
>      uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
>      uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
>  
> @@ -1285,7 +1286,7 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
>      int len = save_zero_page_to_file(rs, rs->f, block, offset);
>  
>      if (len) {
> -        ram_counters.duplicate++;
> +        qatomic_inc(&ram_counters.duplicate);
>          ram_transferred_add(len);
>          return 1;
>      }
> @@ -1322,9 +1323,9 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
>      }
>  
>      if (bytes_xmit > 0) {
> -        ram_counters.normal++;
> +        qatomic_inc(&ram_counters.normal);
>      } else if (bytes_xmit == 0) {
> -        ram_counters.duplicate++;
> +        qatomic_inc(&ram_counters.duplicate);
>      }
>  
>      return true;
> @@ -1354,7 +1355,7 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
>          qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
>      }
>      ram_transferred_add(TARGET_PAGE_SIZE);
> -    ram_counters.normal++;
> +    qatomic_inc(&ram_counters.normal);
>      return 1;
>  }
>  
> @@ -1448,7 +1449,7 @@ update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
>      ram_transferred_add(bytes_xmit);
>  
>      if (param->zero_page) {
> -        ram_counters.duplicate++;
> +        qatomic_inc(&ram_counters.duplicate);
>          return;
>      }
>  
> @@ -2620,9 +2621,9 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero)
>      uint64_t pages = size / TARGET_PAGE_SIZE;
>  
>      if (zero) {
> -        ram_counters.duplicate += pages;
> +        qatomic_add(&ram_counters.duplicate, pages);
>      } else {
> -        ram_counters.normal += pages;
> +        qatomic_add(&ram_counters.normal, pages);
>          ram_transferred_add(size);
>          qemu_file_credit_transfer(f, size);
>      }
> -- 
> 2.32.0
> 
-- 
Dr. David Alan Gilbert / [email protected] / Manchester, UK

