On Monday, 2020-08-24 at 17:14:37 +08, Chuan Zheng wrote:
> Implement get_sample_page_period() and set_sample_page_period() to
> sleep specific time between sample actions.
>
> Signed-off-by: Chuan Zheng <[email protected]>
> ---
> migration/dirtyrate.c | 24 ++++++++++++++++++++++++
> migration/dirtyrate.h | 2 ++
> 2 files changed, 26 insertions(+)
>
> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> index bd398b7..d1c0a78 100644
> --- a/migration/dirtyrate.c
> +++ b/migration/dirtyrate.c
> @@ -28,6 +28,30 @@
> static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
> static struct DirtyRateStat DirtyStat;
>
> +static int64_t set_sample_page_period(int64_t msec, int64_t initial_time)
> +{
> + int64_t current_time;
> +
> + current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
> + if ((current_time - initial_time) >= msec) {
> + msec = current_time - initial_time;
> + } else {
> + g_usleep((msec + initial_time - current_time) * 1000);
> + }
> +
> + return msec;
> +}
> +
> +static int64_t get_sample_page_period(int64_t sec)
> +{
> + if (sec <= MIN_FETCH_DIRTYRATE_TIME_SEC ||
Shouldn't the minimum value be allowed?
That is, this test should be "sec < MIN_FETCH_DIRTYRATE_TIME_SEC" and
MIN_FETCH_DIRTYRATE_TIME_SEC should be 1.  (Behaviourally "sec <= 0" and
"sec < 1" are the same, but defining MIN as the smallest *valid* duration
makes the constant's meaning clear rather than naming a value that is
itself rejected.)
> + sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
> + sec = DEFAULT_FETCH_DIRTYRATE_TIME_SEC;
> + }
> +
> + return sec;
> +}
> +
> static int dirtyrate_set_state(int *state, int old_state, int new_state)
> {
> assert(new_state < DIRTY_RATE_STATUS__MAX);
> diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h
> index 41bc264..50a5636 100644
> --- a/migration/dirtyrate.h
> +++ b/migration/dirtyrate.h
> @@ -51,6 +51,8 @@
>
> /* Take 1s as default for calculation duration */
> #define DEFAULT_FETCH_DIRTYRATE_TIME_SEC 1
> +#define MIN_FETCH_DIRTYRATE_TIME_SEC 0
> +#define MAX_FETCH_DIRTYRATE_TIME_SEC 60
>
> struct DirtyRateConfig {
> uint64_t sample_pages_per_gigabytes; /* sample pages per GB */
> --
> 1.8.3.1
dme.
--
Facts don't do what I want them to.