int dirty_rate_high_cnt;
/* How many times we have synchronized the bitmap */
uint64_t bitmap_sync_count;
+ /* these variables are used for bitmap sync */
+ /* last time we did a full bitmap_sync */
+ int64_t time_last_bitmap_sync;
};
typedef struct RAMState RAMState;
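For orientation, after this hunk the bitmap-sync state carried by RAMState reads roughly as below; this is a sketch limited to the fields visible in this excerpt, not the full structure definition.

#include <stdint.h>

struct RAMState {
    /* ... preceding fields elided ... */
    int dirty_rate_high_cnt;
    /* How many times we have synchronized the bitmap */
    uint64_t bitmap_sync_count;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
};
typedef struct RAMState RAMState;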
}
/* Fix me: there are too many global variables used in the migration process. */
-static int64_t start_time;
static int64_t bytes_xfer_prev;
static uint64_t xbzrle_cache_miss_prev;
static uint64_t iterations_prev;
-static void migration_bitmap_sync_init(void)
+static void migration_bitmap_sync_init(RAMState *rs)
{
- start_time = 0;
+ rs->time_last_bitmap_sync = 0;
bytes_xfer_prev = 0;
num_dirty_pages_period = 0;
xbzrle_cache_miss_prev = 0;
if (!bytes_xfer_prev) {
    bytes_xfer_prev = ram_bytes_transferred();
}
- if (!start_time) {
- start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+ if (!rs->time_last_bitmap_sync) {
+ rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
}
trace_migration_bitmap_sync_start();
end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
/* more than 1 second = 1000 milliseconds */
- if (end_time > start_time + 1000) {
+ if (end_time > rs->time_last_bitmap_sync + 1000) {
if (migrate_auto_converge()) {
/* The following detection logic can be refined later. For now:
   Check to see if the dirtied bytes are 50% more than the approx.
   amount of bytes that just got transferred since the last time we
   were in this routine. If that happens twice, start or increase
   throttling */
xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
}
s->dirty_pages_rate = num_dirty_pages_period * 1000
- / (end_time - start_time);
+ / (end_time - rs->time_last_bitmap_sync);
s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
- start_time = end_time;
+ rs->time_last_bitmap_sync = end_time;
num_dirty_pages_period = 0;
}
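The arithmetic in this block is simply the number of pages dirtied during the sampling window, scaled to a per-second rate over the elapsed wall-clock time in milliseconds. A minimal standalone sketch of that calculation (the helper name and its plain arguments, rather than a RAMState pointer, are illustrative only):

#include <stdint.h>

/* Sketch: pages/second over a sampling window measured in milliseconds.
 * Mirrors: dirty_pages_rate = num_dirty_pages_period * 1000
 *                             / (end_time - rs->time_last_bitmap_sync)
 */
static uint64_t dirty_pages_per_second(uint64_t pages_in_period,
                                       int64_t elapsed_ms)
{
    if (elapsed_ms <= 0) {
        return 0; /* the caller only samples after at least 1000 ms elapsed */
    }
    return pages_in_period * 1000 / elapsed_ms;
}

For example, 3000 pages dirtied over a 1500 ms window yields 2000 pages/second, which the code above then multiplies by TARGET_PAGE_SIZE to obtain dirty_bytes_rate.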
s->dirty_sync_count = rs->bitmap_sync_count;
rs->dirty_rate_high_cnt = 0;
rs->bitmap_sync_count = 0;
- migration_bitmap_sync_init();
+ migration_bitmap_sync_init(rs);
qemu_mutex_init(&migration_bitmap_mutex);
if (migrate_use_xbzrle()) {