double xbzrle_cache_miss_rate;
/* xbzrle number of overflows */
uint64_t xbzrle_overflows;
+ /* number of dirty bits in the bitmap */
+ uint64_t migration_dirty_pages;
};
typedef struct RAMState RAMState;
return ram_state.xbzrle_overflows;
}
+static ram_addr_t ram_save_remaining(void)
+{
+ return ram_state.migration_dirty_pages;
+}
+
static QemuMutex migration_bitmap_mutex;
-static uint64_t migration_dirty_pages;
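The counter moves wholesale: the file-scope global is deleted and the new ram_save_remaining() accessor reads it out of the ram_state singleton instead, until later patches can thread a RAMState * into every caller. A minimal, self-contained sketch of that transitional pattern (names here are illustrative, not QEMU's):

    #include <stdint.h>

    /* Hypothetical miniature of the refactoring: the counter lives in
     * the state struct, a file-scope singleton bridges unconverted call
     * sites, and new code takes the state pointer explicitly. */
    struct State {
        uint64_t dirty_pages;
    };

    static struct State state_singleton;

    /* Transitional accessor: reads the singleton until every caller
     * can be handed a struct State * directly. */
    static uint64_t remaining_pages(void)
    {
        return state_singleton.dirty_pages;
    }

    /* End state: explicit state pointer, no hidden global. */
    static uint64_t remaining_pages_rs(const struct State *s)
    {
        return s->dirty_pages;
    }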
/* used by the search for pages to send */
struct PageSearchStatus {
return (next - base) << TARGET_PAGE_BITS;
}
-static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
+static inline bool migration_bitmap_clear_dirty(RAMState *rs, ram_addr_t addr)
{
bool ret;
int nr = addr >> TARGET_PAGE_BITS;
ret = test_and_clear_bit(nr, bitmap);
if (ret) {
- migration_dirty_pages--;
+ rs->migration_dirty_pages--;
}
return ret;
}
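The point of test_and_clear_bit() here is that its return value reports whether the bit was actually set, so the dirty-page counter is decremented only on a genuine dirty-to-clean transition. A toy, non-atomic illustration of that contract (QEMU's helper is the kernel-style atomic one; this is a sketch only):

    #include <limits.h>
    #include <stdbool.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Toy test-and-clear: report whether the bit was set, then clear
     * it.  The caller decrements its counter only when true is
     * returned. */
    static bool toy_test_and_clear_bit(unsigned long nr, unsigned long *map)
    {
        unsigned long *word = &map[nr / BITS_PER_LONG];
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        bool was_set = (*word & mask) != 0;

        *word &= ~mask;
        return was_set;
    }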
{
unsigned long *bitmap;
bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
- migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap,
- start, length, &rs->num_dirty_pages_period);
+ rs->migration_dirty_pages +=
+ cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length,
+ &rs->num_dirty_pages_period);
}
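The sync helper is assumed to return the number of pages that became newly dirty, so one value can feed both the running total and the per-period counter. A toy model of that contract (hypothetical helper, not QEMU's implementation):

    #include <limits.h>
    #include <stdint.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Toy sync: OR the source dirty log into the destination bitmap
     * and count only the bits that were newly set, mirroring the
     * value the caller adds to both dirty-page counters. */
    static uint64_t toy_sync_dirty_bitmap(unsigned long *dst,
                                          const unsigned long *src,
                                          unsigned long nbits,
                                          uint64_t *num_dirty_pages_period)
    {
        uint64_t newly_dirty = 0;

        for (unsigned long i = 0; i < nbits; i++) {
            unsigned long idx = i / BITS_PER_LONG;
            unsigned long mask = 1UL << (i % BITS_PER_LONG);

            if ((src[idx] & mask) && !(dst[idx] & mask)) {
                dst[idx] |= mask;
                newly_dirty++;
            }
        }
        *num_dirty_pages_period += newly_dirty;
        return newly_dirty;
    }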
static void migration_bitmap_sync_init(RAMState *rs)
int res = 0;
/* Check if the page is dirty, and if so send it */
- if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
+ if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) {
unsigned long *unsentmap;
if (compression_switch && migrate_use_compression()) {
res = ram_save_compressed_page(rs, ms, f, pss,
}
}
-static ram_addr_t ram_save_remaining(void)
-{
- return migration_dirty_pages;
-}
-
uint64_t ram_bytes_remaining(void)
{
return ram_save_remaining() * TARGET_PAGE_SIZE;
void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
{
+ RAMState *rs = &ram_state;
/* called in the qemu main thread, so there is
 * no write race against this migration_bitmap
*/
atomic_rcu_set(&migration_bitmap_rcu, bitmap);
qemu_mutex_unlock(&migration_bitmap_mutex);
- migration_dirty_pages += new - old;
+ rs->migration_dirty_pages += new - old;
call_rcu(old_bitmap, migration_bitmap_free, rcu);
}
}
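Resizing happens under migration_bitmap_mutex, the bigger bitmap is published with atomic_rcu_set(), and the old one is only freed via call_rcu() once all readers have moved on; a concurrent reader therefore sees either the old or the new bitmap, never a torn pointer. A minimal sketch of the publish step using C11 atomics in place of QEMU's primitives (illustrative only, reclamation elided):

    #include <stdatomic.h>

    struct BitmapRcuSketch {
        unsigned long *bmap;
    };

    static _Atomic(struct BitmapRcuSketch *) bitmap_ptr;

    /* Publish a resized bitmap.  Release ordering guarantees that a
     * reader observing the new pointer also observes its initialized
     * contents; the old bitmap must outlive existing readers, which
     * is what call_rcu() arranges in the real code. */
    static void publish_bitmap(struct BitmapRcuSketch *resized)
    {
        atomic_store_explicit(&bitmap_ptr, resized, memory_order_release);
    }

    /* Reader side: one acquire load, then use the snapshot. */
    static struct BitmapRcuSketch *read_bitmap(void)
    {
        return atomic_load_explicit(&bitmap_ptr, memory_order_acquire);
    }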
RAMBlock *block,
PostcopyDiscardState *pds)
{
+ RAMState *rs = &ram_state;
unsigned long *bitmap;
unsigned long *unsentmap;
unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
* Remark them as dirty, updating the count for any pages
* that weren't previously dirty.
*/
- migration_dirty_pages += !test_and_set_bit(page, bitmap);
+ rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
}
}
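When a partially dirty host page is remarked, the !test_and_set_bit() idiom sets the bit unconditionally but adds to the counter only when the page was previously clean, keeping the count exact without a separate lookup. The idiom in isolation, as a non-atomic toy (hypothetical names):

    #include <limits.h>
    #include <stdint.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Remark one page dirty; bump the counter only on a clean->dirty
     * transition so repeated remarks never over-count. */
    static void toy_remark_dirty(unsigned long *map, unsigned long page,
                                 uint64_t *dirty_pages)
    {
        unsigned long *word = &map[page / BITS_PER_LONG];
        unsigned long mask = 1UL << (page % BITS_PER_LONG);

        if (!(*word & mask)) {
            (*dirty_pages)++;
        }
        *word |= mask;      /* idempotent when the page was already dirty */
    }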
* Count the total number of pages used by ram blocks not including any
* gaps due to alignment or unplugs.
*/
- migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
+ rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
memory_global_dirty_log_start();
migration_bitmap_sync(rs);
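At setup every page is treated as dirty, so the initial count is simply the total RAM size shifted down by the page-size bits. A worked example of that arithmetic, assuming 4 KiB target pages (TARGET_PAGE_BITS == 12) and 4 GiB of guest RAM:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const unsigned target_page_bits = 12;        /* 4 KiB pages */
        const uint64_t ram_bytes_total = 4ULL << 30; /* 4 GiB of guest RAM */

        /* Initially every page is considered dirty: */
        uint64_t dirty_pages = ram_bytes_total >> target_page_bits;
        assert(dirty_pages == 1048576);

        /* ram_bytes_remaining() inverts the shift: */
        assert((dirty_pages << target_page_bits) == ram_bytes_total);
        return 0;
    }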