ram_counters.dirty_sync_missed_zero_copy++;
}
+/*
+ * MigrationOps: vtable of per-mode migration callbacks. Currently holds
+ * only the page-save hook; the indirection lets setup code select an
+ * implementation (here: ram_save_target_page_legacy) at migration start.
+ */
+struct MigrationOps {
+    /* Save one target page; returns the number of pages written (1 on
+     * success), or a negative value on error (see call sites' checks). */
+    int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
+};
+typedef struct MigrationOps MigrationOps;
+
+/* Global ops table: allocated in migration setup, freed (and reset to
+ * NULL) in the RAM save cleanup path. NOTE(review): assumes a single
+ * active migration at a time — confirm against migration core. */
+MigrationOps *migration_ops;
+
+
CompressionStats compression_counters;
struct CompressParam {
}
/**
- * ram_save_target_page: save one target page
+ * ram_save_target_page_legacy: save one target page
*
* Returns the number of pages written
*
* @rs: current RAM state
* @pss: data about the page we want to send
*/
-static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
+static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
{
RAMBlock *block = pss->block;
ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
if (page_dirty) {
/* Be strict to return code; it must be 1, or what else? */
- if (ram_save_target_page(rs, pss) != 1) {
+ if (migration_ops->ram_save_target_page(rs, pss) != 1) {
error_report_once("%s: ram_save_target_page failed", __func__);
ret = -1;
goto out;
if (preempt_active) {
qemu_mutex_unlock(&rs->bitmap_mutex);
}
- tmppages = ram_save_target_page(rs, pss);
+ tmppages = migration_ops->ram_save_target_page(rs, pss);
if (tmppages >= 0) {
pages += tmppages;
/*
xbzrle_cleanup();
compress_threads_save_cleanup();
ram_state_cleanup(rsp);
+ g_free(migration_ops);
+ migration_ops = NULL;
}
static void ram_state_reset(RAMState *rs)
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
+ migration_ops = g_malloc0(sizeof(MigrationOps));
+ migration_ops->ram_save_target_page = ram_save_target_page_legacy;
ret = multifd_send_sync_main(f);
if (ret < 0) {
return ret;