Once res_compatible is removed, the _only suffixes no longer make
sense. Drop the suffix and, to make things clearer, rename the
parameters to must_precopy and can_postcopy.
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: Juan Quintela <quintela@redhat.com>
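
To illustrate the intended use of the renamed parameters, here is a minimal
standalone sketch (not QEMU code; demo_state_pending, demo_postcopy_capable
and demo_remaining are made-up names for illustration only). The caller
zeroes both counters and each handler adds its pending bytes to the counter
matching its constraints, the same pattern qemu_savevm_state_pending_estimate()
follows below:

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool demo_postcopy_capable = true; /* hypothetical capability flag */
    static uint64_t demo_remaining = 4096;    /* hypothetical pending bytes */

    /* Handlers accumulate into the counters supplied by the caller. */
    static void demo_state_pending(uint64_t *must_precopy, uint64_t *can_postcopy)
    {
        if (demo_postcopy_capable) {
            /* Can still be sent after the target starts running. */
            *can_postcopy += demo_remaining;
        } else {
            /* Has to be sent before the target starts. */
            *must_precopy += demo_remaining;
        }
    }

    int main(void)
    {
        uint64_t must_precopy = 0, can_postcopy = 0;

        demo_state_pending(&must_precopy, &can_postcopy);

        /* Total pending data is the sum of both counters. */
        printf("pending: %" PRIu64 " bytes\n", must_precopy + can_postcopy);
        return 0;
    }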
return 0;
}
-static void cmma_state_pending(void *opaque,
- uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only)
+static void cmma_state_pending(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy)
{
S390StAttribState *sas = S390_STATTRIB(opaque);
S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas);
long long res = sac->get_dirtycount(sas);
if (res >= 0) {
- *res_precopy_only += res;
+ *must_precopy += res;
}
}
trace_vfio_save_cleanup(vbasedev->name);
}
-static void vfio_state_pending(void *opaque,
- uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only)
+static void vfio_state_pending(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy)
{
VFIODevice *vbasedev = opaque;
VFIOMigration *migration = vbasedev->migration;
return;
}
- *res_precopy_only += migration->pending_bytes;
+ *must_precopy += migration->pending_bytes;
- trace_vfio_state_pending(vbasedev->name, *res_precopy_only,
- *res_postcopy_only);
+ trace_vfio_state_pending(vbasedev->name, *must_precopy, *can_postcopy);
}
static int vfio_save_iterate(QEMUFile *f, void *opaque)
/* This runs outside the iothread lock! */
int (*save_setup)(QEMUFile *f, void *opaque);
/* Note for save_live_pending:
- * - res_precopy_only is for data which must be migrated in precopy phase
- * or in stopped state, in other words - before target vm start
- * - res_postcopy_only is for data which must be migrated in postcopy phase
- * or in stopped state, in other words - after source vm stop
+ * must_precopy:
+ * - must be migrated in precopy or in stopped state
+ * - i.e. must be migrated before target start
*
- * Sum of res_postcopy_only and res_postcopy_only is the whole
- * amount of pending data.
+ * can_postcopy:
+ * - can migrate in postcopy or in stopped state
+ * - i.e. can migrate after target start
+ * - some can also be migrated during precopy (RAM)
+ * - some must be migrated after source stops (block-dirty-bitmap)
+ *
+ * Sum of can_postcopy and must_precopy is the whole amount of
+ * pending data.
*/
/* This estimates the remaining data to transfer */
- void (*state_pending_estimate)(void *opaque,
- uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only);
+ void (*state_pending_estimate)(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy);
/* This calculates the exact remaining data to transfer */
- void (*state_pending_exact)(void *opaque,
- uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only);
+ void (*state_pending_exact)(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy);
LoadStateHandler *load_state;
int (*load_setup)(QEMUFile *f, void *opaque);
int (*load_cleanup)(void *opaque);
}
static void dirty_bitmap_state_pending(void *opaque,
- uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only)
+ uint64_t *must_precopy,
+ uint64_t *can_postcopy)
{
DBMSaveState *s = &((DBMState *)opaque)->save;
SaveBitmapState *dbms;
trace_dirty_bitmap_state_pending(pending);
- *res_postcopy_only += pending;
+ *can_postcopy += pending;
}
/* First occurrence of this bitmap. It should be created if it doesn't exist */
return 0;
}
-static void block_state_pending(void *opaque,
- uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only)
+static void block_state_pending(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy)
{
/* Estimate pending number of bytes to send */
uint64_t pending;
trace_migration_block_state_pending(pending);
/* We don't do postcopy */
- *res_precopy_only += pending;
+ *must_precopy += pending;
}
static int block_load(QEMUFile *f, void *opaque, int version_id)
*/
static MigIterateState migration_iteration_run(MigrationState *s)
{
- uint64_t pend_pre, pend_post;
+ uint64_t must_precopy, can_postcopy;
bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
- qemu_savevm_state_pending_estimate(&pend_pre, &pend_post);
- uint64_t pending_size = pend_pre + pend_post;
+ qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
+ uint64_t pending_size = must_precopy + can_postcopy;
- trace_migrate_pending_estimate(pending_size, pend_pre, pend_post);
+ trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
- if (pend_pre <= s->threshold_size) {
- qemu_savevm_state_pending_exact(&pend_pre, &pend_post);
- pending_size = pend_pre + pend_post;
- trace_migrate_pending_exact(pending_size, pend_pre, pend_post);
+ if (must_precopy <= s->threshold_size) {
+ qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
+ pending_size = must_precopy + can_postcopy;
+ trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
}
if (!pending_size || pending_size < s->threshold_size) {
}
/* Still a significant amount to transfer */
- if (!in_postcopy && pend_pre <= s->threshold_size &&
+ if (!in_postcopy && must_precopy <= s->threshold_size &&
qatomic_read(&s->start_postcopy)) {
if (postcopy_start(s)) {
error_report("%s: postcopy failed to start", __func__);
return 0;
}
-static void ram_state_pending_estimate(void *opaque,
- uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only)
+static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy)
{
RAMState **temp = opaque;
RAMState *rs = *temp;
if (migrate_postcopy_ram()) {
/* We can do postcopy, and all the data is postcopiable */
- *res_postcopy_only += remaining_size;
+ *can_postcopy += remaining_size;
} else {
- *res_precopy_only += remaining_size;
+ *must_precopy += remaining_size;
}
}
-static void ram_state_pending_exact(void *opaque,
- uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only)
+static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy)
{
RAMState **temp = opaque;
RAMState *rs = *temp;
if (migrate_postcopy_ram()) {
/* We can do postcopy, and all the data is postcopiable */
- *res_postcopy_only += remaining_size;
+ *can_postcopy += remaining_size;
} else {
- *res_precopy_only += remaining_size;
+ *must_precopy += remaining_size;
}
}
* the result is split into the amount for units that can and
* for units that can't do postcopy.
*/
-void qemu_savevm_state_pending_estimate(uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only)
+void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
+ uint64_t *can_postcopy)
{
SaveStateEntry *se;
- *res_precopy_only = 0;
- *res_postcopy_only = 0;
+ *must_precopy = 0;
+ *can_postcopy = 0;
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if (!se->ops || !se->ops->state_pending_estimate) {
continue;
}
}
- se->ops->state_pending_estimate(se->opaque,
- res_precopy_only,
- res_postcopy_only);
+ se->ops->state_pending_estimate(se->opaque, must_precopy, can_postcopy);
}
}
-void qemu_savevm_state_pending_exact(uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only)
+void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
+ uint64_t *can_postcopy)
{
SaveStateEntry *se;
- *res_precopy_only = 0;
- *res_postcopy_only = 0;
+ *must_precopy = 0;
+ *can_postcopy = 0;
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if (!se->ops || !se->ops->state_pending_exact) {
continue;
}
}
- se->ops->state_pending_exact(se->opaque,
- res_precopy_only,
- res_postcopy_only);
+ se->ops->state_pending_exact(se->opaque, must_precopy, can_postcopy);
}
}
void qemu_savevm_state_complete_postcopy(QEMUFile *f);
int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
bool inactivate_disks);
-void qemu_savevm_state_pending_exact(uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only);
-void qemu_savevm_state_pending_estimate(uint64_t *res_precopy_only,
- uint64_t *res_postcopy_only);
+void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
+ uint64_t *can_postcopy);
+void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
+ uint64_t *can_postcopy);
void qemu_savevm_send_ping(QEMUFile *f, uint32_t value);
void qemu_savevm_send_open_return_path(QEMUFile *f);
int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len);