abort();
}
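+ /* Update the compression counters for the data that was just sent. */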
+ update_compress_thread_counts(param, len);
+
return len;
}
static void flush_compressed_data(RAMState *rs)
{
- int idx, len, thread_count;
+ int idx, thread_count;
if (!save_page_use_compression(rs)) {
return;
qemu_mutex_lock(&comp_param[idx].mutex);
if (!comp_param[idx].quit) {
CompressParam *param = &comp_param[idx];
- len = send_queued_data(param);
+ send_queued_data(param);
compress_reset_result(param);
-
- /*
- * it's safe to fetch zero_page without holding comp_done_lock
- * as there is no further request submitted to the thread,
- * i.e, the thread should be waiting for a request at this point.
- */
- update_compress_thread_counts(param, len);
}
qemu_mutex_unlock(&comp_param[idx].mutex);
}
static int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset)
{
- int idx, thread_count, bytes_xmit = -1, pages = -1;
+ int idx, thread_count, pages = -1;
bool wait = migrate_compress_wait_thread();
thread_count = migrate_compress_threads();
CompressParam *param = &comp_param[idx];
qemu_mutex_lock(&param->mutex);
param->done = false;
- bytes_xmit = send_queued_data(param);
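+ /* Counter accounting now happens inside send_queued_data(). */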
+ send_queued_data(param);
compress_reset_result(param);
set_compress_params(param, block, offset);
- update_compress_thread_counts(param, bytes_xmit);
qemu_cond_signal(&param->cond);
qemu_mutex_unlock(&param->mutex);
pages = 1;