return len;
}
-static void flush_compressed_data(RAMState *rs)
+static void flush_compressed_data(int (send_queued_data(CompressParam *)))
{
int idx, thread_count;
- if (!save_page_use_compression(rs)) {
- return;
- }
thread_count = migrate_compress_threads();
qemu_mutex_lock(&comp_done_lock);
}
}
+/*
+ * ram_flush_compressed_data: RAMState-aware wrapper around
+ * flush_compressed_data().
+ *
+ * Skips the flush entirely when this migration does not use compression
+ * (per save_page_use_compression()); otherwise flushes the compression
+ * threads' queued data via the send_queued_data callback.
+ *
+ * @rs: the RAMState used to decide whether compression is enabled.
+ *
+ * NOTE(review): send_queued_data is defined elsewhere in this file; it is
+ * presumably the default callback that emits each CompressParam's output
+ * onto the migration stream — confirm against its definition.
+ */
+static void ram_flush_compressed_data(RAMState *rs)
+{
+ if (!save_page_use_compression(rs)) {
+ return;
+ }
+
+ flush_compressed_data(send_queued_data);
+}
+
static inline void set_compress_params(CompressParam *param, RAMBlock *block,
ram_addr_t offset)
{
param->trigger = true;
}
-static int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset)
+static int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset,
+ int (send_queued_data(CompressParam *)))
{
int idx, thread_count, pages = -1;
bool wait = migrate_compress_wait_thread();
* Also If xbzrle is on, stop using the data compression at this
* point. In theory, xbzrle can do better than compression.
*/
- flush_compressed_data(rs);
+ ram_flush_compressed_data(rs);
/* Hit the end of the list */
pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
* much CPU resource.
*/
if (block != pss->last_sent_block) {
- flush_compressed_data(rs);
+ ram_flush_compressed_data(rs);
return false;
}
- if (compress_page_with_multi_thread(block, offset) > 0) {
+ if (compress_page_with_multi_thread(block, offset, send_queued_data) > 0) {
return true;
}
* page is sent in one chunk.
*/
if (migrate_postcopy_ram()) {
- flush_compressed_data(rs);
+ ram_flush_compressed_data(rs);
}
/*
}
qemu_mutex_unlock(&rs->bitmap_mutex);
- flush_compressed_data(rs);
+ ram_flush_compressed_data(rs);
ram_control_after_iterate(f, RAM_CONTROL_FINISH);
}