return ret;
}
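+/*
+ * Returns true if the block is skipped by RAM migration: either it is not
+ * migratable at all, or it is a shared block and the ignore-shared
+ * capability is enabled.
+ */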
+static bool ramblock_is_ignored(RAMBlock *block)
+{
+ return !qemu_ram_is_migratable(block) ||
+ (migrate_ignore_shared() && qemu_ram_is_shared(block));
+}
+
/* Should be holding either ram_list.mutex, or the RCU lock. */
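+/* Iterate only over the blocks that will actually be migrated. */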
+#define RAMBLOCK_FOREACH_NOT_IGNORED(block) \
+ INTERNAL_RAMBLOCK_FOREACH(block) \
+ if (ramblock_is_ignored(block)) {} else
+
#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
INTERNAL_RAMBLOCK_FOREACH(block) \
if (!qemu_ram_is_migratable(block)) {} else
#undef RAMBLOCK_FOREACH
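+/*
+ * Call func(block, opaque) for every not-ignored RAM block. Stops at the
+ * first non-zero value returned by func and returns it; returns 0 otherwise.
+ */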
+int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
+{
+ RAMBlock *block;
+ int ret = 0;
+
+ rcu_read_lock();
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+ ret = func(block, opaque);
+ if (ret) {
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
static void ramblock_recv_map_init(void)
{
RAMBlock *rb;
- RAMBLOCK_FOREACH_MIGRATABLE(rb) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
assert(!rb->receivedmap);
rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
}
unsigned long *bitmap = rb->bmap;
unsigned long next;
- if (!qemu_ram_is_migratable(rb)) {
+ if (ramblock_is_ignored(rb)) {
return size;
}
RAMBlock *block;
uint64_t summary = 0;
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
summary |= block->page_size;
}
qemu_mutex_lock(&rs->bitmap_mutex);
rcu_read_lock();
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
migration_bitmap_sync_range(rs, block, 0, block->used_length);
}
ram_counters.remaining = ram_bytes_remaining();
size_t pagesize_bits =
qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
- if (!qemu_ram_is_migratable(pss->block)) {
+ if (ramblock_is_ignored(pss->block)) {
error_report("block %s should not be migrated !", pss->block->idstr);
return 0;
}
}
}
-uint64_t ram_bytes_total(void)
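+/*
+ * Total bytes of RAM known to migration. With count_ignored set, blocks
+ * skipped by ignore-shared are included as well (this is the value written
+ * with RAM_SAVE_FLAG_MEM_SIZE); otherwise only blocks that will actually
+ * be sent are counted.
+ */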
+static uint64_t ram_bytes_total_common(bool count_ignored)
{
RAMBlock *block;
uint64_t total = 0;
rcu_read_lock();
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
- total += block->used_length;
+ if (count_ignored) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ total += block->used_length;
+ }
+ } else {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+ total += block->used_length;
+ }
}
rcu_read_unlock();
return total;
}
+uint64_t ram_bytes_total(void)
+{
+ return ram_bytes_total_common(false);
+}
+
static void xbzrle_load_setup(void)
{
XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
*/
memory_global_dirty_log_stop();
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
g_free(block->bmap);
block->bmap = NULL;
g_free(block->unsentmap);
{
struct RAMBlock *block;
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
unsigned long *bitmap = block->bmap;
unsigned long range = block->used_length >> TARGET_PAGE_BITS;
unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
struct RAMBlock *block;
int ret;
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
PostcopyDiscardState *pds =
postcopy_discard_send_init(ms, block->idstr);
rs->last_sent_block = NULL;
rs->last_page = 0;
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
unsigned long *bitmap = block->bmap;
unsigned long *unsentmap = block->unsentmap;
/* Skip setting bitmap if there is no RAM */
if (ram_bytes_total()) {
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
pages = block->max_length >> TARGET_PAGE_BITS;
block->bmap = bitmap_new(pages);
bitmap_set(block->bmap, 0, pages);
* about dirty page logging as well.
*/
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
pages += bitmap_count_one(block->bmap,
block->used_length >> TARGET_PAGE_BITS);
}
rcu_read_lock();
- qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
+ qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
RAMBLOCK_FOREACH_MIGRATABLE(block) {
qemu_put_byte(f, strlen(block->idstr));
if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
qemu_put_be64(f, block->page_size);
}
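+ /*
+ * With ignore-shared enabled, also record each block's GPA and whether
+ * it is ignored, so the destination side can cross-check them.
+ */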
+ if (migrate_ignore_shared()) {
+ qemu_put_be64(f, block->mr->addr);
+ qemu_put_byte(f, ramblock_is_ignored(block) ? 1 : 0);
+ }
}
rcu_read_unlock();
return NULL;
}
- if (!qemu_ram_is_migratable(block)) {
+ if (ramblock_is_ignored(block)) {
error_report("block %s should not be migrated !", id);
return NULL;
}
RAMBlock *block;
rcu_read_lock();
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
block->colo_cache = qemu_anon_ram_alloc(block->used_length,
NULL,
false);
if (ram_bytes_total()) {
RAMBlock *block;
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
block->bmap = bitmap_new(pages);
out_locked:
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
if (block->colo_cache) {
qemu_anon_ram_free(block->colo_cache, block->used_length);
block->colo_cache = NULL;
RAMBlock *block;
memory_global_dirty_log_stop();
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
g_free(block->bmap);
block->bmap = NULL;
}
rcu_read_lock();
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
if (block->colo_cache) {
qemu_anon_ram_free(block->colo_cache, block->used_length);
block->colo_cache = NULL;
{
RAMBlock *rb;
- RAMBLOCK_FOREACH_MIGRATABLE(rb) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
if (ramblock_is_pmem(rb)) {
pmem_persist(rb->host, rb->used_length);
}
xbzrle_load_cleanup();
compress_threads_load_cleanup();
- RAMBLOCK_FOREACH_MIGRATABLE(rb) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
g_free(rb->receivedmap);
rb->receivedmap = NULL;
}
memory_global_dirty_log_sync();
rcu_read_lock();
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
migration_bitmap_sync_range(ram_state, block, 0, block->used_length);
}
rcu_read_unlock();
ret = -EINVAL;
}
}
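+ /*
+ * Cross-check what the source recorded: the ignored status must match,
+ * and an ignored block must be mapped at the same GPA on both sides.
+ */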
+ if (migrate_ignore_shared()) {
+ hwaddr addr = qemu_get_be64(f);
+ bool ignored = qemu_get_byte(f);
+ if (ignored != ramblock_is_ignored(block)) {
+ error_report("RAM block %s should %s be migrated",
+ id, ignored ? "" : "not");
+ ret = -EINVAL;
+ }
+ if (ramblock_is_ignored(block) &&
+ block->mr->addr != addr) {
+ error_report("Mismatched GPAs for block %s "
+ "%" PRId64 "!= %" PRId64,
+ id, (uint64_t)addr,
+ (uint64_t)block->mr->addr);
+ ret = -EINVAL;
+ }
+ }
ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
block->idstr);
} else {
static bool ram_has_postcopy(void *opaque)
{
RAMBlock *rb;
- RAMBLOCK_FOREACH_MIGRATABLE(rb) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
if (ramblock_is_pmem(rb)) {
info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
"is not supported now!", rb->idstr, rb->host);
trace_ram_dirty_bitmap_sync_start();
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
qemu_savevm_send_recv_bitmap(file, block->idstr);
trace_ram_dirty_bitmap_request(block->idstr);
ramblock_count++;