* (Set during postcopy)
*/
#define RAM_UF_ZEROPAGE (1 << 3)
+
+/* RAM can be migrated */
+#define RAM_MIGRATABLE (1 << 4)
#endif
#ifdef TARGET_PAGE_BITS_VARY
rb->flags |= RAM_UF_ZEROPAGE;
}
+
+bool qemu_ram_is_migratable(RAMBlock *rb)
+{
+ return rb->flags & RAM_MIGRATABLE;
+}
+
+void qemu_ram_set_migratable(RAMBlock *rb)
+{
+ rb->flags |= RAM_MIGRATABLE;
+}
+
+void qemu_ram_unset_migratable(RAMBlock *rb)
+{
+ rb->flags &= ~RAM_MIGRATABLE;
+}
+
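(Usage sketch, not part of the patch: the flag is toggled by whichever code
registers the block for migration; `example_register_block` below is a
hypothetical caller, shown only to illustrate the accessors.)

    /* Hypothetical caller: opt a block in or out of migration. */
    static void example_register_block(RAMBlock *rb, bool migratable)
    {
        if (migratable) {
            qemu_ram_set_migratable(rb);
        } else {
            qemu_ram_unset_migratable(rb);
        }
    }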
/* Called with iothread lock held. */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
return ret;
}
+
+int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque)
+{
+ RAMBlock *block;
+ int ret = 0;
+
+ rcu_read_lock();
+ RAMBLOCK_FOREACH(block) {
+ if (!qemu_ram_is_migratable(block)) {
+ continue;
+ }
+ ret = func(block->idstr, block->host, block->offset,
+ block->used_length, opaque);
+ if (ret) {
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
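(Usage sketch, not part of the patch: callers pass a RAMBlockIterFunc and only
migratable blocks are visited; `sum_migratable` and `total` are illustrative
names, matching the callback signature used in the loop above.)

    /* Illustrative callback: accumulate the size of migratable RAM. */
    static int sum_migratable(const char *block_name, void *host_addr,
                              ram_addr_t offset, ram_addr_t length,
                              void *opaque)
    {
        *(uint64_t *)opaque += length;
        return 0; /* a non-zero return stops the iteration */
    }

    /* uint64_t total = 0;
     * qemu_ram_foreach_migratable_block(sum_migratable, &total); */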
/*
* Unmap pages of memory from start to start+length such that
* they a) read as 0, b) Trigger whatever fault mechanism
bool qemu_ram_is_shared(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
+bool qemu_ram_is_migratable(RAMBlock *rb);
+void qemu_ram_set_migratable(RAMBlock *rb);
+void qemu_ram_unset_migratable(RAMBlock *rb);
size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);
ram_addr_t offset, ram_addr_t length, void *opaque);
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
+int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
#endif
}
/* We don't support postcopy with shared RAM yet */
- if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
+ if (qemu_ram_foreach_migratable_block(test_ramblock_postcopiable, NULL)) {
goto out;
}
*/
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
- if (qemu_ram_foreach_block(init_range, NULL)) {
+ if (qemu_ram_foreach_migratable_block(init_range, NULL)) {
return -1;
}
return -1;
}
- if (qemu_ram_foreach_block(cleanup_range, mis)) {
+ if (qemu_ram_foreach_migratable_block(cleanup_range, mis)) {
return -1;
}
/* Let the fault thread quit */
*/
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
- if (qemu_ram_foreach_block(nhp_range, mis)) {
+ if (qemu_ram_foreach_migratable_block(nhp_range, mis)) {
return -1;
}
/*
* Mark the given area of RAM as requiring notification to unwritten areas
- * Used as a callback on qemu_ram_foreach_block.
+ * Used as a callback on qemu_ram_foreach_migratable_block.
* host_addr: Base of area to mark
* offset: Offset in the whole ram arena
* length: Length of the section
mis->have_fault_thread = true;
/* Mark so that we get notified of accesses to unwritten areas */
- if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
+ if (qemu_ram_foreach_migratable_block(ram_block_enable_notify, mis)) {
return -1;
}
return ret;
}
+/* Should be holding either ram_list.mutex or the RCU lock. */
+#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
+ RAMBLOCK_FOREACH(block) \
+ if (!qemu_ram_is_migratable(block)) {} else
+
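(Note on the macro shape: the empty-then/else form keeps the whole expansion a
single statement, so the loop body attaches to the `else` and a subsequent
`else` in caller code cannot bind to the hidden `if`. Hand-expanded,
illustratively:)

    RAMBLOCK_FOREACH_MIGRATABLE(rb) { body; }
    /* expands to */
    RAMBLOCK_FOREACH(rb)
        if (!qemu_ram_is_migratable(rb)) {
            /* skip non-migratable blocks */
        } else { body; }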
static void ramblock_recv_map_init(void)
{
RAMBlock *rb;
- RAMBLOCK_FOREACH(rb) {
+ RAMBLOCK_FOREACH_MIGRATABLE(rb) {
assert(!rb->receivedmap);
rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
}
unsigned long *bitmap = rb->bmap;
unsigned long next;
+ if (!qemu_ram_is_migratable(rb)) {
+ return size;
+ }
+
if (rs->ram_bulk_stage && start > 0) {
next = start + 1;
} else {
RAMBlock *block;
uint64_t summary = 0;
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
summary |= block->page_size;
}
qemu_mutex_lock(&rs->bitmap_mutex);
rcu_read_lock();
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
migration_bitmap_sync_range(rs, block, 0, block->used_length);
}
rcu_read_unlock();
size_t pagesize_bits =
qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
+ if (!qemu_ram_is_migratable(pss->block)) {
+ error_report("block %s should not be migrated !", pss->block->idstr);
+ return 0;
+ }
+
do {
/* Check the pages is dirty and if it is send it */
if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
uint64_t total = 0;
rcu_read_lock();
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
total += block->used_length;
}
rcu_read_unlock();
*/
memory_global_dirty_log_stop();
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
g_free(block->bmap);
block->bmap = NULL;
g_free(block->unsentmap);
{
struct RAMBlock *block;
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
unsigned long *bitmap = block->bmap;
unsigned long range = block->used_length >> TARGET_PAGE_BITS;
unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
struct RAMBlock *block;
int ret;
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
PostcopyDiscardState *pds =
postcopy_discard_send_init(ms, block->idstr);
rs->last_sent_block = NULL;
rs->last_page = 0;
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
unsigned long *bitmap = block->bmap;
unsigned long *unsentmap = block->unsentmap;
/* Skip setting bitmap if there is no RAM */
if (ram_bytes_total()) {
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
pages = block->max_length >> TARGET_PAGE_BITS;
block->bmap = bitmap_new(pages);
bitmap_set(block->bmap, 0, pages);
qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
- RAMBLOCK_FOREACH(block) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
qemu_put_byte(f, strlen(block->idstr));
qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
qemu_put_be64(f, block->used_length);
return NULL;
}
+ if (!qemu_ram_is_migratable(block)) {
+ error_report("block %s should not be migrated !", id);
+ return NULL;
+ }
+
return block;
}
xbzrle_load_cleanup();
compress_threads_load_cleanup();
- RAMBLOCK_FOREACH(rb) {
+ RAMBLOCK_FOREACH_MIGRATABLE(rb) {
g_free(rb->receivedmap);
rb->receivedmap = NULL;
}
length = qemu_get_be64(f);
block = qemu_ram_block_by_name(id);
- if (block) {
+ if (block && !qemu_ram_is_migratable(block)) {
+ error_report("block %s should not be migrated !", id);
+ ret = -EINVAL;
+ } else if (block) {
if (length != block->used_length) {
Error *local_err = NULL;