static int ram_block_notify_add_single(RAMBlock *rb, void *opaque)
{
    const ram_addr_t max_size = qemu_ram_get_max_length(rb);
+    const ram_addr_t size = qemu_ram_get_used_length(rb);
    void *host = qemu_ram_get_host_addr(rb);
    RAMBlockNotifier *notifier = opaque;
    if (host) {
-        notifier->ram_block_added(notifier, host, max_size);
+        notifier->ram_block_added(notifier, host, size, max_size);
    }
    return 0;
}
    QLIST_REMOVE(n, next);
}
-void ram_block_notify_add(void *host, size_t size)
+void ram_block_notify_add(void *host, size_t size, size_t max_size)
{
    RAMBlockNotifier *notifier;
    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
-        notifier->ram_block_added(notifier, host, size);
+        notifier->ram_block_added(notifier, host, size, max_size);
    }
}
-void ram_block_notify_remove(void *host, size_t size)
+void ram_block_notify_remove(void *host, size_t size, size_t max_size)
{
    RAMBlockNotifier *notifier;
    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
-        notifier->ram_block_removed(notifier, host, size);
+        notifier->ram_block_removed(notifier, host, size, max_size);
+    }
+}
+
+void ram_block_notify_resize(void *host, size_t old_size, size_t new_size)
+{
+    RAMBlockNotifier *notifier;
+
+    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
+        if (notifier->ram_block_resized) {
+            notifier->ram_block_resized(notifier, host, old_size, new_size);
+        }
    }
}
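
Taken together, the interface now describes a block's full lifecycle: one add with (used_length, max_length) when the block appears (see ram_block_notify_add_single above and the ram_block_add hunk below), zero or more resize notifications with the old and new used_length while it lives, and one remove with (used_length, max_length) on teardown. A minimal sketch with purely illustrative sizes; example_lifecycle is a hypothetical helper, not part of the patch:

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "exec/ramlist.h"

/*
 * Hypothetical illustration only: 'host' is the block's mapped address and
 * the 1/4/16 MiB figures are made up; real callers pass the block's lengths.
 */
static void example_lifecycle(void *host)
{
    ram_block_notify_add(host, 1 * MiB, 16 * MiB);    /* block created     */
    ram_block_notify_resize(host, 1 * MiB, 4 * MiB);  /* used_length grows */
    ram_block_notify_remove(host, 4 * MiB, 16 * MiB); /* block torn down   */
}
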
    if (entry->vaddr_base != NULL) {
        if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
-            ram_block_notify_remove(entry->vaddr_base, entry->size);
+            ram_block_notify_remove(entry->vaddr_base, entry->size,
+                                    entry->size);
        }
        /*
    }
    if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
-        ram_block_notify_add(vaddr_base, size);
+        ram_block_notify_add(vaddr_base, size, size);
    }
    entry->vaddr_base = vaddr_base;
    }
    pentry->next = entry->next;
-    ram_block_notify_remove(entry->vaddr_base, entry->size);
+    ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size);
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
void qemu_mutex_unlock_ramlist(void);
struct RAMBlockNotifier {
-    void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size);
-    void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size);
+    void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size,
+                            size_t max_size);
+    void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size,
+                              size_t max_size);
+    void (*ram_block_resized)(RAMBlockNotifier *n, void *host, size_t old_size,
+                              size_t new_size);
    QLIST_ENTRY(RAMBlockNotifier) next;
};
void ram_block_notifier_add(RAMBlockNotifier *n);
void ram_block_notifier_remove(RAMBlockNotifier *n);
-void ram_block_notify_add(void *host, size_t size);
-void ram_block_notify_remove(void *host, size_t size);
+void ram_block_notify_add(void *host, size_t size, size_t max_size);
+void ram_block_notify_remove(void *host, size_t size, size_t max_size);
+void ram_block_notify_resize(void *host, size_t old_size, size_t new_size);
void ram_block_dump(Monitor *mon);
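
For a notifier written against the widened struct, the simplest pattern is to cover the whole reservation in ram_block_added and leave the optional ram_block_resized hook NULL, so that resizes within max_size need no action; that is the pattern the HAX, SEV and VFIO conversions below follow. A minimal sketch, where my_notifier, my_map_region() and my_unmap_region() are hypothetical placeholders rather than QEMU APIs:

#include "qemu/osdep.h"
#include "exec/ramlist.h"

/* Placeholder backend mapping API; not part of this patch. */
static void my_map_region(void *host, size_t len) { }
static void my_unmap_region(void *host, size_t len) { }

static void my_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
                               size_t max_size)
{
    /* Map the whole reservation so later resizes need no extra work. */
    my_map_region(host, max_size);
}

static void my_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size,
                                 size_t max_size)
{
    my_unmap_region(host, max_size);
}

static RAMBlockNotifier my_notifier = {
    .ram_block_added = my_ram_block_added,
    .ram_block_removed = my_ram_block_removed,
    /* .ram_block_resized left NULL: resizes within max_size are a no-op. */
};

Registration itself is unchanged: ram_block_notifier_add(&my_notifier).
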
*/
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
+    const ram_addr_t oldsize = block->used_length;
    const ram_addr_t unaligned_size = newsize;
    assert(block);
        return -EINVAL;
    }
+    /* Notify before modifying the ram block and touching the bitmaps. */
+    if (block->host) {
+        ram_block_notify_resize(block->host, oldsize, newsize);
+    }
+
    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
            qemu_madvise(new_block->host, new_block->max_length,
                         QEMU_MADV_DONTFORK);
        }
-        ram_block_notify_add(new_block->host, new_block->max_length);
+        ram_block_notify_add(new_block->host, new_block->used_length,
+                             new_block->max_length);
    }
}
    }
    if (block->host) {
-        ram_block_notify_remove(block->host, block->max_length);
+        ram_block_notify_remove(block->host, block->used_length,
+                                block->max_length);
    }
    qemu_mutex_lock_ramlist();
    .priority = 10,
};
-static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
+static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
+                                size_t max_size)
{
    /*
     * We must register each RAM block with the HAXM kernel module, or
     * host physical pages for the RAM block as part of this registration
     * process, hence the name hax_populate_ram().
     */
-    if (hax_populate_ram((uint64_t)(uintptr_t)host, size) < 0) {
+    if (hax_populate_ram((uint64_t)(uintptr_t)host, max_size) < 0) {
        fprintf(stderr, "HAX failed to populate RAM\n");
        abort();
    }
}
static void
-sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
+sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
+                    size_t max_size)
{
    int r;
    struct kvm_enc_region range;
    }
    range.addr = (__u64)(unsigned long)host;
-    range.size = size;
+    range.size = max_size;
-    trace_kvm_memcrypt_register_region(host, size);
+    trace_kvm_memcrypt_register_region(host, max_size);
    r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
    if (r) {
        error_report("%s: failed to register region (%p+%#zx) error '%s'",
-                     __func__, host, size, strerror(errno));
+                     __func__, host, max_size, strerror(errno));
        exit(1);
    }
}
static void
-sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size)
+sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size,
+                      size_t max_size)
{
    int r;
    struct kvm_enc_region range;
    }
    range.addr = (__u64)(unsigned long)host;
-    range.size = size;
+    range.size = max_size;
-    trace_kvm_memcrypt_unregister_region(host, size);
+    trace_kvm_memcrypt_unregister_region(host, max_size);
    r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range);
    if (r) {
        error_report("%s: failed to unregister region (%p+%#zx)",
-                     __func__, host, size);
+                     __func__, host, max_size);
    }
}
    return ret;
}
-static void qemu_vfio_ram_block_added(RAMBlockNotifier *n,
-                                      void *host, size_t size)
+static void qemu_vfio_ram_block_added(RAMBlockNotifier *n, void *host,
+                                      size_t size, size_t max_size)
{
    QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
    int ret;
-    trace_qemu_vfio_ram_block_added(s, host, size);
-    ret = qemu_vfio_dma_map(s, host, size, false, NULL);
+    trace_qemu_vfio_ram_block_added(s, host, max_size);
+    ret = qemu_vfio_dma_map(s, host, max_size, false, NULL);
    if (ret) {
-        error_report("qemu_vfio_dma_map(%p, %zu) failed: %s", host, size,
+        error_report("qemu_vfio_dma_map(%p, %zu) failed: %s", host, max_size,
                     strerror(-ret));
    }
}
-static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
-                                        void *host, size_t size)
+static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n, void *host,
+                                        size_t size, size_t max_size)
{
    QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
    if (host) {
-        trace_qemu_vfio_ram_block_removed(s, host, size);
+        trace_qemu_vfio_ram_block_removed(s, host, max_size);
        qemu_vfio_dma_unmap(s, host);
    }
}
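
A backend that instead wants to track only the used range would map size (not max_size) in its ram_block_added hook and implement the new optional callback; none of the notifiers converted above do this, which is why they keep passing max_size. A hypothetical sketch of such a hook, only to illustrate the old_size/new_size contract; grow_mapping() and shrink_mapping() are placeholders, not QEMU APIs:

/* Placeholders for a backend that maps only the currently used range. */
static void grow_mapping(void *host, size_t len) { }
static void shrink_mapping(void *host, size_t len) { }

static void my_ram_block_resized(RAMBlockNotifier *n, void *host,
                                 size_t old_size, size_t new_size)
{
    if (new_size > old_size) {
        /* The block grew: cover the newly used tail of the reservation. */
        grow_mapping((char *)host + old_size, new_size - old_size);
    } else if (new_size < old_size) {
        /* The block shrank: drop the range that is no longer used. */
        shrink_mapping((char *)host + new_size, old_size - new_size);
    }
}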