From 23f1174aac4181f86bb7e13ca8bc2d4a0bdf1e5c Mon Sep 17 00:00:00 2001
From: Peter Xu <peterx@redhat.com>
Date: Tue, 20 Aug 2019 22:13:25 +0800
Subject: [PATCH] memory: Split zones when doing coalesced_io_del()

This is a workaround for the current KVM KVM_UNREGISTER_COALESCED_MMIO
interface.  The kernel interface only allows unregistering an MMIO
device using exactly the zone size it was registered with, or any
smaller zone contained within the device's MMIO zone.  It does not
allow userspace to specify one very large zone in order to remove all
the small MMIO devices covered by it.

Logically it would be nicer to fix this on the KVM side, but in any
case we still need to cope with old kernels, so let's do it this way.

Fixes: 3ac7d43a6fbb5d4a3
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20190820141328.10009-2-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 memory.c | 49 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 35 insertions(+), 14 deletions(-)

diff --git a/memory.c b/memory.c
index 9a1193a144..7124274f49 100644
--- a/memory.c
+++ b/memory.c
@@ -855,8 +855,39 @@ static void address_space_update_ioeventfds(AddressSpace *as)
     flatview_unref(view);
 }
 
+/*
+ * Notify the memory listeners about the coalesced IO change events of
+ * range `cmr'.  Only the part that intersects the specified FlatRange
+ * will be sent.
+ */
+static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
+                                           CoalescedMemoryRange *cmr, bool add)
+{
+    AddrRange tmp;
+
+    tmp = addrrange_shift(cmr->addr,
+                          int128_sub(fr->addr.start,
+                                     int128_make64(fr->offset_in_region)));
+    if (!addrrange_intersects(tmp, fr->addr)) {
+        return;
+    }
+    tmp = addrrange_intersection(tmp, fr->addr);
+
+    if (add) {
+        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
+                                      int128_get64(tmp.start),
+                                      int128_get64(tmp.size));
+    } else {
+        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
+                                      int128_get64(tmp.start),
+                                      int128_get64(tmp.size));
+    }
+}
+
 static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
 {
+    CoalescedMemoryRange *cmr;
+
     if (!fr->has_coalesced_range) {
         return;
     }
@@ -865,16 +896,15 @@ static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
         return;
     }
 
-    MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
-                                  int128_get64(fr->addr.start),
-                                  int128_get64(fr->addr.size));
+    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
+        flat_range_coalesced_io_notify(fr, as, cmr, false);
+    }
 }
 
 static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
 {
     MemoryRegion *mr = fr->mr;
     CoalescedMemoryRange *cmr;
-    AddrRange tmp;
 
     if (QTAILQ_EMPTY(&mr->coalesced)) {
         return;
@@ -885,16 +915,7 @@ static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
     }
 
     QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
-        tmp = addrrange_shift(cmr->addr,
-                              int128_sub(fr->addr.start,
-                                         int128_make64(fr->offset_in_region)));
-        if (!addrrange_intersects(tmp, fr->addr)) {
-            continue;
-        }
-        tmp = addrrange_intersection(tmp, fr->addr);
-        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
-                                      int128_get64(tmp.start),
-                                      int128_get64(tmp.size));
+        flat_range_coalesced_io_notify(fr, as, cmr, true);
     }
 }
 
-- 
2.30.2
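
For context, a minimal userspace sketch of the kernel interface the commit
message refers to.  It is not part of the patch: the guest-physical addresses
are invented for illustration and error handling is kept minimal.  It
registers two small coalesced MMIO zones and then removes them one by one
with the exact ranges used at registration, since, as the commit message
notes, a single large zone covering both cannot remove them in one call.

/* Illustrative sketch only; the zone values below are made up. */
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
    struct kvm_coalesced_mmio_zone zones[2] = {
        { .addr = 0x10000, .size = 0x100 },
        { .addr = 0x20000, .size = 0x100 },
    };
    int i, kvm_fd, vm_fd;

    kvm_fd = open("/dev/kvm", O_RDWR);
    if (kvm_fd < 0) {
        perror("open /dev/kvm");
        return 1;
    }
    vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
    if (vm_fd < 0) {
        perror("KVM_CREATE_VM");
        return 1;
    }

    /* Register two small coalesced MMIO zones. */
    for (i = 0; i < 2; i++) {
        if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zones[i]) < 0) {
            perror("KVM_REGISTER_COALESCED_MMIO");
            return 1;
        }
    }

    /*
     * Unregister them with the same per-zone ranges.  Per the commit
     * message, one big covering zone would not remove both, which is
     * why memory.c now splits the del notifications per
     * CoalescedMemoryRange instead of sending one whole-FlatRange del.
     */
    for (i = 0; i < 2; i++) {
        if (ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zones[i]) < 0) {
            perror("KVM_UNREGISTER_COALESCED_MMIO");
            return 1;
        }
    }
    return 0;
}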