From: Richard Henderson
Date: Thu, 29 Sep 2022 17:51:21 +0000 (-0700)
Subject: accel/tcg: Inline tb_flush_jmp_cache
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=1d41a79b3c7573c941723455149cfadfe7c2ab37;p=qemu.git

accel/tcg: Inline tb_flush_jmp_cache

This function has two users, who use it incompatibly.
In tlb_flush_page_by_mmuidx_async_0, when flushing a
single page, we need to flush exactly two pages.
In tlb_flush_range_by_mmuidx_async_0, when flushing a
range of pages, we need to flush N+1 pages.

This avoids double-flushing of jmp cache pages in a range.

Reviewed-by: Alex Bennée
Signed-off-by: Richard Henderson
---

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a0db2d32a8..c7909fb619 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -107,14 +107,6 @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
     }
 }
 
-static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
-{
-    /* Discard jump cache entries for any tb which might potentially
-       overlap the flushed page. */
-    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
-    tb_jmp_cache_clear_page(cpu, addr);
-}
-
 /**
  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
  * @desc: The CPUTLBDesc portion of the TLB
@@ -541,7 +533,12 @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
     }
     qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-    tb_flush_jmp_cache(cpu, addr);
+    /*
+     * Discard jump cache entries for any tb which might potentially
+     * overlap the flushed page, which includes the previous.
+     */
+    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
+    tb_jmp_cache_clear_page(cpu, addr);
 }
 
 /**
@@ -792,8 +789,14 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
         return;
     }
 
-    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
-        tb_flush_jmp_cache(cpu, d.addr + i);
+    /*
+     * Discard jump cache entries for any tb which might potentially
+     * overlap the flushed pages, which includes the previous.
+     */
+    d.addr -= TARGET_PAGE_SIZE;
+    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
+        tb_jmp_cache_clear_page(cpu, d.addr);
+        d.addr += TARGET_PAGE_SIZE;
     }
 }
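
Note (not part of the patch): a minimal standalone sketch of the page-count
arithmetic the commit message describes. A flush of N pages clears N + 1
jmp-cache pages, widened one page downward so that a TB beginning on the
preceding page and spilling into the flushed range is also discarded.
PAGE_SIZE, clear_page() and clear_range() are hypothetical stand-ins for
TARGET_PAGE_SIZE and tb_jmp_cache_clear_page(); they are not QEMU APIs.

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Stand-in for tb_jmp_cache_clear_page(). */
static void clear_page(uint64_t page_addr)
{
    printf("clear jmp cache page 0x%" PRIx64 "\n", page_addr);
}

/* Clear len / PAGE_SIZE + 1 pages, starting one page below addr. */
static void clear_range(uint64_t addr, uint64_t len)
{
    addr -= PAGE_SIZE;
    for (uint64_t i = 0, n = len / PAGE_SIZE + 1; i < n; i++) {
        clear_page(addr);
        addr += PAGE_SIZE;
    }
}

int main(void)
{
    /* Flushing two pages at 0x10000 clears 0xf000, 0x10000 and 0x11000. */
    clear_range(0x10000, 2 * PAGE_SIZE);
    return 0;
}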