     /* suppress any remaining jumps to this TB */
     tb_jmp_unlink(tb);
-    tb_ctx.tb_phys_invalidate_count++;
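+    /*
+     * Updates go through the invalidating thread's own tcg_ctx, so a
+     * plain read of the counter paired with atomic_set suffices here;
+     * concurrent readers sum the per-context values with atomic_read
+     * (see tcg_tb_phys_invalidate_count()).
+     */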
+    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
+               tcg_ctx->tb_phys_invalidate_count + 1);
 }
 #ifdef CONFIG_SOFTMMU
     cpu_fprintf(f, "\nStatistics:\n");
     cpu_fprintf(f, "TB flush count %u\n",
                 atomic_read(&tb_ctx.tb_flush_count));
-    cpu_fprintf(f, "TB invalidate count %d\n",
-                tb_ctx.tb_phys_invalidate_count);
+    cpu_fprintf(f, "TB invalidate count %zu\n",
+                tcg_tb_phys_invalidate_count());
     cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count());
     tcg_dump_info(f, cpu_fprintf);
 }
     /* statistics */
     unsigned tb_flush_count;
-    int tb_phys_invalidate_count;
 };
 
 extern TBContext tb_ctx;
     return capacity;
 }
 
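+/*
+ * Sum up the per-TCGContext invalidate counters.  Contexts can keep
+ * incrementing while we iterate, so the result is a snapshot that may
+ * slightly lag the true total; that is acceptable for statistics.
+ */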
+size_t tcg_tb_phys_invalidate_count(void)
+{
+    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
+    unsigned int i;
+    size_t total = 0;
+
+    for (i = 0; i < n_ctxs; i++) {
+        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
+
+        total += atomic_read(&s->tb_phys_invalidate_count);
+    }
+    return total;
+}
+
 /* pool based memory allocation */
 void *tcg_malloc_internal(TCGContext *s, int size)
 {
     /* Threshold to flush the translated code buffer. */
     void *code_gen_highwater;
 
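+    /*
+     * Number of TBs this context has invalidated; read racily by
+     * tcg_tb_phys_invalidate_count() when dumping statistics.
+     */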
+    size_t tb_phys_invalidate_count;
+
     /* Track which vCPU triggers events */
     CPUState *cpu;                      /* *_trans */
 void tcg_tb_insert(TranslationBlock *tb);
 void tcg_tb_remove(TranslationBlock *tb);
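+/* Total number of TBs invalidated, summed across all TCG contexts.  */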
+size_t tcg_tb_phys_invalidate_count(void);
 TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
 void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
 size_t tcg_nb_tbs(void);