static inline void
atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, uint16_t info)
{
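+ /* an atomic RMW is reported twice: once as a load, once as a store */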
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
}
static inline
void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, uint16_t info)
{
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
}
static inline
void atomic_trace_st_post(CPUArchState *env, target_ulong addr, uint16_t info)
{
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
}
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/plugin.h"
#include "trace/mem.h"
#if DATA_SIZE == 16
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
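+ /* we longjmp'd out of the TB: drop any stale plugin mem-callback state */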
+ qemu_plugin_disable_mem_helpers(cpu);
}
if (cpu_in_exclusive_context(cpu)) {
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
+ qemu_plugin_disable_mem_helpers(cpu);
+
assert_no_pages_locked();
}
* Since this is placed within CPUNegativeOffsetState, the smallest
* negative offsets are at the end of the struct.
*/
+
typedef struct CPUTLB {
CPUTLBCommon c;
CPUTLBDesc d[NB_MMU_MODES];
#include "trace-root.h"
#endif
+#include "qemu/plugin.h"
#include "trace/mem.h"
#if DATA_SIZE == 8
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
-
#if !defined(SOFTMMU_CODE_ACCESS)
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
uintptr_t hostaddr = addr + entry->addend;
res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
}
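+ /* code fetches are never reported to plugins */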
+#ifndef SOFTMMU_CODE_ACCESS
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+#endif
return res;
}
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
-
#if !defined(SOFTMMU_CODE_ACCESS)
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
uintptr_t hostaddr = addr + entry->addend;
res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
}
+#ifndef SOFTMMU_CODE_ACCESS
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+#endif
return res;
}
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
-
#if !defined(SOFTMMU_CODE_ACCESS)
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
uintptr_t hostaddr = addr + entry->addend;
glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
}
+#ifndef SOFTMMU_CODE_ACCESS
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+#endif
}
static inline void
static inline RES_TYPE
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
-#ifdef CODE_ACCESS
RES_TYPE ret;
+#ifdef CODE_ACCESS
set_helper_retaddr(1);
ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
clear_helper_retaddr();
- return ret;
#else
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, false, MMU_USER_IDX));
- return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false,
+ MMU_USER_IDX);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+ ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
+ return ret;
}
#ifndef CODE_ACCESS
static inline int
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
-#ifdef CODE_ACCESS
int ret;
+#ifdef CODE_ACCESS
set_helper_retaddr(1);
ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
clear_helper_retaddr();
- return ret;
#else
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, true, MO_TE, false, MMU_USER_IDX));
- return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false,
+ MMU_USER_IDX);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+ ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
+ return ret;
}
#ifndef CODE_ACCESS
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
RES_TYPE v)
{
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, true, MMU_USER_IDX));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true,
+ MMU_USER_IDX);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
static inline void
#include "tcg-mo.h"
#include "trace-tcg.h"
#include "trace/mem.h"
+#include "exec/plugin-gen.h"
/* Reduce the number of ifdefs below. This assumes that all uses of
TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
tcg_debug_assert(idx == TB_EXIT_REQUESTED);
}
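+ /* plugin mem helpers must not remain armed across a TB exit */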
+ plugin_gen_disable_mem_helpers();
tcg_gen_op1i(INDEX_op_exit_tb, val);
}
tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0);
tcg_ctx->goto_tb_issue_mask |= 1 << idx;
#endif
+ plugin_gen_disable_mem_helpers();
/* When not chaining, we simply fall through to the "fallback" exit. */
if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
tcg_gen_op1i(INDEX_op_goto_tb, idx);
void tcg_gen_lookup_and_goto_ptr(void)
{
if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- TCGv_ptr ptr = tcg_temp_new_ptr();
+ TCGv_ptr ptr;
+
+ plugin_gen_disable_mem_helpers();
+ ptr = tcg_temp_new_ptr();
gen_helper_lookup_tb_ptr(ptr, cpu_env);
tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr);
}
}
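+/*
+ * Emit a placeholder for a plugin memory callback. plugin-gen fills it in
+ * (or discards it) at the end of translation, depending on the callbacks
+ * that loaded plugins have subscribed to.
+ */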
+static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info)
+{
+#ifdef CONFIG_PLUGIN
+ if (tcg_ctx->plugin_insn == NULL) {
+ return;
+ }
+ plugin_gen_empty_mem_callback(vaddr, info);
+#endif
+}
+
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
{
MemOp orig_memop;
+ uint16_t info = trace_mem_get_info(memop, idx, 0);
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0);
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
- addr, trace_mem_get_info(memop, idx, 0));
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
orig_memop = memop;
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
}
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
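+ /* callbacks are emitted after the access, so plugins see completed accesses */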
+ plugin_gen_mem_callbacks(addr, info);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
{
TCGv_i32 swap = NULL;
+ uint16_t info = trace_mem_get_info(memop, idx, 1);
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1);
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
- addr, trace_mem_get_info(memop, idx, 1));
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
swap = tcg_temp_new_i32();
}
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
+ plugin_gen_mem_callbacks(addr, info);
if (swap) {
tcg_temp_free_i32(swap);
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
{
MemOp orig_memop;
+ uint16_t info;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 1, 0);
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
- addr, trace_mem_get_info(memop, idx, 0));
+ info = trace_mem_get_info(memop, idx, 0);
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
orig_memop = memop;
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
}
gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
+ plugin_gen_mem_callbacks(addr, info);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
{
TCGv_i64 swap = NULL;
+ uint16_t info;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 1, 1);
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
- addr, trace_mem_get_info(memop, idx, 1));
+ info = trace_mem_get_info(memop, idx, 1);
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
swap = tcg_temp_new_i64();
}
gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
+ plugin_gen_mem_callbacks(addr, info);
if (swap) {
tcg_temp_free_i64(swap);
#include "exec/memop.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
+#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg-mo.h"
#include "tcg-target.h"