* page. This is especially relevant to speed up TLB_NOTDIRTY.
*/
g_assert(size > 0);
- helper_ret_stb_mmu(env, vaddr, byte, oi, ra);
+ cpu_stb_mmu(env, vaddr, byte, oi, ra);
haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
if (likely(haddr)) {
memset(haddr + 1, byte, size - 1);
} else {
for (i = 1; i < size; i++) {
- helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra);
+ cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
}
}
}
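The hunk above is the fill (memset-like) helper: the first byte goes through cpu_stb_mmu(), which performs the TLB fill and handles TLB_NOTDIRTY and watchpoints, and only then is tlb_vaddr_to_host() asked for a direct host pointer so the remaining bytes can be written with a plain memset(); if the page still cannot be mapped, the loop falls back to per-byte MMU stores. Below is a minimal self-contained sketch of that probe-then-fast-path pattern, assuming QEMU's exec/cpu_ldst.h API; the name do_fill_guest_bytes and its exact signature are hypothetical and not part of this patch.

/* Sketch only: in-tree QEMU context assumed for these headers. */
#include "qemu/osdep.h"
#include "exec/cpu_ldst.h"   /* cpu_stb_mmu(), tlb_vaddr_to_host() */
#include "exec/memopidx.h"   /* make_memop_idx() */

/*
 * Hypothetical helper mirroring the hunk above.  Assumes the range
 * [addr, addr + size) does not cross a page boundary, as in the
 * per-page helper this patch touches.
 */
static void do_fill_guest_bytes(CPUArchState *env, uint64_t addr,
                                uint8_t byte, uint64_t size,
                                int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    void *haddr;
    uint64_t i;

    g_assert(size > 0);

    /*
     * One slow-path store first: it fills the TLB entry for the page
     * and takes care of TLB_NOTDIRTY tracking and watchpoints.
     */
    cpu_stb_mmu(env, addr, byte, oi, ra);

    /* If the page is now directly addressable, finish with memset(). */
    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
    if (likely(haddr)) {
        memset(haddr + 1, byte, size - 1);
    } else {
        /* Otherwise keep going byte by byte on the MMU slow path. */
        for (i = 1; i < size; i++) {
            cpu_stb_mmu(env, addr + i, byte, oi, ra);
        }
    }
}

Probing with a real MMU store rather than a mere access check is what speeds up the TLB_NOTDIRTY case mentioned in the comment: once the first store has marked the page dirty, tlb_vaddr_to_host() succeeds and no further per-byte calls are needed.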
* Do a single access and test if we can then get access to the
* page. This is especially relevant to speed up TLB_NOTDIRTY.
*/
- byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra);
+ byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra);
*haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
return byte;
#endif
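The load-side helper follows the same idea: one cpu_ldb_mmu() access pulls the page into the TLB, and the resulting host pointer (or NULL) is cached for the caller so subsequent bytes on the same page can be read directly. A hedged sketch, again assuming the cpu_ldst.h API; probe_read_byte and its out-parameter layout are illustrative, not the patch's actual helper.

/* Same headers as the previous sketch (qemu/osdep.h, exec/cpu_ldst.h, exec/memopidx.h). */
static uint8_t probe_read_byte(CPUArchState *env, uint64_t addr,
                               int offset, void **haddr,
                               int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

    /* One MMU load fills the TLB entry for the page. */
    uint8_t byte = cpu_ldb_mmu(env, addr + offset, oi, ra);

    /*
     * Cache the host mapping (or NULL) so the caller can switch to
     * direct loads for further bytes on the same page.
     */
    *haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
    return byte;
}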
* Do a single access and test if we can then get access to the
* page. This is especially relevant to speed up TLB_NOTDIRTY.
*/
- helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra);
+ cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
*haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
#endif
}
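The store-side hunk above mirrors the load side, probing with cpu_stb_mmu() and asking for MMU_DATA_STORE access. For context, a hypothetical caller of such probe helpers might look like the sketch below (single-page ranges assumed, names invented for illustration): once both pages have been touched once and tlb_vaddr_to_host() succeeds, the copy proceeds entirely through host pointers.

/* Same headers as the previous sketches; do_copy_guest_bytes is hypothetical. */
static void do_copy_guest_bytes(CPUArchState *env, uint64_t dst,
                                uint64_t src, uint16_t size,
                                int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    void *shost = NULL, *dhost = NULL;

    /* Assumes [src, src+size) and [dst, dst+size) each stay within one page. */
    for (int i = 0; i < size; i++) {
        uint8_t byte;

        if (shost && dhost) {
            /* Fast path: both pages are host-addressable. */
            ((uint8_t *)dhost)[i] = ((uint8_t *)shost)[i];
            continue;
        }

        /* Slow path: one MMU access each, then retry the host mapping. */
        byte = cpu_ldb_mmu(env, src + i, oi, ra);
        shost = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        cpu_stb_mmu(env, dst + i, byte, oi, ra);
        dhost = tlb_vaddr_to_host(env, dst, MMU_DATA_STORE, mmu_idx);
    }
}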