DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_WG, i64, env, tl, tl)
DEF_HELPER_3(taddcctv, tl, env, tl, tl)
DEF_HELPER_3(tsubcctv, tl, env, tl, tl)
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
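+/*
+ * Out-of-line load from instruction space for the sparc32 code ASIs
+ * (ASI_USERTXT / ASI_KERNELTXT); the i32 argument is a MemOpIdx
+ * packing the MemOp and mmu index for the access.
+ */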
+DEF_HELPER_FLAGS_3(ld_code, TCG_CALL_NO_WG, i64, env, tl, i32)
+#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
DEF_HELPER_FLAGS_4(ld_asi, TCG_CALL_NO_WG, i64, env, tl, int, i32)
DEF_HELPER_FLAGS_5(st_asi, TCG_CALL_NO_WG, void, env, tl, i64, int, i32)
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
uint32_t last_addr = addr;
#endif
- MemOpIdx oi;
do_check_align(env, addr, size - 1, GETPC());
switch (asi) {
case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
break;
- case ASI_KERNELTXT: /* Supervisor code access */
- oi = make_memop_idx(memop, cpu_mmu_index(env_cpu(env), true));
- switch (size) {
- case 1:
- ret = cpu_ldb_code_mmu(env, addr, oi, GETPC());
- break;
- case 2:
- ret = cpu_ldw_code_mmu(env, addr, oi, GETPC());
- break;
- default:
- case 4:
- ret = cpu_ldl_code_mmu(env, addr, oi, GETPC());
- break;
- case 8:
- ret = cpu_ldq_code_mmu(env, addr, oi, GETPC());
- break;
- }
- break;
case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */
case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */
case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */
case 0x4c: /* SuperSPARC MMU Breakpoint Action */
ret = env->mmubpaction;
break;
- case ASI_USERTXT: /* User code access, XXX */
default:
sparc_raise_mmu_fault(cs, addr, false, false, asi, size, GETPC());
        ret = 0;
        break;

case ASI_USERDATA: /* User data access */
case ASI_KERNELDATA: /* Supervisor data access */
+ case ASI_USERTXT: /* User code access */
+ case ASI_KERNELTXT: /* Supervisor code access */
case ASI_P: /* Implicit primary context data access (v9 only?) */
case ASI_M_BYPASS: /* MMU passthrough */
case ASI_LEON_BYPASS: /* LEON MMU passthrough */
#endif
}
+uint64_t helper_ld_code(CPUSPARCState *env, target_ulong addr, uint32_t oi)
+{
+ MemOp mop = get_memop(oi);
+ uintptr_t ra = GETPC();
+ uint64_t ret;
+
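+    /*
+     * The MemOp unpacked from OI supplies the access size, sign and
+     * byte order; the mmu index still packed in OI is consumed by
+     * the cpu_ld*_code_mmu routines below.
+     */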
+ switch (mop & MO_SIZE) {
+ case MO_8:
+ ret = cpu_ldb_code_mmu(env, addr, oi, ra);
+ if (mop & MO_SIGN) {
+ ret = (int8_t)ret;
+ }
+ break;
+ case MO_16:
+ ret = cpu_ldw_code_mmu(env, addr, oi, ra);
+ if ((mop & MO_BSWAP) != MO_TE) {
+ ret = bswap16(ret);
+ }
+ if (mop & MO_SIGN) {
+ ret = (int16_t)ret;
+ }
+ break;
+ case MO_32:
+ ret = cpu_ldl_code_mmu(env, addr, oi, ra);
+ if ((mop & MO_BSWAP) != MO_TE) {
+ ret = bswap32(ret);
+ }
+ if (mop & MO_SIGN) {
+ ret = (int32_t)ret;
+ }
+ break;
+ case MO_64:
+ ret = cpu_ldq_code_mmu(env, addr, oi, ra);
+ if ((mop & MO_BSWAP) != MO_TE) {
+ ret = bswap64(ret);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return ret;
+}
+
#endif /* CONFIG_USER_ONLY */
#else /* TARGET_SPARC64 */
GET_ASI_EXCP,
GET_ASI_DIRECT,
GET_ASI_DTWINX,
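+    /* Load from instruction space via helper_ld_code (sparc32 only). */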
+ GET_ASI_CODE,
GET_ASI_BLOCK,
GET_ASI_SHORT,
GET_ASI_BCOPY,
|| (asi == ASI_USERDATA
&& (dc->def->features & CPU_FEATURE_CASA))) {
switch (asi) {
    case ASI_USERDATA: /* User data access */
mem_idx = MMU_USER_IDX;
type = GET_ASI_DIRECT;
break;
    case ASI_KERNELDATA: /* Supervisor data access */
mem_idx = MMU_KERNEL_IDX;
type = GET_ASI_DIRECT;
break;
+ case ASI_USERTXT: /* User text access */
+ mem_idx = MMU_USER_IDX;
+ type = GET_ASI_CODE;
+ break;
+ case ASI_KERNELTXT: /* Supervisor text access */
+ mem_idx = MMU_KERNEL_IDX;
+ type = GET_ASI_CODE;
+ break;
case ASI_M_BYPASS: /* MMU passthrough */
case ASI_LEON_BYPASS: /* LEON MMU passthrough */
mem_idx = MMU_PHYS_IDX;
case GET_ASI_DIRECT:
tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
break;
+
+ case GET_ASI_CODE:
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+ {
+ MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
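+        /*
+         * A TCG guest load would perform a data access; the helper
+         * performs the fetch with MMU_INST_FETCH instead.
+         */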
+ gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
+ tcg_gen_trunc_i64_tl(dst, t64);
+ }
+ break;
+#else
+ g_assert_not_reached();
+#endif
+
default:
{
TCGv_i32 r_asi = tcg_constant_i32(da->asi);
}
break;
+ case GET_ASI_CODE:
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+ {
+ MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
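+            /* Perform one 64-bit fetch from instruction space;
+               the two 32-bit halves are split out below. */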
+ gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
+
+            /*
+             * As in GET_ASI_DIRECT above: LE ldda acts as if each
+             * 32-bit register result is byte swapped, so having just
+             * performed one 64-bit load we must swap the writebacks.
+             */
+ if ((da->memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr_i64_tl(lo, hi, tmp);
+ } else {
+ tcg_gen_extr_i64_tl(hi, lo, tmp);
+ }
+ }
+ break;
+#else
+ g_assert_not_reached();
+#endif
+
default:
/* ??? In theory we've handled all of the ASIs that are valid
for ldda, and this should raise DAE_invalid_asi. However,