Remove tcg_gen_trunc_i64_i32, replacing it with tcg_gen_extrl_i64_i32.
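
Both functions take the low 32 bits of a 64-bit value, as the
compatibility wrapper removed from tcg-op.h below makes explicit;
this is a pure rename.  For illustration, a sketch of the recurring
split-a-64-bit-value idiom touched throughout the patch (the helper
name and temporaries here are invented for this example, not part
of the patch):

    /* Copy the two 32-bit halves of VAL into LO and HI.  */
    static void example_extract_halves(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 val)
    {
        TCGv_i64 tmp = tcg_temp_new_i64();

        tcg_gen_extrl_i64_i32(lo, val);      /* lo = (uint32_t)val */
        tcg_gen_shri_i64(tmp, val, 32);
        tcg_gen_extrl_i64_i32(hi, tmp);      /* hi = (uint32_t)(val >> 32) */
        tcg_temp_free_i64(tmp);

        /* Equivalent single op, as already used by the tricore code below:
           tcg_gen_extr_i64_i32(lo, hi, val);  */
    }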
Signed-off-by: Richard Henderson <rth@twiddle.net>
REQUIRE_REG_31(rb);
t32 = tcg_temp_new_i32();
va = load_gpr(ctx, ra);
- tcg_gen_trunc_i64_i32(t32, va);
+ tcg_gen_extrl_i64_i32(t32, va);
gen_helper_memory_to_s(vc, t32);
tcg_temp_free_i32(t32);
break;
REQUIRE_REG_31(rb);
t32 = tcg_temp_new_i32();
va = load_gpr(ctx, ra);
- tcg_gen_trunc_i64_i32(t32, va);
+ tcg_gen_extrl_i64_i32(t32, va);
gen_helper_memory_to_f(vc, t32);
tcg_temp_free_i32(t32);
break;
TCGv_i64 flag = tcg_temp_new_i64();
tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
- tcg_gen_trunc_i64_i32(cpu_ZF, flag);
+ tcg_gen_extrl_i64_i32(cpu_ZF, flag);
tcg_gen_shri_i64(flag, result, 32);
- tcg_gen_trunc_i64_i32(cpu_NF, flag);
+ tcg_gen_extrl_i64_i32(cpu_NF, flag);
tcg_temp_free_i64(flag);
}
if (sf) {
gen_set_NZ64(result);
} else {
- tcg_gen_trunc_i64_i32(cpu_ZF, result);
- tcg_gen_trunc_i64_i32(cpu_NF, result);
+ tcg_gen_extrl_i64_i32(cpu_ZF, result);
+ tcg_gen_extrl_i64_i32(cpu_NF, result);
}
tcg_gen_movi_i32(cpu_CF, 0);
tcg_gen_movi_i32(cpu_VF, 0);
tcg_gen_movi_i64(tmp, 0);
tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
- tcg_gen_trunc_i64_i32(cpu_CF, flag);
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
gen_set_NZ64(result);
tcg_gen_andc_i64(flag, flag, tmp);
tcg_temp_free_i64(tmp);
tcg_gen_shri_i64(flag, flag, 32);
- tcg_gen_trunc_i64_i32(cpu_VF, flag);
+ tcg_gen_extrl_i64_i32(cpu_VF, flag);
tcg_gen_mov_i64(dest, result);
tcg_temp_free_i64(result);
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
- tcg_gen_trunc_i64_i32(t0_32, t0);
- tcg_gen_trunc_i64_i32(t1_32, t1);
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
gen_set_NZ64(result);
tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
- tcg_gen_trunc_i64_i32(cpu_CF, flag);
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
tcg_gen_xor_i64(flag, result, t0);
tmp = tcg_temp_new_i64();
tcg_gen_and_i64(flag, flag, tmp);
tcg_temp_free_i64(tmp);
tcg_gen_shri_i64(flag, flag, 32);
- tcg_gen_trunc_i64_i32(cpu_VF, flag);
+ tcg_gen_extrl_i64_i32(cpu_VF, flag);
tcg_gen_mov_i64(dest, result);
tcg_temp_free_i64(flag);
tcg_temp_free_i64(result);
TCGv_i32 t1_32 = tcg_temp_new_i32();
TCGv_i32 tmp;
- tcg_gen_trunc_i64_i32(t0_32, t0);
- tcg_gen_trunc_i64_i32(t1_32, t1);
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
tcg_gen_extu_i32_i64(cf_64, cpu_CF);
tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
- tcg_gen_trunc_i64_i32(cpu_CF, cf_64);
+ tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
gen_set_NZ64(result);
tcg_gen_xor_i64(vf_64, result, t0);
tcg_gen_xor_i64(tmp, t0, t1);
tcg_gen_andc_i64(vf_64, vf_64, tmp);
tcg_gen_shri_i64(vf_64, vf_64, 32);
- tcg_gen_trunc_i64_i32(cpu_VF, vf_64);
+ tcg_gen_extrl_i64_i32(cpu_VF, vf_64);
tcg_gen_mov_i64(dest, result);
t1_32 = tcg_temp_new_i32();
tmp = tcg_const_i32(0);
- tcg_gen_trunc_i64_i32(t0_32, t0);
- tcg_gen_trunc_i64_i32(t1_32, t1);
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);
TCGv_i32 nzcv = tcg_temp_new_i32();
/* take NZCV from R[t] */
- tcg_gen_trunc_i64_i32(nzcv, tcg_rt);
+ tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
/* bit 31, N */
tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
TCGv_i32 t0, t1;
t0 = tcg_temp_new_i32();
t1 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(t0, src);
- tcg_gen_trunc_i64_i32(t1, shift_amount);
+ tcg_gen_extrl_i64_i32(t0, src);
+ tcg_gen_extrl_i64_i32(t1, shift_amount);
tcg_gen_rotr_i32(t0, t0, t1);
tcg_gen_extu_i32_i64(dst, t0);
tcg_temp_free_i32(t0);
gen_helper_clz64(tcg_rd, tcg_rn);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
gen_helper_clz(tcg_tmp32, tcg_tmp32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
gen_helper_cls64(tcg_rd, tcg_rn);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
gen_helper_cls32(tcg_tmp32, tcg_tmp32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
gen_helper_rbit64(tcg_rd, tcg_rn);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
gen_helper_rbit(tcg_tmp32, tcg_tmp32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
assert(elements == 4);
read_vec_element(s, tcg_elt, rn, 0, MO_32);
- tcg_gen_trunc_i64_i32(tcg_elt1, tcg_elt);
+ tcg_gen_extrl_i64_i32(tcg_elt1, tcg_elt);
read_vec_element(s, tcg_elt, rn, 1, MO_32);
- tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
+ tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
read_vec_element(s, tcg_elt, rn, 2, MO_32);
- tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
+ tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
read_vec_element(s, tcg_elt, rn, 3, MO_32);
- tcg_gen_trunc_i64_i32(tcg_elt3, tcg_elt);
+ tcg_gen_extrl_i64_i32(tcg_elt3, tcg_elt);
do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);
static NeonGenNarrowFn * const xtnfns[3] = {
gen_helper_neon_narrow_u8,
gen_helper_neon_narrow_u16,
- tcg_gen_trunc_i64_i32,
+ tcg_gen_extrl_i64_i32,
};
static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
gen_helper_neon_unarrow_sat8,
} else {
TCGv_i32 tcg_lo = tcg_temp_new_i32();
TCGv_i32 tcg_hi = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tcg_lo, tcg_op);
+ tcg_gen_extrl_i64_i32(tcg_lo, tcg_op);
gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
tcg_gen_shri_i64(tcg_op, tcg_op, 32);
- tcg_gen_trunc_i64_i32(tcg_hi, tcg_op);
+ tcg_gen_extrl_i64_i32(tcg_hi, tcg_op);
gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
tcg_temp_free_i32(tcg_lo);
static void do_narrow_high_u32(TCGv_i32 res, TCGv_i64 in)
{
tcg_gen_shri_i64(in, in, 32);
- tcg_gen_trunc_i64_i32(res, in);
+ tcg_gen_extrl_i64_i32(res, in);
}
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
} else {
tmp = tcg_temp_new_i32();
iwmmxt_load_reg(cpu_V0, rd);
- tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
}
tcg_gen_andi_i32(tmp, tmp, mask);
tcg_gen_mov_i32(dest, tmp);
rdhi = (insn >> 16) & 0xf;
if (insn & ARM_CP_RW_BIT) { /* TMRRC */
iwmmxt_load_reg(cpu_V0, wrd);
- tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
} else { /* TMCRR */
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
iwmmxt_store_reg(cpu_V0, wrd);
if (insn & (1 << 22)) { /* WSTRD */
gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
} else { /* WSTRW wRd */
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
gen_aa32_st32(tmp, addr, get_mem_index(s));
}
} else {
if (insn & (1 << 22)) { /* WSTRH */
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
gen_aa32_st16(tmp, addr, get_mem_index(s));
} else { /* WSTRB */
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
gen_aa32_st8(tmp, addr, get_mem_index(s));
}
}
switch ((insn >> 22) & 3) {
case 0:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
if (insn & 8) {
tcg_gen_ext8s_i32(tmp, tmp);
} else {
break;
case 1:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
if (insn & 8) {
tcg_gen_ext16s_i32(tmp, tmp);
} else {
break;
case 2:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
break;
}
store_reg(s, rd, tmp);
if (insn & ARM_CP_RW_BIT) { /* MRA */
iwmmxt_load_reg(cpu_V0, acc);
- tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
} else { /* MAR */
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
} else {
gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
}
- tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
+ tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
tcg_temp_free_i32(tcg_tmp);
tcg_temp_free_i64(tcg_res);
switch (size) {
case 0: gen_helper_neon_narrow_u8(dest, src); break;
case 1: gen_helper_neon_narrow_u16(dest, src); break;
- case 2: tcg_gen_trunc_i64_i32(dest, src); break;
+ case 2: tcg_gen_extrl_i64_i32(dest, src); break;
default: abort();
}
}
break;
case 2:
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
break;
default: abort();
}
case 2:
tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
break;
default: abort();
}
tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
}
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
store_reg(s, rt, tmp);
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rt2, tmp);
} else {
{
TCGv_i32 tmp;
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, val);
+ tcg_gen_extrl_i64_i32(tmp, val);
store_reg(s, rlow, tmp);
tmp = tcg_temp_new_i32();
tcg_gen_shri_i64(val, val, 32);
- tcg_gen_trunc_i64_i32(tmp, val);
+ tcg_gen_extrl_i64_i32(tmp, val);
store_reg(s, rhigh, tmp);
}
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rn, tmp);
break;
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if (rs != 15)
{
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
break;
case 7: /* Unsigned sum of absolute differences. */
tcg_temp_free(addr);
for (i = 0; i < (nr >> 1); i++) {
- tcg_gen_trunc_i64_i32(cpu_R[i * 2], tmp[i]);
+ tcg_gen_extrl_i64_i32(cpu_R[i * 2], tmp[i]);
tcg_gen_shri_i64(tmp[i], tmp[i], 32);
- tcg_gen_trunc_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
+ tcg_gen_extrl_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
tcg_temp_free_i64(tmp[i]);
}
if (nr & 1) {
if (s->env->macsr & MACSR_FI) {
gen_helper_get_macf(rx, cpu_env, acc);
} else if ((s->env->macsr & MACSR_OMC) == 0) {
- tcg_gen_trunc_i64_i32(rx, acc);
+ tcg_gen_extrl_i64_i32(rx, acc);
} else if (s->env->macsr & MACSR_SU) {
gen_helper_get_macs(rx, acc);
} else {
tcg_gen_ext_i32_i64(t1, b);
tcg_gen_mul_i64(t0, t0, t1);
- tcg_gen_trunc_i64_i32(d, t0);
+ tcg_gen_extrl_i64_i32(d, t0);
tcg_gen_shri_i64(t0, t0, 32);
- tcg_gen_trunc_i64_i32(d2, t0);
+ tcg_gen_extrl_i64_i32(d2, t0);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
tcg_gen_extu_i32_i64(t1, b);
tcg_gen_mul_i64(t0, t0, t1);
- tcg_gen_trunc_i64_i32(d, t0);
+ tcg_gen_extrl_i64_i32(d, t0);
tcg_gen_shri_i64(t0, t0, 32);
- tcg_gen_trunc_i64_i32(d2, t0);
+ tcg_gen_extrl_i64_i32(d2, t0);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
if (ctx->hflags & MIPS_HFLAG_FRE) {
generate_exception(ctx, EXCP_RI);
}
- tcg_gen_trunc_i64_i32(t, fpu_f64[reg]);
+ tcg_gen_extrl_i64_i32(t, fpu_f64[reg]);
}
static void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
if (ctx->hflags & MIPS_HFLAG_F64) {
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_shri_i64(t64, fpu_f64[reg], 32);
- tcg_gen_trunc_i64_i32(t, t64);
+ tcg_gen_extrl_i64_i32(t, t64);
tcg_temp_free_i64(t64);
} else {
gen_load_fpr32(ctx, t, reg | 1);
tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
tcg_gen_add_i64(td, ta, tb);
- tcg_gen_trunc_i64_i32(res, td);
+ tcg_gen_extrl_i64_i32(res, td);
tcg_gen_shri_i64(td, td, 31);
tcg_gen_andi_i64(td, td, 0x3);
/* Jump to lab when no overflow. */
tcg_gen_shri_i64(tcy, tcy, 10);
tcg_gen_add_i64(td, ta, tb);
tcg_gen_add_i64(td, td, tcy);
- tcg_gen_trunc_i64_i32(res, td);
+ tcg_gen_extrl_i64_i32(res, td);
tcg_gen_shri_i64(td, td, 32);
tcg_gen_andi_i64(td, td, 0x3);
/* Jump to lab when no overflow. */
tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
tcg_gen_sub_i64(td, ta, tb);
- tcg_gen_trunc_i64_i32(res, td);
+ tcg_gen_extrl_i64_i32(res, td);
tcg_gen_shri_i64(td, td, 31);
tcg_gen_andi_i64(td, td, 0x3);
/* Jump to lab when no overflow. */
tcg_gen_ext_i32_i64(t1, dst);
tcg_gen_concat_i32_i64(t2, maclo, machi);
tcg_gen_add_i64(t2, t2, t1);
- tcg_gen_trunc_i64_i32(maclo, t2);
+ tcg_gen_extrl_i64_i32(maclo, t2);
tcg_gen_shri_i64(t2, t2, 32);
- tcg_gen_trunc_i64_i32(machi, t2);
+ tcg_gen_extrl_i64_i32(machi, t2);
tcg_temp_free_i32(dst);
tcg_temp_free(ttmp);
tcg_temp_free_i64(t1);
TCGv_i32 sr_ove = tcg_temp_local_new_i32();
tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
tcg_gen_addi_i64(td, ta, sign_extend(I16, 16));
- tcg_gen_trunc_i64_i32(res, td);
+ tcg_gen_extrl_i64_i32(res, td);
tcg_gen_shri_i64(td, td, 32);
tcg_gen_andi_i64(td, td, 0x3);
/* Jump to lab when no overflow. */
tcg_gen_extu_i32_i64(tcy, sr_cy);
tcg_gen_addi_i64(td, ta, sign_extend(I16, 16));
tcg_gen_add_i64(td, td, tcy);
- tcg_gen_trunc_i64_i32(res, td);
+ tcg_gen_extrl_i64_i32(res, td);
tcg_gen_shri_i64(td, td, 32);
tcg_gen_andi_i64(td, td, 0x3);
/* Jump to lab when no overflow. */
tcg_gen_ext_i32_i64(t1, t0);
tcg_gen_concat_i32_i64(t2, maclo, machi);
tcg_gen_add_i64(t2, t2, t1);
- tcg_gen_trunc_i64_i32(maclo, t2);
+ tcg_gen_extrl_i64_i32(maclo, t2);
tcg_gen_shri_i64(t2, t2, 32);
- tcg_gen_trunc_i64_i32(machi, t2);
+ tcg_gen_extrl_i64_i32(machi, t2);
tcg_temp_free_i32(t0);
tcg_temp_free_i64(t1);
tcg_temp_free_i64(t2);
tcg_gen_ext_i32_i64(t1, t0);
tcg_gen_concat_i32_i64(t2, maclo, machi);
tcg_gen_sub_i64(t2, t2, t1);
- tcg_gen_trunc_i64_i32(maclo, t2);
+ tcg_gen_extrl_i64_i32(maclo, t2);
tcg_gen_shri_i64(t2, t2, 32);
- tcg_gen_trunc_i64_i32(machi, t2);
+ tcg_gen_extrl_i64_i32(machi, t2);
tcg_temp_free_i32(t0);
tcg_temp_free_i64(t1);
tcg_temp_free_i64(t2);
case CC_OP_LTGT0_32:
c->is_64 = false;
c->u.s32.a = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
+ tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
c->u.s32.b = tcg_const_i32(0);
break;
case CC_OP_LTGT_32:
case CC_OP_SUBU_32:
c->is_64 = false;
c->u.s32.a = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
+ tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
c->u.s32.b = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
+ tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
break;
case CC_OP_LTGT0_64:
c->is_64 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
+ tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
tcg_gen_movi_i32(c->u.s32.b, 0);
} else {
- tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
+ tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
}
break;
store_reg32_i64(r1, t);
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_const_i32(0);
- tcg_gen_trunc_i64_i32(c.u.s32.a, t);
+ tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_temp_free_i64(t);
return help_branch(s, &c, is_imm, imm, o->in2);
store_reg32h_i64(r1, t);
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_const_i32(0);
- tcg_gen_trunc_i64_i32(c.u.s32.a, t);
+ tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_temp_free_i64(t);
return help_branch(s, &c, 1, imm, o->in2);
tcg_gen_add_i64(t, regs[r1], regs[r3]);
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(c.u.s32.a, t);
- tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
+ tcg_gen_extrl_i64_i32(c.u.s32.a, t);
+ tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
store_reg32_i64(r1, t);
tcg_temp_free_i64(t);
{
TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
TCGv_i32 t1 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(t1, o->in1);
+ tcg_gen_extrl_i64_i32(t1, o->in1);
potential_page_fault(s);
gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
set_cc_static(s);
/* Store CC back to cc_op. Wait until after the store so that any
exception gets the old cc_op value. */
- tcg_gen_trunc_i64_i32(cc_op, cc);
+ tcg_gen_extrl_i64_i32(cc_op, cc);
tcg_temp_free_i64(cc);
set_cc_static(s);
return NO_EXIT;
/* Save back state now that we've passed all exceptions. */
tcg_gen_mov_i64(regs[r1], outh);
tcg_gen_mov_i64(regs[r1 + 1], outl);
- tcg_gen_trunc_i64_i32(cc_op, cc);
+ tcg_gen_extrl_i64_i32(cc_op, cc);
tcg_temp_free_i64(outh);
tcg_temp_free_i64(outl);
tcg_temp_free_i64(cc);
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i32 t2 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(t2, o->in1);
+ tcg_gen_extrl_i64_i32(t2, o->in1);
gen_helper_cvd(t1, t2);
tcg_temp_free_i32(t2);
tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 to = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(t1, o->in1);
- tcg_gen_trunc_i64_i32(t2, o->in2);
+ tcg_gen_extrl_i64_i32(t1, o->in1);
+ tcg_gen_extrl_i64_i32(t2, o->in2);
tcg_gen_rotl_i32(to, t1, t2);
tcg_gen_extu_i32_i64(o->out, to);
tcg_temp_free_i32(t1);
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, t);
+ tcg_gen_extrl_i64_i32(tmp, t);
tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
tcg_gen_shri_i64(t, t, 32);
- tcg_gen_trunc_i64_i32(tmp, t);
+ tcg_gen_extrl_i64_i32(tmp, t);
tcg_gen_mov_i32(cpu_fregs[reg], tmp);
tcg_temp_free_i32(tmp);
}
TCGv_i64 t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
- tcg_gen_trunc_i64_i32(ret, t);
+ tcg_gen_extrl_i64_i32(ret, t);
tcg_temp_free_i64(t);
return ret;
#if TARGET_LONG_BITS == 64
cc_src1_32 = tcg_temp_new_i32();
cc_src2_32 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
- tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
+ tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
+ tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
cc_src1_32 = cpu_cc_dst;
cc_src2_32 = cpu_cc_src;
#if TARGET_LONG_BITS == 64
cc_src1_32 = tcg_temp_new_i32();
cc_src2_32 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
- tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
+ tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
+ tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
cc_src1_32 = cpu_cc_src;
cc_src2_32 = cpu_cc_src2;
            the latter. */
c32 = tcg_temp_new_i32();
if (cmp->is_bool) {
- tcg_gen_trunc_i64_i32(c32, cmp->c1);
+ tcg_gen_extrl_i64_i32(c32, cmp->c1);
} else {
TCGv_i64 c64 = tcg_temp_new_i64();
tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
- tcg_gen_trunc_i64_i32(c32, c64);
+ tcg_gen_extrl_i64_i32(c32, c64);
tcg_temp_free_i64(c64);
}
tcg_gen_mul_i64(t1, t1, t3);
tcg_gen_add_i64(t1, t2, t1);
- tcg_gen_trunc_i64_i32(ret, t1);
+ tcg_gen_extrl_i64_i32(ret, t1);
/* calc V
t1 > 0x7fffffff */
tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
/* t1 < -0x80000000 */
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
tcg_gen_or_i64(t2, t2, t3);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* only the add overflows, if t2 < t1
calc V bit */
tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
tcg_gen_sari_i64(t2, t2, up_shift);
tcg_gen_add_i64(t3, t1, t2);
- tcg_gen_trunc_i64_i32(temp3, t3);
+ tcg_gen_extrl_i64_i32(temp3, t3);
/* calc v bit */
tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
tcg_gen_or_i64(t1, t1, t2);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* We produce an overflow on the host if the mul before was
(0x80000000 * 0x80000000) << 1). If this is the
tcg_gen_mul_i64(t1, t1, t3);
tcg_gen_sub_i64(t1, t2, t1);
- tcg_gen_trunc_i64_i32(ret, t1);
+ tcg_gen_extrl_i64_i32(ret, t1);
/* calc V
t2 > 0x7fffffff */
tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
/* result < -0x80000000 */
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
tcg_gen_or_i64(t2, t2, t3);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
/* calc V bit, only the sub can overflow, if t1 > t2 */
tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
tcg_gen_add_i64(t2, t2, t4);
tcg_gen_sub_i64(t3, t1, t2);
- tcg_gen_trunc_i64_i32(temp3, t3);
+ tcg_gen_extrl_i64_i32(temp3, t3);
/* calc v bit */
tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
tcg_gen_or_i64(t1, t1, t2);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
TCGv_i64 tmp = tcg_temp_new_i64(); \
tcg_gen_extu_i32_i64(tmp, reg); \
tcg_gen_##cmd##_i64(v, v, tmp); \
- tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
+ tcg_gen_extrl_i64_i32(cpu_R[RRR_R], v); \
tcg_temp_free_i64(v); \
tcg_temp_free_i64(tmp); \
} while (0)
tcg_gen_deposit_i64(ret, lo, hi, 32, 32);
}
-static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
-{
- tcg_gen_extrl_i64_i32(ret, arg);
-}
-
/* QEMU specific operations. */
#ifndef TARGET_LONG_BITS
#define tcg_gen_divu_tl tcg_gen_divu_i64
#define tcg_gen_remu_tl tcg_gen_remu_i64
#define tcg_gen_discard_tl tcg_gen_discard_i64
-#define tcg_gen_trunc_tl_i32 tcg_gen_trunc_i64_i32
+#define tcg_gen_trunc_tl_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_tl tcg_gen_mov_i64
#define tcg_gen_extu_i32_tl tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_tl tcg_gen_ext_i32_i64
#define tcg_gen_remu_tl tcg_gen_remu_i32
#define tcg_gen_discard_tl tcg_gen_discard_i32
#define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32
-#define tcg_gen_trunc_i64_tl tcg_gen_trunc_i64_i32
+#define tcg_gen_trunc_i64_tl tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_tl tcg_gen_mov_i32
#define tcg_gen_ext_i32_tl tcg_gen_mov_i32
#define tcg_gen_extu_tl_i64 tcg_gen_extu_i32_i64