We will need a backend interface for performing 32-bit zero-extend.
Use it in tcg_reg_alloc_op in the meantime.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
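---

A condensed sketch of the resulting dispatch in tcg_reg_alloc_op (the other
extension opcodes and the vector branch of the real default case are omitted
here for brevity):

    switch (op->opc) {
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext32u_i64:
        /* New with this patch: emit via the per-backend hook. */
        tcg_out_ext32u(s, new_args[0], new_args[1]);
        break;
    default:
        /* Everything else still goes through the backend's tcg_out_op(). */
        tcg_out_op(s, op->opc, new_args, const_args);
        break;
    }

Backends where the opcode cannot occur (32-bit hosts such as arm) stub the
new hook with g_assert_not_reached().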
     tcg_out_uxt(s, MO_16, rd, rn);
 }
 
+static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    tcg_out_movr(s, TCG_TYPE_I32, rd, rn);
+}
+
 static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
                             TCGReg rn, int64_t aimm)
 {
         tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
         break;
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_ext32u_i64:
-        tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
+        tcg_out_ext32u(s, a0, a1);
         break;
     case INDEX_op_deposit_i64:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
     g_assert_not_reached();
 }
 
+static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    g_assert_not_reached();
+}
+
 static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                             TCGReg rd, TCGReg rn, int flags)
 {
     tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
 }
 
-static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
+static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
 {
     /* 32-bit mov zero extends. */
     tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
         tcg_out_bswap64(s, a0);
         break;
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_ext32u_i64:
     case INDEX_op_extrl_i64_i32:
         tcg_out_ext32u(s, a0, a1);
         break;
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
         tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
         break;
 
-    case INDEX_op_ext32u_i64:
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, a0, a1);
         break;
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
 static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
 {
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
     if (use_mips32r2_instructions) {
         tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0);
     } else {
     case INDEX_op_extrl_i64_i32:
         tcg_out_ext32s(s, a0, a1);
         break;
-    case INDEX_op_ext32u_i64:
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, a0, a1);
         break;
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
     tcg_out32(s, EXTSW | RA(dst) | RS(src));
 }
 
-static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
+static void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
 {
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
     tcg_out_rld(s, RLDICL, dst, src, 0, 32);
 }
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
         tcg_out_qemu_st(s, args, true);
         break;
 
-    case INDEX_op_ext32u_i64:
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, a0, a1);
         break;
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
     tcg_out_insn(s, RRE, LGFR, dest, src);
 }
 
-static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
+static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
 {
     tcg_out_insn(s, RRE, LLGFR, dest, src);
 }
     /* Look for the zero-extensions. */
     if ((val & valid) == 0xffffffff) {
-        tgen_ext32u(s, dest, dest);
+        tcg_out_ext32u(s, dest, dest);
         return;
     }
     if ((val & valid) == 0xff) {
     /* With MIE3, and bit 0 of m4 set, we get the complete result. */
     if (HAVE_FACILITY(MISC_INSN_EXT3)) {
         if (type == TCG_TYPE_I32) {
-            tgen_ext32u(s, dest, src);
+            tcg_out_ext32u(s, dest, src);
             src = dest;
         }
         tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
     case MO_UL | MO_BSWAP:
         /* swapped unsigned int load with upper bits zeroed */
         tcg_out_insn(s, RXY, LRV, data, base, index, disp);
-        tgen_ext32u(s, data, data);
+        tcg_out_ext32u(s, data, data);
         break;
     case MO_UL:
         tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
                  offsetof(CPUTLBEntry, addend));
 
     if (TARGET_LONG_BITS == 32) {
-        tgen_ext32u(s, TCG_REG_R3, addr_reg);
+        tcg_out_ext32u(s, TCG_REG_R3, addr_reg);
         return TCG_REG_R3;
     }
     return addr_reg;
         tcg_out_ext16u(s, TCG_REG_R4, data_reg);
         break;
     case MO_UL:
-        tgen_ext32u(s, TCG_REG_R4, data_reg);
+        tcg_out_ext32u(s, TCG_REG_R4, data_reg);
         break;
     case MO_UQ:
         tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
                                   TCGReg *index_reg, tcg_target_long *disp)
 {
     if (TARGET_LONG_BITS == 32) {
-        tgen_ext32u(s, TCG_TMP0, *addr_reg);
+        tcg_out_ext32u(s, TCG_TMP0, *addr_reg);
         *addr_reg = TCG_TMP0;
     }
     if (guest_base < 0x80000) {
         if (a2 & TCG_BSWAP_OS) {
             tcg_out_ext32s(s, a0, a0);
         } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
-            tgen_ext32u(s, a0, a0);
+            tcg_out_ext32u(s, a0, a0);
         }
         break;
         tcg_out_ext32s(s, args[0], args[1]);
         break;
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_ext32u_i64:
-        tgen_ext32u(s, args[0], args[1]);
+        tcg_out_ext32u(s, args[0], args[1]);
         break;
     case INDEX_op_add2_i64:
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
     tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
 }
 
+static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
         tcg_out_ext16u(s, r, r);
         break;
     case MO_32:
-        tcg_out_arith(s, r, r, 0, SHIFT_SRL);
+        tcg_out_ext32u(s, r, r);
         break;
     case MO_64:
         break;
     /* If the guest address must be zero-extended, do so now. */
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
+        tcg_out_ext32u(s, r0, addr);
         return r0;
     }
     return addr;
     unsigned t_bits;
 
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
+        tcg_out_ext32u(s, TCG_REG_T1, addr);
         addr = TCG_REG_T1;
     }
     unsigned t_bits;
 
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
+        tcg_out_ext32u(s, TCG_REG_T1, addr);
         addr = TCG_REG_T1;
     }
         tcg_out_ext32s(s, a0, a1);
         break;
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_ext32u_i64:
-        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
+        tcg_out_ext32u(s, a0, a1);
         break;
     case INDEX_op_extrl_i64_i32:
         tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
 static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
     case INDEX_op_ext32s_i64:
         tcg_out_ext32s(s, new_args[0], new_args[1]);
         break;
+    case INDEX_op_ext32u_i64:
+        tcg_out_ext32u(s, new_args[0], new_args[1]);
+        break;
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
     tcg_out_op_rr(s, INDEX_op_ext32s_i64, rd, rs);
 }
 
+static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+    tcg_debug_assert(TCG_TARGET_HAS_ext32u_i64);
+    tcg_out_op_rr(s, INDEX_op_ext32u_i64, rd, rs);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
     CASE_32_64(neg)      /* Optional (TCG_TARGET_HAS_neg_*). */
     CASE_32_64(not)      /* Optional (TCG_TARGET_HAS_not_*). */
-    CASE_64(ext32u)      /* Optional (TCG_TARGET_HAS_ext32u_i64). */
     CASE_64(ext_i32)
     CASE_64(extu_i32)
     CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }