return;
case GET_ASI_DTWINX:
- assert(TARGET_LONG_BITS == 64);
- tcg_gen_qemu_ld_tl(hi, addr, da->mem_idx, da->memop | MO_ALIGN_16);
- tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_ld_tl(lo, addr, da->mem_idx, da->memop);
+#ifdef TARGET_SPARC64
+ {
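+            /*
+             * Keep the byte order requested by the ASI, but replace the
+             * two 8-byte loads with a single 16-byte, 16-byte-aligned load.
+             */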
+ MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
+ TCGv_i128 t = tcg_temp_new_i128();
+
+ tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
+ /*
+ * Note that LE twinx acts as if each 64-bit register result is
+ * byte swapped. We perform one 128-bit LE load, so must swap
+ * the order of the writebacks.
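+             *
+             * tcg_gen_extr_i128_i64 writes the low 64 bits of t to its
+             * first output operand and the high 64 bits to its second.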
+ */
+ if ((mop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr_i128_i64(lo, hi, t);
+ } else {
+ tcg_gen_extr_i128_i64(hi, lo, t);
+ }
+ }
break;
+#else
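+        /* GET_ASI_DTWINX is never generated for 32-bit sparc. */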
+ g_assert_not_reached();
+#endif
case GET_ASI_DIRECT:
{
break;
case GET_ASI_DTWINX:
- assert(TARGET_LONG_BITS == 64);
- tcg_gen_qemu_st_tl(hi, addr, da->mem_idx, da->memop | MO_ALIGN_16);
- tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_st_tl(lo, addr, da->mem_idx, da->memop);
+#ifdef TARGET_SPARC64
+ {
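+            /*
+             * As on the load side, keep the ASI byte order but use one
+             * 16-byte, 16-byte-aligned store instead of two 8-byte stores.
+             */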
+ MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
+ TCGv_i128 t = tcg_temp_new_i128();
+
+ /*
+ * Note that LE twinx acts as if each 64-bit register result is
+ * byte swapped. We perform one 128-bit LE store, so must swap
+ * the order of the construction.
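+             *
+             * tcg_gen_concat_i64_i128 places its first source operand in
+             * the low 64 bits of t and its second in the high 64 bits.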
+ */
+ if ((mop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat_i64_i128(t, lo, hi);
+ } else {
+ tcg_gen_concat_i64_i128(t, hi, lo);
+ }
+ tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
+ }
break;
+#else
+ g_assert_not_reached();
+#endif
case GET_ASI_DIRECT:
{