TRANS_FEAT(SM3TT2A, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt2a)
TRANS_FEAT(SM3TT2B, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt2b)
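+/* Crypto XAR (FEAT_SHA3): exclusive-OR and rotate. */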
+static bool trans_XAR(DisasContext *s, arg_XAR *a)
+{
+    if (!dc_isar_feature(aa64_sha3, s)) {
+        return false;
+    }
+    if (fp_access_check(s)) {
+        gen_gvec_xar(MO_64, vec_full_reg_offset(s, a->rd),
+                     vec_full_reg_offset(s, a->rn),
+                     vec_full_reg_offset(s, a->rm), a->imm, 16,
+                     vec_full_reg_size(s));
+    }
+    return true;
+}
+
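For reference, trans_XAR consumes an arg_XAR filled in by the decodetree front end, so the conversion implies a matching pattern in the A64 decode file (a64.decode). A minimal sketch, assuming the encoding documented in the removed comment below (fixed bits 0xce800000 under mask 0xffe00000) and the rd/rn/rm/imm field names used above:

    # Crypto XAR (hypothetical sketch; the exact pattern in the series may differ)
    XAR             1100 1110 100 rm:5 imm:6 rn:5 rd:5

The FEAT_SHA3 gate stays in the C handler via dc_isar_feature rather than in the pattern, mirroring the TRANS_FEAT helpers above.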
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
* Note that it is the caller's responsibility to ensure that the
* shift amount is in range (ie 0..31 or 0..63) and provide the ARM
}
}
-/* Crypto XAR
- *  31                   21 20  16 15    10 9    5 4    0
- * +-----------------------+------+--------+------+------+
- * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
- * +-----------------------+------+--------+------+------+
- */
-static void disas_crypto_xar(DisasContext *s, uint32_t insn)
-{
-    int rm = extract32(insn, 16, 5);
-    int imm6 = extract32(insn, 10, 6);
-    int rn = extract32(insn, 5, 5);
-    int rd = extract32(insn, 0, 5);
-
-    if (!dc_isar_feature(aa64_sha3, s)) {
-        unallocated_encoding(s);
-        return;
-    }
-
-    if (!fp_access_check(s)) {
-        return;
-    }
-
-    gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
-                 vec_full_reg_offset(s, rn),
-                 vec_full_reg_offset(s, rm), imm6, 16,
-                 vec_full_reg_size(s));
-}
-
/* C3.6 Data processing - SIMD, inc Crypto
*
* As the decode gets a little complex we are using a table based
{ 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
{ 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
{ 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
-    { 0xce800000, 0xffe00000, disas_crypto_xar },
{ 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
{ 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
{ 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },