if (!fp_access_check(s)) {
return;
}
+ memop = finalize_memop_asimd(s, size);
} else {
if (size == 3 && opc == 2) {
/* PRFM - prefetch */
return;
}
if (opc == 3 && size > 1) {
unallocated_encoding(s);
return;
}
is_store = (opc == 0);
is_signed = !is_store && extract32(opc, 1, 1);
is_extended = (size < 3) && extract32(opc, 0, 1);
+ memop = finalize_memop(s, size + is_signed * MO_SIGN);
}
if (rn == 31) {
gen_check_sp_alignment(s);
}
dirty_addr = read_cpu_reg_sp(s, rn, 1);
tcg_rm = read_cpu_reg(s, rm, 1);
ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
- memop = finalize_memop(s, size + is_signed * MO_SIGN);
clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, memop);
if (is_vector) {
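The point of this hunk (and of the matching unsigned-immediate hunk below) is the hoist: memop is now finalized inside the is_vector/else split, so the FP/vector path can use finalize_memop_asimd() while the integer path keeps finalize_memop(), and gen_mte_check1() receives that finalized memop instead of one recomputed after address generation. For orientation, the integer finalizer is roughly the shape sketched here; this is a self-contained paraphrase with illustrative types and flag values, not QEMU's actual definitions (those live in target/arm/tcg/translate.h and include/exec/memop.h):

#include <stdbool.h>

typedef unsigned MemOp;

enum {
    MO_ALIGN = 1u << 4,   /* illustrative flag values only; QEMU's differ */
    MO_AMASK = 7u << 4,
    MO_BE    = 1u << 8,
};

typedef struct DisasContext {
    bool  align_mem;      /* SCTLR.A-style alignment checking enabled */
    MemOp be_data;        /* MO_BE or 0 for the current data endianness */
} DisasContext;

/*
 * Paraphrase of finalize_memop(): fold the alignment requirement and
 * the data endianness into the MemOp once, up front, so the MTE check
 * and the eventual load/store are sized from the same operation.
 */
static inline MemOp finalize_memop_sketch(DisasContext *s, MemOp opc)
{
    if (s->align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | s->be_data;
}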
if (!fp_access_check(s)) {
return;
}
+ memop = finalize_memop_asimd(s, size);
} else {
if (size == 3 && opc == 2) {
/* PRFM - prefetch */
return;
}
if (opc == 3 && size > 1) {
unallocated_encoding(s);
return;
}
is_store = (opc == 0);
is_signed = !is_store && extract32(opc, 1, 1);
is_extended = (size < 3) && extract32(opc, 0, 1);
+ memop = finalize_memop(s, size + is_signed * MO_SIGN);
}
if (rn == 31) {
gen_check_sp_alignment(s);
}
dirty_addr = read_cpu_reg_sp(s, rn, 1);
offset = imm12 << size;
tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
- memop = finalize_memop(s, size + is_signed * MO_SIGN);
clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, memop);
if (is_vector) {
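Same hoist, second decoder: this hunk differs from the register-offset one above only in the address arithmetic (a scaled immediate, imm12 << size, instead of an extended register) and in passing rn != 31 rather than true as the tag-checked argument of gen_mte_check1().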
/*
* Issue the MTE check vs the logical repeat count, before we
* promote consecutive little-endian elements below.
*/
clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
- total, finalize_memop(s, size));
+ total, finalize_memop_asimd(s, size));
/*
* Consecutive little-endian elements from a single register
* can be promoted to a larger little-endian operation.
*/
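The promotion this comment describes is byte-order arithmetic: consecutive little-endian elements occupy exactly the same bytes, in the same order, as a single wider little-endian value, so the translator may widen the access without changing what memory sees. A self-contained demonstration in plain C, independent of QEMU:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint16_t elems[4] = { 0x1122, 0x3344, 0x5566, 0x7788 };
    uint8_t  bytes[8];

    /* Serialize each element little-endian, element 0 first. */
    for (int i = 0; i < 4; i++) {
        bytes[2 * i]     = (uint8_t)(elems[i] & 0xff);
        bytes[2 * i + 1] = (uint8_t)(elems[i] >> 8);
    }

    /* Reassemble the same bytes as one little-endian 64-bit load. */
    uint64_t wide = 0;
    for (int i = 7; i >= 0; i--) {
        wide = (wide << 8) | bytes[i];
    }

    /* Four LE 16-bit accesses and one LE 64-bit access move the
     * identical byte sequence. */
    assert(wide == 0x7788556633441122ull);
    return 0;
}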
total = selem << scale;
tcg_rn = cpu_reg_sp(s, rn);
- mop = finalize_memop(s, scale);
+ mop = finalize_memop_asimd(s, scale);
clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
total, mop);
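For the single-structure form, too, the MTE check is issued once over the whole transfer, total = selem << scale bytes, using the same finalized memop the element accesses will use. A worked example, assuming an LD4 single-structure access of 32-bit lanes (the instruction named here is an illustration, not taken from the patch):

#include <assert.h>

int main(void)
{
    /* LD4 {v0.s, v1.s, v2.s, v3.s}[1], [x0]: one 32-bit lane per register. */
    unsigned selem = 4;              /* registers in the structure */
    unsigned scale = 2;              /* log2 of the element size in bytes */
    unsigned total = selem << scale;

    assert(total == 16);             /* one gen_mte_checkN() covers 16 bytes */
    return 0;
}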