From: Richard Henderson
Date: Mon, 9 Dec 2024 03:39:01 +0000 (-0600)
Subject: tcg/optimize: Use fold_masks_zs in fold_xor
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=c890fd71794601431694ce0650055fbe927a1d8e;p=qemu.git

tcg/optimize: Use fold_masks_zs in fold_xor

Avoid the use of the OptContext slots.  Find TempOptInfo once.
Remove fold_masks as the function becomes unused.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 047cb5a1ee..d543266b8d 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1077,11 +1077,6 @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
     return fold_masks_zs(ctx, op, -1, s_mask);
 }
 
-static bool fold_masks(OptContext *ctx, TCGOp *op)
-{
-    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
-}
-
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input.  Thus if the entire mask is 0, the operation
@@ -2769,6 +2764,9 @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
 
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1, *t2;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, 0) ||
@@ -2776,11 +2774,11 @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    ctx->z_mask = arg_info(op->args[1])->z_mask
-                | arg_info(op->args[2])->z_mask;
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return fold_masks(ctx, op);
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z_mask = t1->z_mask | t2->z_mask;
+    s_mask = t1->s_mask & t2->s_mask;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
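
As an aside, the mask combination used above can be sanity-checked in isolation.
The sketch below is not QEMU code: it models z_mask as "bits that may be 1" and
s_mask as "high-order bits known to be copies of the sign bit" (which is how
fold_xor combines them), and brute-forces both rules over all 8-bit operand
pairs.  The helper name rep8 and the 8-bit width are arbitrary choices made for
this check only.

/*
 * Standalone sanity check (illustrative only, not QEMU code).
 * For XOR the patch uses
 *     z_mask = z1 | z2   and   s_mask = s1 & s2,
 * which this program verifies exhaustively for 8-bit values.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mask of the top run of bits equal to the sign bit (bit 7), inclusive. */
static uint8_t rep8(uint8_t v)
{
    unsigned sign = (v >> 7) & 1;
    uint8_t mask = 0;

    for (int i = 7; i >= 0; i--) {
        if (((v >> i) & 1) != sign) {
            break;
        }
        mask |= (uint8_t)(1u << i);
    }
    return mask;
}

int main(void)
{
    for (unsigned x = 0; x < 256; x++) {
        for (unsigned y = 0; y < 256; y++) {
            uint8_t r = (uint8_t)(x ^ y);

            /* Any set bit of x ^ y is set in x or y: r fits within z1 | z2. */
            assert((r & ~(x | y)) == 0);

            /* Bits sign-replicated in both inputs remain sign-replicated in r. */
            assert(((rep8((uint8_t)x) & rep8((uint8_t)y)) & ~rep8(r)) == 0);
        }
    }
    printf("xor mask rules hold for all 8-bit pairs\n");
    return 0;
}

Both assertions come down to two observations: a bit of x ^ y can only be 1
where x or y may be 1, and bits that replicate the sign in both inputs still
replicate the (new) sign of the result, so the common replicated region is at
least s1 & s2.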