On Thu, Nov 6, 2025 at 6:19 AM <[email protected]> wrote:
>
> From: Pan Li <[email protected]>
>
> There are 3 kinds of widen_mul during the unsigned SAT_MUL pattern, aka
> * widen_mul directly, like _3 w* _4
> * convert and the widen_mul, like (uint64_t)_3 w* (uint64_t)_4
> * convert and then mul, like (uint64_t)_3 * (uint64_t)_4
>
> All of them will be referenced during different forms of unsigned
> SAT_MUL pattern match, but actually we can wrap them into a helper
> which presents the "widening_mul" semantics.  With this helper, some
> unnecessary patterns and duplicated code could be eliminated.  Like the
> min based pattern, this patch focuses on the bit_ior based pattern.
>
> The below test suites are passed for this patch:
> 1. The rv64gcv fully regression tests.
> 2. The x86 bootstrap tests.
> 3. The x86 fully regression tests.

OK.

Thanks,
Richard.

> gcc/ChangeLog:
>
>         * match.pd: Leverage usmul_widen_mult by bit_ior based
>         unsigned SAT_MUL pattern.
>
> Signed-off-by: Pan Li <[email protected]>
> ---
>  gcc/match.pd | 44 +++++++++-----------------------------------
>  1 file changed, 9 insertions(+), 35 deletions(-)
>
> diff --git a/gcc/match.pd b/gcc/match.pd
> index 0ea86d97416..3cd9ab1e9b0 100644
> --- a/gcc/match.pd
> +++ b/gcc/match.pd
> @@ -3726,53 +3726,27 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
>        bool c2_is_type_precision_p = c2 == prec;
>       }
>       (if (widen_prec > prec && c2_is_type_precision_p && c4_is_max_p)))))
> -  (for mult_op (mult widen_mult)
> -   (match (unsigned_integer_sat_mul @0 @1)
> -    /* SAT_U_MUL (X, Y) = {
> -        WT x = (WT)a * (WT)b;
> -        NT hi = x >> (sizeof(NT) * 8);
> -         NT lo = (NT)x;
> -         return lo | -!!hi;
> -       } while WT is uint128_t, uint64_t, uint32_t, uint16_t,
> -         and T is uint64_t, uint32_t, uint16_t, uint8_t.  */
> -    (convert1?
> -     (bit_ior
> -      (convert?
> -       (negate
> -       (convert (ne (convert2? (rshift @3 INTEGER_CST@2)) integer_zerop))))
> -      (convert (mult_op:c@3 (convert@4 @0) (convert@5 @1)))))
> -    (if (types_match (type, @0, @1))
> -     (with
> -      {
> -       unsigned prec = TYPE_PRECISION (type);
> -       unsigned widen_prec = TYPE_PRECISION (TREE_TYPE (@3));
> -       unsigned cvt4_prec = TYPE_PRECISION (TREE_TYPE (@4));
> -       unsigned cvt5_prec = TYPE_PRECISION (TREE_TYPE (@5));
> -
> -       bool widen_mult_p = mult_op == WIDEN_MULT_EXPR && cvt4_prec == 
> cvt5_prec
> -        && widen_prec == cvt5_prec * 2;
> -       bool mult_p = mult_op == MULT_EXPR && cvt4_prec == cvt5_prec
> -        && cvt4_prec == widen_prec && widen_prec > prec;
> -       bool c2_is_type_precision_p = tree_to_uhwi (@2) == prec;
> -      }
> -      (if (c2_is_type_precision_p && (mult_p || widen_mult_p)))))))
>    (match (unsigned_integer_sat_mul @0 @1)
> +   /* SAT_U_MUL (X, Y) = {
> +       WT x = (WT)a * (WT)b;
> +       NT hi = x >> (sizeof(NT) * 8);
> +       NT lo = (NT)x;
> +       return lo | -!!hi;
> +      } while WT is uint128_t, uint64_t, uint32_t, uint16_t,
> +       and T is uint64_t, uint32_t, uint16_t, uint8_t.  */
>     (convert1?
>      (bit_ior
>       (convert?
>        (negate
>         (convert (ne (convert2? (rshift @3 INTEGER_CST@2)) integer_zerop))))
> -     (convert (widen_mult:c@3 @0 @1))))
> +     (convert (usmul_widen_mult@3 @0 @1))))
>     (if (types_match (type, @0, @1))
>      (with
>       {
>        unsigned prec = TYPE_PRECISION (type);
> -      unsigned widen_prec = TYPE_PRECISION (TREE_TYPE (@3));
> -
>        bool c2_is_type_precision_p = tree_to_uhwi (@2) == prec;
> -      bool widen_mult_p = prec * 2 == widen_prec;
>       }
> -     (if (c2_is_type_precision_p && widen_mult_p)))))
> +     (if (c2_is_type_precision_p)))))
>  )
>
>  /* The boundary condition for case 10: IMM = 1:
> --
> 2.43.0
>

Reply via email to