The following patch merges the binary patterns from the branch that are exercised by fold_stmt and gimple_fold_stmt_to_constant, as part of removing the dispatch to fold_binary from those functions.
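To make the scope concrete, here is a small illustration (my own snippet, not part of the patch; the function name is made up) of integer expression shapes whose simplification is now expressed as match.pd patterns and therefore picked up by fold_stmt directly instead of going through fold_binary:

  /* Illustration only; not part of the patch.  */
  int
  moved_pattern_examples (int x)
  {
    int a = x * -1;   /* x * -1 -> -x  */
    int b = x % -1;   /* signed X % -1 -> 0  */
    int c = x / -1;   /* signed X / -1 -> -X  */
    int d = 0 - x;    /* 0 - x -> -x  */
    return a + b + c + d;
  }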
I filed PR63862 for a C frontend bug which I noticed while fixing a fold bug that happily looked through sign-changing conversions when asking tree_expr_nonnegative_p for the shift amount.

Bootstrapped and tested on x86_64-unknown-linux-gnu, applied to trunk.

Richard.

2014-11-14  Richard Biener  <rguent...@suse.de>

	* match.pd: Implement more binary patterns exercised by fold_stmt.
	* fold-const.c (sign_bit_p): Export.
	(exact_inverse): Likewise.
	(fold_binary_loc): Remove patterns here.
	(tree_unary_nonnegative_warnv_p): Use CASE_CONVERT.
	* fold-const.h (sign_bit_p): Declare.
	(exact_inverse): Likewise.
	* gcc.c-torture/execute/shiftopt-1.c: XFAIL invalid parts.

Index: gcc/fold-const.c
===================================================================
*** gcc/fold-const.c.orig 2014-11-14 10:16:47.845424237 +0100
--- gcc/fold-const.c 2014-11-14 10:17:45.005421735 +0100
*************** static tree decode_field_reference (loca *** 130,136 **** HOST_WIDE_INT *, machine_mode *, int *, int *, tree *, tree *); - static tree sign_bit_p (tree, const_tree); static int simple_operand_p (const_tree); static bool simple_operand_p_2 (tree); static tree range_binop (enum tree_code, tree, tree, int, tree, int); --- 130,135 ----
*************** all_ones_mask_p (const_tree mask, unsign *** 3651,3657 **** The return value is the (sub)expression whose sign bit is VAL, or NULL_TREE otherwise. */ ! static tree sign_bit_p (tree exp, const_tree val) { int width; --- 3650,3656 ---- The return value is the (sub)expression whose sign bit is VAL, or NULL_TREE otherwise. */ ! tree sign_bit_p (tree exp, const_tree val) { int width;
*************** fold_addr_of_array_ref_difference (locat *** 9474,9480 **** /* If the real or vector real constant CST of type TYPE has an exact inverse, return it, else return NULL. */ ! static tree exact_inverse (tree type, tree cst) { REAL_VALUE_TYPE r; --- 9473,9479 ---- /* If the real or vector real constant CST of type TYPE has an exact inverse, return it, else return NULL. */ ! tree exact_inverse (tree type, tree cst) { REAL_VALUE_TYPE r;
*************** fold_binary_loc (location_t loc, *** 9963,9987 **** } else { - /* See if ARG1 is zero and X + ARG1 reduces to X. */ - if (fold_real_zero_addition_p (TREE_TYPE (arg0), arg1, 0)) - return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0)); - - /* Likewise if the operands are reversed. */ - if (fold_real_zero_addition_p (TREE_TYPE (arg1), arg0, 0)) - return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg1)); - - /* Convert X + -C into X - C. */ - if (TREE_CODE (arg1) == REAL_CST - && REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg1))) - { - tem = fold_negate_const (arg1, type); - if (!TREE_OVERFLOW (arg1) || !flag_trapping_math) - return fold_build2_loc (loc, MINUS_EXPR, type, - fold_convert_loc (loc, type, arg0), - fold_convert_loc (loc, type, tem)); - } - /* Fold __complex__ ( x, 0 ) + __complex__ ( 0, y ) to __complex__ ( x, y ). This is not the same for SNaNs or if signed zeros are involved. */ --- 9962,9967 ----
*************** fold_binary_loc (location_t loc, *** 10023,10034 **** && (tem = distribute_real_division (loc, code, type, arg0, arg1))) return tem; - /* Convert x+x into x*2.0. */ - if (operand_equal_p (arg0, arg1, 0) - && SCALAR_FLOAT_TYPE_P (type)) - return fold_build2_loc (loc, MULT_EXPR, type, arg0, - build_real (type, dconst2)); - /* Convert a + (b*c + d*e) into (a + b*c) + d*e. We associate floats only if the user has specified -fassociative-math.
*/ --- 10003,10008 ---- *************** fold_binary_loc (location_t loc, *** 10381,10389 **** if (! FLOAT_TYPE_P (type)) { - if (integer_zerop (arg0)) - return negate_expr (fold_convert_loc (loc, type, arg1)); - /* Fold A - (A & B) into ~B & A. */ if (!TREE_SIDE_EFFECTS (arg0) && TREE_CODE (arg1) == BIT_AND_EXPR) --- 10355,10360 ---- *************** fold_binary_loc (location_t loc, *** 10428,10443 **** } } - /* See if ARG1 is zero and X - ARG1 reduces to X. */ - else if (fold_real_zero_addition_p (TREE_TYPE (arg0), arg1, 1)) - return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0)); - - /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether - ARG0 is zero and X + ARG0 reduces to X, since that would mean - (-ARG1 + ARG0) reduces to -ARG1. */ - else if (fold_real_zero_addition_p (TREE_TYPE (arg1), arg0, 0)) - return negate_expr (fold_convert_loc (loc, type, arg1)); - /* Fold __complex__ ( x, 0 ) - __complex__ ( 0, y ) to __complex__ ( x, -y ). This is not the same for SNaNs or if signed zeros are involved. */ --- 10399,10404 ---- *************** fold_binary_loc (location_t loc, *** 10553,10563 **** if (! FLOAT_TYPE_P (type)) { - /* Transform x * -1 into -x. Make sure to do the negation - on the original operand with conversions not stripped - because we can only strip non-sign-changing conversions. */ - if (integer_minus_onep (arg1)) - return fold_convert_loc (loc, type, negate_expr (op0)); /* Transform x * -C into -x * C if x is easily negatable. */ if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) == -1 --- 10514,10519 ---- *************** fold_binary_loc (location_t loc, *** 10621,10649 **** } else { - /* Maybe fold x * 0 to 0. The expressions aren't the same - when x is NaN, since x * 0 is also NaN. Nor are they the - same in modes with signed zeros, since multiplying a - negative value by 0 gives -0, not +0. */ - if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0))) - && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))) - && real_zerop (arg1)) - return omit_one_operand_loc (loc, type, arg1, arg0); - /* In IEEE floating point, x*1 is not equivalent to x for snans. - Likewise for complex arithmetic with signed zeros. */ - if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0))) - && (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))) - || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0))) - && real_onep (arg1)) - return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0)); - - /* Transform x * -1.0 into -x. */ - if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0))) - && (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))) - || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0))) - && real_minus_onep (arg1)) - return fold_convert_loc (loc, type, negate_expr (arg0)); - /* Convert (C1/X)*C2 into (C1*C2)/X. This transformation may change the result for floating point types due to rounding so it is applied only if -fassociative-math was specify. */ --- 10577,10582 ---- *************** fold_binary_loc (location_t loc, *** 11522,11554 **** && real_zerop (arg1)) return NULL_TREE; - /* Optimize A / A to 1.0 if we don't care about - NaNs or Infinities. Skip the transformation - for non-real operands. */ - if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (arg0)) - && ! HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0))) - && ! HONOR_INFINITIES (TYPE_MODE (TREE_TYPE (arg0))) - && operand_equal_p (arg0, arg1, 0)) - { - tree r = build_real (TREE_TYPE (arg0), dconst1); - - return omit_two_operands_loc (loc, type, r, arg0, arg1); - } - - /* The complex version of the above A / A optimization. 
*/ - if (COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0)) - && operand_equal_p (arg0, arg1, 0)) - { - tree elem_type = TREE_TYPE (TREE_TYPE (arg0)); - if (! HONOR_NANS (TYPE_MODE (elem_type)) - && ! HONOR_INFINITIES (TYPE_MODE (elem_type))) - { - tree r = build_real (elem_type, dconst1); - /* omit_two_operands will call fold_convert for us. */ - return omit_two_operands_loc (loc, type, r, arg0, arg1); - } - } - /* (-A) / (-B) -> A / B */ if (TREE_CODE (arg0) == NEGATE_EXPR && negate_expr_p (arg1)) return fold_build2_loc (loc, RDIV_EXPR, type, --- 11455,11460 ---- *************** fold_binary_loc (location_t loc, *** 11559,11600 **** negate_expr (arg0), TREE_OPERAND (arg1, 0)); - /* In IEEE floating point, x/1 is not equivalent to x for snans. */ - if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0))) - && real_onep (arg1)) - return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0)); - - /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */ - if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0))) - && real_minus_onep (arg1)) - return non_lvalue_loc (loc, fold_convert_loc (loc, type, - negate_expr (arg0))); - - /* If ARG1 is a constant, we can convert this to a multiply by the - reciprocal. This does not have the same rounding properties, - so only do this if -freciprocal-math. We can actually - always safely do it if ARG1 is a power of two, but it's hard to - tell if it is or not in a portable manner. */ - if (optimize - && (TREE_CODE (arg1) == REAL_CST - || (TREE_CODE (arg1) == COMPLEX_CST - && COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg1))) - || (TREE_CODE (arg1) == VECTOR_CST - && VECTOR_FLOAT_TYPE_P (TREE_TYPE (arg1))))) - { - if (flag_reciprocal_math - && 0 != (tem = const_binop (code, build_one_cst (type), arg1))) - return fold_build2_loc (loc, MULT_EXPR, type, arg0, tem); - /* Find the reciprocal if optimizing and the result is exact. - TODO: Complex reciprocal not implemented. */ - if (TREE_CODE (arg1) != COMPLEX_CST) - { - tree inverse = exact_inverse (TREE_TYPE (arg0), arg1); - - if (inverse) - return fold_build2_loc (loc, MULT_EXPR, type, arg0, inverse); - } - } /* Convert A/B/C to A/(B*C). */ if (flag_reciprocal_math && TREE_CODE (arg0) == RDIV_EXPR) --- 11465,11470 ---- *************** fold_binary_loc (location_t loc, *** 11817,11829 **** } } - /* For unsigned integral types, FLOOR_DIV_EXPR is the same as - TRUNC_DIV_EXPR. Rewrite into the latter in this case. */ - if (INTEGRAL_TYPE_P (type) - && TYPE_UNSIGNED (type) - && code == FLOOR_DIV_EXPR) - return fold_build2_loc (loc, TRUNC_DIV_EXPR, type, op0, op1); - /* Fall through */ case ROUND_DIV_EXPR: --- 11687,11692 ---- *************** fold_binary_loc (location_t loc, *** 11831,11841 **** case EXACT_DIV_EXPR: if (integer_zerop (arg1)) return NULL_TREE; - /* X / -1 is -X. */ - if (!TYPE_UNSIGNED (type) - && TREE_CODE (arg1) == INTEGER_CST - && wi::eq_p (arg1, -1)) - return fold_convert_loc (loc, type, negate_expr (arg0)); /* Convert -A / -B to A / B when the type is signed and overflow is undefined. */ --- 11694,11699 ---- *************** fold_binary_loc (location_t loc, *** 11898,11923 **** case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case TRUNC_MOD_EXPR: - /* X % -1 is zero. */ - if (!TYPE_UNSIGNED (type) - && TREE_CODE (arg1) == INTEGER_CST - && wi::eq_p (arg1, -1)) - return omit_one_operand_loc (loc, type, integer_zero_node, arg0); - - /* X % -C is the same as X % C. 
*/ - if (code == TRUNC_MOD_EXPR - && TYPE_SIGN (type) == SIGNED - && TREE_CODE (arg1) == INTEGER_CST - && !TREE_OVERFLOW (arg1) - && wi::neg_p (arg1) - && !TYPE_OVERFLOW_TRAPS (type) - /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */ - && !sign_bit_p (arg1, arg1)) - return fold_build2_loc (loc, code, type, - fold_convert_loc (loc, type, arg0), - fold_convert_loc (loc, type, - negate_expr (arg1))); - /* X % -Y is the same as X % Y. */ if (code == TRUNC_MOD_EXPR && !TYPE_UNSIGNED (type) --- 11756,11761 ---- *************** fold_binary_loc (location_t loc, *** 11971,12000 **** case LROTATE_EXPR: case RROTATE_EXPR: - if (integer_all_onesp (arg0)) - return omit_one_operand_loc (loc, type, arg0, arg1); - goto shift; - case RSHIFT_EXPR: - /* Optimize -1 >> x for arithmetic right shifts. */ - if (integer_all_onesp (arg0) && !TYPE_UNSIGNED (type) - && tree_expr_nonnegative_p (arg1)) - return omit_one_operand_loc (loc, type, arg0, arg1); - /* ... fall through ... */ - case LSHIFT_EXPR: - shift: - if (integer_zerop (arg1)) - return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0)); - if (integer_zerop (arg0)) - return omit_one_operand_loc (loc, type, arg0, arg1); - - /* Prefer vector1 << scalar to vector1 << vector2 - if vector2 is uniform. */ - if (VECTOR_TYPE_P (TREE_TYPE (arg1)) - && (tem = uniform_vector_p (arg1)) != NULL_TREE) - return fold_build2_loc (loc, code, type, op0, tem); - /* Since negative shift count is not well-defined, don't try to compute it in the compiler. */ if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0) --- 11809,11816 ---- *************** fold_binary_loc (location_t loc, *** 12054,12068 **** } } - /* Rewrite an LROTATE_EXPR by a constant into an - RROTATE_EXPR by a new constant. */ - if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST) - { - tree tem = build_int_cst (TREE_TYPE (arg1), prec); - tem = const_binop (MINUS_EXPR, tem, arg1); - return fold_build2_loc (loc, RROTATE_EXPR, type, op0, tem); - } - /* If we have a rotate of a bit operation with the rotate count and the second operand of the bit operation both constant, permute the two operations. */ --- 11870,11875 ---- *************** fold_binary_loc (location_t loc, *** 12110,12132 **** return NULL_TREE; case MIN_EXPR: - if (operand_equal_p (arg0, arg1, 0)) - return omit_one_operand_loc (loc, type, arg0, arg1); - if (INTEGRAL_TYPE_P (type) - && operand_equal_p (arg1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST)) - return omit_one_operand_loc (loc, type, arg1, arg0); tem = fold_minmax (loc, MIN_EXPR, type, arg0, arg1); if (tem) return tem; goto associate; case MAX_EXPR: - if (operand_equal_p (arg0, arg1, 0)) - return omit_one_operand_loc (loc, type, arg0, arg1); - if (INTEGRAL_TYPE_P (type) - && TYPE_MAX_VALUE (type) - && operand_equal_p (arg1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST)) - return omit_one_operand_loc (loc, type, arg1, arg0); tem = fold_minmax (loc, MAX_EXPR, type, arg0, arg1); if (tem) return tem; --- 11917,11928 ---- *************** tree_unary_nonnegative_warnv_p (enum tre *** 14799,14805 **** return tree_expr_nonnegative_warnv_p (op0, strict_overflow_p); ! case NOP_EXPR: { tree inner_type = TREE_TYPE (op0); tree outer_type = type; --- 14595,14601 ---- return tree_expr_nonnegative_warnv_p (op0, strict_overflow_p); ! 
CASE_CONVERT: { tree inner_type = TREE_TYPE (op0); tree outer_type = type; Index: gcc/fold-const.h =================================================================== *** gcc/fold-const.h.orig 2014-11-14 10:16:47.845424237 +0100 --- gcc/fold-const.h 2014-11-14 10:17:45.005421735 +0100 *************** extern tree make_range_step (location_t, *** 167,171 **** --- 167,173 ---- extern tree build_range_check (location_t, tree, tree, int, tree, tree); extern bool merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree, tree); + extern tree sign_bit_p (tree, const_tree); + extern tree exact_inverse (tree, tree); #endif // GCC_FOLD_CONST_H Index: gcc/match.pd =================================================================== *** gcc/match.pd.orig 2014-11-14 10:16:47.845424237 +0100 --- gcc/match.pd 2014-11-14 10:17:45.006421735 +0100 *************** along with GCC; see the file COPYING3. *** 53,71 **** (pointer_plus integer_zerop @1) (non_lvalue (convert @1))) /* Simplify x - x. This is unsafe for certain floats even in non-IEEE formats. In IEEE, it is unsafe because it does wrong for NaNs. Also note that operand_equal_p is always false if an operand is volatile. */ (simplify ! (minus @0 @0) ! (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (TYPE_MODE (type))) ! { build_zero_cst (type); })) (simplify ! (mult @0 integer_zerop@1) ! @1) /* Make sure to preserve divisions by zero. This is the reason why we don't simplify x / x to 1 or 0 / x to 0. */ --- 53,111 ---- (pointer_plus integer_zerop @1) (non_lvalue (convert @1))) + /* See if ARG1 is zero and X + ARG1 reduces to X. + Likewise if the operands are reversed. */ + (simplify + (plus:c @0 real_zerop@1) + (if (fold_real_zero_addition_p (type, @1, 0)) + (non_lvalue @0))) + + /* See if ARG1 is zero and X - ARG1 reduces to X. */ + (simplify + (minus @0 real_zerop@1) + (if (fold_real_zero_addition_p (type, @1, 1)) + (non_lvalue @0))) + /* Simplify x - x. This is unsafe for certain floats even in non-IEEE formats. In IEEE, it is unsafe because it does wrong for NaNs. Also note that operand_equal_p is always false if an operand is volatile. */ (simplify ! (minus @0 @0) ! (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (TYPE_MODE (type))) ! { build_zero_cst (type); })) (simplify ! (mult @0 integer_zerop@1) ! @1) ! ! /* Maybe fold x * 0 to 0. The expressions aren't the same ! when x is NaN, since x * 0 is also NaN. Nor are they the ! same in modes with signed zeros, since multiplying a ! negative value by 0 gives -0, not +0. */ ! (simplify ! (mult @0 real_zerop@1) ! (if (!HONOR_NANS (TYPE_MODE (type)) ! && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))) ! @1)) ! ! /* In IEEE floating point, x*1 is not equivalent to x for snans. ! Likewise for complex arithmetic with signed zeros. */ ! (simplify ! (mult @0 real_onep) ! (if (!HONOR_SNANS (TYPE_MODE (type)) ! && (!HONOR_SIGNED_ZEROS (TYPE_MODE (type)) ! || !COMPLEX_FLOAT_TYPE_P (type))) ! (non_lvalue @0))) ! ! /* Transform x * -1.0 into -x. */ ! (simplify ! (mult @0 real_minus_onep) ! (if (!HONOR_SNANS (TYPE_MODE (type)) ! && (!HONOR_SIGNED_ZEROS (TYPE_MODE (type)) ! || !COMPLEX_FLOAT_TYPE_P (type))) ! (negate @0))) /* Make sure to preserve divisions by zero. This is the reason why we don't simplify x / x to 1 or 0 / x to 0. */ *************** along with GCC; see the file COPYING3. *** 74,92 **** (op @0 integer_onep) (non_lvalue @0))) /* Same applies to modulo operations, but fold is inconsistent here and simplifies 0 % x to 0, only preserving literal 0 % 0. */ ! 
(for op (ceil_mod floor_mod round_mod trunc_mod) /* 0 % X is always zero. */ (simplify ! (op integer_zerop@0 @1) /* But not for 0 % 0 so that we can get the proper warnings and errors. */ (if (!integer_zerop (@1)) @0)) /* X % 1 is always zero. */ (simplify ! (op @0 integer_onep) ! { build_zero_cst (type); })) /* x | ~0 -> ~0 */ (simplify --- 114,211 ---- (op @0 integer_onep) (non_lvalue @0))) + /* X / -1 is -X. */ + (for div (trunc_div ceil_div floor_div round_div exact_div) + (simplify + (div @0 INTEGER_CST@1) + (if (!TYPE_UNSIGNED (type) + && wi::eq_p (@1, -1)) + (negate @0)))) + + /* For unsigned integral types, FLOOR_DIV_EXPR is the same as + TRUNC_DIV_EXPR. Rewrite into the latter in this case. */ + (simplify + (floor_div @0 @1) + (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type)) + (trunc_div @0 @1))) + + /* Optimize A / A to 1.0 if we don't care about + NaNs or Infinities. Skip the transformation + for non-real operands. */ + (simplify + (rdiv @0 @0) + (if (SCALAR_FLOAT_TYPE_P (type) + && ! HONOR_NANS (TYPE_MODE (type)) + && ! HONOR_INFINITIES (TYPE_MODE (type))) + { build_real (type, dconst1); }) + /* The complex version of the above A / A optimization. */ + (if (COMPLEX_FLOAT_TYPE_P (type) + && ! HONOR_NANS (TYPE_MODE (TREE_TYPE (type))) + && ! HONOR_INFINITIES (TYPE_MODE (TREE_TYPE (type)))) + { build_complex (type, build_real (TREE_TYPE (type), dconst1), + build_real (TREE_TYPE (type), dconst0)); })) + + /* In IEEE floating point, x/1 is not equivalent to x for snans. */ + (simplify + (rdiv @0 real_onep) + (if (!HONOR_SNANS (TYPE_MODE (type))) + (non_lvalue @0))) + + /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */ + (simplify + (rdiv @0 real_minus_onep) + (if (!HONOR_SNANS (TYPE_MODE (type))) + (negate @0))) + + /* If ARG1 is a constant, we can convert this to a multiply by the + reciprocal. This does not have the same rounding properties, + so only do this if -freciprocal-math. We can actually + always safely do it if ARG1 is a power of two, but it's hard to + tell if it is or not in a portable manner. */ + (for cst (REAL_CST COMPLEX_CST VECTOR_CST) + (simplify + (rdiv @0 cst@1) + (if (optimize) + (if (flag_reciprocal_math) + (with + { tree tem = fold_binary (RDIV_EXPR, type, build_one_cst (type), @1); } + (if (tem) + (mult @0 { tem; } )))) + (if (cst != COMPLEX_CST) + (with { tree inverse = exact_inverse (type, @1); } + (if (inverse) + (mult @0 { inverse; } ))))))) + /* Same applies to modulo operations, but fold is inconsistent here and simplifies 0 % x to 0, only preserving literal 0 % 0. */ ! (for mod (ceil_mod floor_mod round_mod trunc_mod) /* 0 % X is always zero. */ (simplify ! (mod integer_zerop@0 @1) /* But not for 0 % 0 so that we can get the proper warnings and errors. */ (if (!integer_zerop (@1)) @0)) /* X % 1 is always zero. */ (simplify ! (mod @0 integer_onep) ! { build_zero_cst (type); }) ! /* X % -1 is zero. */ ! (simplify ! (mod @0 INTEGER_CST@1) ! (if (!TYPE_UNSIGNED (type) ! && wi::eq_p (@1, -1)) ! { build_zero_cst (type); }))) ! ! /* X % -C is the same as X % C. */ ! (simplify ! (trunc_mod @0 INTEGER_CST@1) ! (if (TYPE_SIGN (type) == SIGNED ! && !TREE_OVERFLOW (@1) ! && wi::neg_p (@1) ! && !TYPE_OVERFLOW_TRAPS (type) ! /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */ ! && !sign_bit_p (@1, @1)) ! (trunc_mod @0 (negate @1)))) /* x | ~0 -> ~0 */ (simplify *************** along with GCC; see the file COPYING3. *** 393,398 **** --- 512,575 ---- (convert @1)))))) + /* Simplifications of MIN_EXPR and MAX_EXPR. 
*/ + + (for minmax (min max) + (simplify + (minmax @0 @0) + @0)) + (simplify + (min @0 @1) + (if (INTEGRAL_TYPE_P (type) + && TYPE_MIN_VALUE (type) + && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST)) + @1)) + (simplify + (max @0 @1) + (if (INTEGRAL_TYPE_P (type) + && TYPE_MAX_VALUE (type) + && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST)) + @1)) + + + /* Simplifications of shift and rotates. */ + + (for rotate (lrotate rrotate) + (simplify + (rotate integer_all_onesp@0 @1) + @0)) + + /* Optimize -1 >> x for arithmetic right shifts. */ + (simplify + (rshift integer_all_onesp@0 @1) + (if (!TYPE_UNSIGNED (type) + && tree_expr_nonnegative_p (@1)) + @0)) + + (for shiftrotate (lrotate rrotate lshift rshift) + (simplify + (shiftrotate @0 integer_zerop) + (non_lvalue @0)) + (simplify + (shiftrotate integer_zerop@0 @1) + @0) + /* Prefer vector1 << scalar to vector1 << vector2 + if vector2 is uniform. */ + (for vec (VECTOR_CST CONSTRUCTOR) + (simplify + (shiftrotate @0 vec@1) + (with { tree tem = uniform_vector_p (@1); } + (if (tem) + (shiftrotate @0 { tem; })))))) + + /* Rewrite an LROTATE_EXPR by a constant into an + RROTATE_EXPR by a new constant. */ + (simplify + (lrotate @0 INTEGER_CST@1) + (rrotate @0 { fold_binary (MINUS_EXPR, TREE_TYPE (@1), + build_int_cst (TREE_TYPE (@1), + element_precision (type)), @1); })) + /* Simplifications of conversions. */ *************** along with GCC; see the file COPYING3. *** 568,573 **** --- 745,782 ---- (if (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type)) (convert @0))) + /* Canonicalization of binary operations. */ + + /* Convert X + -C into X - C. */ + (simplify + (plus @0 REAL_CST@1) + (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1))) + (with { tree tem = fold_unary (NEGATE_EXPR, type, @1); } + (if (!TREE_OVERFLOW (tem) || !flag_trapping_math) + (minus @0 { tem; }))))) + + /* Convert x+x into x*2.0. */ + (simplify + (plus @0 @0) + (if (SCALAR_FLOAT_TYPE_P (type)) + (mult @0 { build_real (type, dconst2); }))) + + (simplify + (minus integer_zerop @1) + (negate @1)) + + /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether + ARG0 is zero and X + ARG0 reduces to X, since that would mean + (-ARG1 + ARG0) reduces to -ARG1. */ + (simplify + (minus real_zerop@0 @1) + (if (fold_real_zero_addition_p (type, @0, 0)) + (negate @1))) + + /* Transform x * -1 into -x. */ + (simplify + (mult @0 integer_minus_onep) + (negate @0)) /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */ (simplify Index: gcc/testsuite/gcc.c-torture/execute/shiftopt-1.c =================================================================== *** gcc/testsuite/gcc.c-torture/execute/shiftopt-1.c.orig 2014-11-14 10:16:47.845424237 +0100 --- gcc/testsuite/gcc.c-torture/execute/shiftopt-1.c 2014-11-14 10:17:59.460421103 +0100 *************** utest (unsigned int x) *** 22,32 **** --- 22,37 ---- if (0 >> x != 0) link_error (); + /* XFAIL: the C frontend converts the shift amount to 'int' + thus we get -1 >> (int)x which means the shift amount may + be negative. See PR63862. */ + #if 0 if (-1 >> x != -1) link_error (); if (~0 >> x != ~0) link_error (); + #endif } void
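As a closing note, here is a minimal sketch (my own, not taken from the PR or from shiftopt-1.c) of why those checks had to be disabled: the C frontend converts the shift count to 'int' (the bug filed as PR63862), so with the stricter tree_expr_nonnegative_p handling the count is no longer known to be nonnegative and the -1 >> x folding does not trigger:

  /* Hypothetical illustration of the PR63862 interaction; not part of
     the patch.  */
  int
  arith_shift_not_folded (unsigned int x)
  {
    /* The C frontend emits -1 >> (int) x; once sign-changing
       conversions are no longer looked through, a negative count
       cannot be ruled out, so the shift is not simplified to -1.  */
    return -1 >> x;
  }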