On Fri, Jul 22, 2011 at 10:14 AM, Kai Tietz <ktiet...@googlemail.com> wrote:
> 2011/7/21 Richard Guenther <richard.guent...@gmail.com>:
>> On Thu, Jul 21, 2011 at 3:09 PM, Kai Tietz <ktiet...@googlemail.com> wrote:
>>> Hello,
>>>
>>> this patch changes TRUTH-expression patterns into BIT-expression ones
>>> and adjusts code-flow
>>> for this.
>>>
>>> ChangeLog gcc
>>>
>>> 2011-07-21  Kai Tietz  <kti...@redhat.com>
>>>
>>>        * tree-vrp.c (extract_range_from_binary_expr): Convert
>>>        truth expressions to binary expressions.
>>>        (extract_range_from_unary_expr): Likewise.
>>>        (extract_range_from_assignment): Likewise.
>>>        (build_assert_expr_for): Likewise.
>>>        (register_edge_assert_for_1): Likewise.
>>>        (simplify_truth_ops_using_ranges): Likewise.
>>>        (simplify_stmt_using_ranges): Likewise.
>>>
>>>        do_dce flag to decide whether BB's statements are scanned
>>>        in last-to-first or first-to-last order.
>>>
>>> Bootstrapped and regression tested for all standard languages
>>> (including Ada and Obj-C++) on
>>> host x86_64-pc-linux-gnu.  OK to apply?
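
A minimal sketch of the kind of statement the conversion concerns (not part
of the patch; it assumes the companion gimplifier change already emits the
bitwise codes for 1-bit boolean operands, and the dump syntax below is only
approximate):

    /* C source */
    _Bool f (_Bool a, _Bool b)
    {
      return a && b;   /* no side effects, so no branching lowering needed */
    }

    /* The GIMPLE assignment for the return value, roughly:
         before the series:  D.1 = a && b;    <-- TRUTH_AND_EXPR
         after the series:   D.1 = a & b;     <-- BIT_AND_EXPR on the 1-bit type
       which is why tree-vrp.c no longer needs the TRUTH_* cases.  */
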
>>
>> *sigh*, you didn't address any of my comments.
>>
>> Again ...
>>
>>>
>>> Regards,
>>> Kai
>>>
>>> Index: gcc-head/gcc/tree-vrp.c
>>> ===================================================================
>>> --- gcc-head.orig/gcc/tree-vrp.c
>>> +++ gcc-head/gcc/tree-vrp.c
>>> @@ -2172,8 +2172,6 @@ extract_range_from_binary_expr (value_ra
>>>       && code != MAX_EXPR
>>>       && code != BIT_AND_EXPR
>>> -      && code != BIT_IOR_EXPR
>>> -      && code != TRUTH_AND_EXPR
>>> -      && code != TRUTH_OR_EXPR)
>>> +      && code != BIT_IOR_EXPR)
>>>     {
>>>       /* We can still do constant propagation here.  */
>>>       tree const_op0 = op_with_constant_singleton_value_range (op0);
>>> @@ -2228,8 +2227,7 @@ extract_range_from_binary_expr (value_ra
>>>      divisions.  TODO, we may be able to derive anti-ranges in
>>>      some cases.  */
>>>   if (code != BIT_AND_EXPR
>>> -      && code != TRUTH_AND_EXPR
>>> -      && code != TRUTH_OR_EXPR
>>> +      && code != BIT_IOR_EXPR
>>>       && code != TRUNC_DIV_EXPR
>>>       && code != FLOOR_DIV_EXPR
>>>       && code != CEIL_DIV_EXPR
>>> @@ -2251,7 +2249,10 @@ extract_range_from_binary_expr (value_ra
>>>       || POINTER_TYPE_P (TREE_TYPE (op0))
>>>       || POINTER_TYPE_P (TREE_TYPE (op1)))
>>>     {
>>> -      if (code == MIN_EXPR || code == MAX_EXPR)
>>> +      /* We need to preserve here bitwise-or for pointer types.  */
>>
>> Preserve?  I asked how you end up seeing BIT_IOR_EXPR here; you
>> haven't answered that yet.
>
> Well, we see it here in the truth case.  So we need to allow it at
> the top, and indeed I noticed that we have IOR patterns on address-typed
> expressions, so I added default handling for it here, as otherwise we
> would run into the gcc_unreachable for it.

We would run into it right now, without this patch, and we don't.

>>> +      if (code == BIT_IOR_EXPR)
>>> +        set_value_range_to_varying (vr);
>>> +      else if (code == MIN_EXPR || code == MAX_EXPR)
>>>        {
>>>          /* For MIN/MAX expressions with pointers, we only care about
>>>             nullness, if both are non null, then the result is nonnull.
>>> @@ -2296,57 +2297,8 @@ extract_range_from_binary_expr (value_ra
>>>
>>>   /* For integer ranges, apply the operation to each end of the
>>>      range and see what we end up with.  */
>>> -  if (code == TRUTH_AND_EXPR
>>> -      || code == TRUTH_OR_EXPR)
>>> -    {
>>> -      /* If one of the operands is zero, we know that the whole
>>> -        expression evaluates zero.  */
>>> -      if (code == TRUTH_AND_EXPR
>>> -         && ((vr0.type == VR_RANGE
>>> -              && integer_zerop (vr0.min)
>>> -              && integer_zerop (vr0.max))
>>> -             || (vr1.type == VR_RANGE
>>> -                 && integer_zerop (vr1.min)
>>> -                 && integer_zerop (vr1.max))))
>>> -       {
>>> -         type = VR_RANGE;
>>> -         min = max = build_int_cst (expr_type, 0);
>>> -       }
>>> -      /* If one of the operands is one, we know that the whole
>>> -        expression evaluates one.  */
>>> -      else if (code == TRUTH_OR_EXPR
>>> -              && ((vr0.type == VR_RANGE
>>> -                   && integer_onep (vr0.min)
>>> -                   && integer_onep (vr0.max))
>>> -                  || (vr1.type == VR_RANGE
>>> -                      && integer_onep (vr1.min)
>>> -                      && integer_onep (vr1.max))))
>>> -       {
>>> -         type = VR_RANGE;
>>> -         min = max = build_int_cst (expr_type, 1);
>>> -       }
>>> -      else if (vr0.type != VR_VARYING
>>> -              && vr1.type != VR_VARYING
>>> -              && vr0.type == vr1.type
>>> -              && !symbolic_range_p (&vr0)
>>> -              && !overflow_infinity_range_p (&vr0)
>>> -              && !symbolic_range_p (&vr1)
>>> -              && !overflow_infinity_range_p (&vr1))
>>> -       {
>>> -         /* Boolean expressions cannot be folded with int_const_binop.  */
>>> -         min = fold_binary (code, expr_type, vr0.min, vr1.min);
>>> -         max = fold_binary (code, expr_type, vr0.max, vr1.max);
>>> -       }
>>> -      else
>>> -       {
>>> -         /* The result of a TRUTH_*_EXPR is always true or false.  */
>>> -         set_value_range_to_truthvalue (vr, expr_type);
>>> -         return;
>>> -       }
>>> -    }
>>> -  else if (code == PLUS_EXPR
>>> -          || code == MIN_EXPR
>>> -          || code == MAX_EXPR)
>>
>> The above hunk looks good, removing dead code, but ...
>>
>>> +  if (code == PLUS_EXPR || code == MIN_EXPR
>>> +      || code == MAX_EXPR)
>>>     {
>>>       /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
>>>         VR_VARYING.  It would take more effort to compute a precise
>>> @@ -2679,73 +2631,127 @@ extract_range_from_binary_expr (value_ra
>>>       double_int may_be_nonzero0, may_be_nonzero1;
>>>       double_int must_be_nonzero0, must_be_nonzero1;
>>>
>>> -      vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0);
>>> -      vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1);
>>> -      int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
>>> -                                                 &must_be_nonzero0);
>>> -      int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
>>> -                                                 &must_be_nonzero1);
>>> -
>>> -      type = VR_RANGE;
>>> -      if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p)
>>> -       min = max = int_const_binop (code, vr0.max, vr1.max);
>>> -      else if (!int_cst_range0 && !int_cst_range1)
>>> +      /* If one of the operands is zero, we know that the whole
>>> +        expression evaluates zero.  */
>>
>> ... starting here it gets odd.  You are 1:1 replacing the code here.
>> Don't do that.
>>
>> Instead do nothing here in this patch.
>>
>>> +      if (code == BIT_AND_EXPR
>>> +         && ((vr0.type == VR_RANGE
>>> +              && integer_zerop (vr0.min)
>>> +              && integer_zerop (vr0.max))
>>> +             || (vr1.type == VR_RANGE
>>> +                 && integer_zerop (vr1.min)
>>> +                 && integer_zerop (vr1.max))))
>>> +       {
>>> +         type = VR_RANGE;
>>> +         min = max = build_int_cst (expr_type, 0);
>>> +       }
>>> +      /* If one of the operands has all bits set to one, we know
>>> +         that the whole expression evaluates to this one.  */
>>> +      else if (code == BIT_IOR_EXPR
>>> +              && (vr0.type == VR_RANGE
>>> +                  && integer_all_onesp (vr0.min)
>>> +                  && integer_all_onesp (vr0.max)))
>>> +       {
>>> +         type = VR_RANGE;
>>> +         min = max = fold_convert (expr_type, vr0.min);
>>> +       }
>>> +      else if (code == BIT_IOR_EXPR
>>> +              && (vr1.type == VR_RANGE
>>> +                  && integer_all_onesp (vr1.min)
>>> +                  && integer_all_onesp (vr1.max)))
>>>        {
>>> -         set_value_range_to_varying (vr);
>>> -         return;
>>> +         type = VR_RANGE;
>>> +         min = max = fold_convert (expr_type, vr1.min);
>>>        }
>>> -      else if (code == BIT_AND_EXPR)
>>> +      else if (TYPE_PRECISION (TREE_TYPE (op1)) == 1)
>>>        {
>>> -         min = double_int_to_tree (expr_type,
>>> -                                   double_int_and (must_be_nonzero0,
>>> -                                                   must_be_nonzero1));
>>> -         max = double_int_to_tree (expr_type,
>>> -                                   double_int_and (may_be_nonzero0,
>>> -                                                   may_be_nonzero1));
>>> -         if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0)
>>> -           min = NULL_TREE;
>>> -         if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0)
>>> -           max = NULL_TREE;
>>> -         if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
>>> -           {
>>> -             if (min == NULL_TREE)
>>> -               min = build_int_cst (expr_type, 0);
>>> -             if (max == NULL_TREE || tree_int_cst_lt (vr0.max, max))
>>> -               max = vr0.max;
>>> +         if (vr0.type != VR_VARYING
>>> +                  && vr1.type != VR_VARYING
>>> +                  && vr0.type == vr1.type
>>> +                  && !symbolic_range_p (&vr0)
>>> +                  && !overflow_infinity_range_p (&vr0)
>>> +                  && !symbolic_range_p (&vr1)
>>> +                  && !overflow_infinity_range_p (&vr1))
>>> +           {
>>> +             /* Boolean expressions cannot be folded with int_const_binop.  */
>>> +             min = fold_binary (code, expr_type, vr0.min, vr1.min);
>>> +             max = fold_binary (code, expr_type, vr0.max, vr1.max);
>>> +           }
>
> Right, I kept this logic because the general code seems to have issues with
> const_binop and Boolean expressions.  If you are sure this comment is
> wrong, we might be able to remove the complete hunks before it and
> just use the old code for the boolean case, too.

I'm sure this comment is bogus.  As with the above, we'd run into the
bug right now, so just remove the TRUTH_* cases.
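
A quick worked check of why that comment looks bogus, using plain endpoint
arithmetic rather than the GCC API: boolean operand ranges are at most
[0, 1], and combining the endpoints bitwise already gives the expected
results.

    /* vr0 = [0,1], vr1 = [0,1], 1-bit type:
         BIT_AND_EXPR:  [0 & 0, 1 & 1] = [0,1]
         BIT_IOR_EXPR:  [0 | 0, 1 | 1] = [0,1]
       With a constant singleton, e.g. vr1 = [1,1]:
         BIT_AND_EXPR:  [0 & 1, 1 & 1] = [0,1]
         BIT_IOR_EXPR:  [0 | 1, 1 | 1] = [1,1]
       Nothing here needs a special "truth" way of folding the endpoints.  */
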

>>> +         else
>>> +           {
>>> +             set_value_range_to_varying (vr);
>>> +             return;
>>>            }
>>> -         if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
>>> +       }
>>> +       else
>>> +        {
>>> +         vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0);
>>> +         vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1);
>>> +         int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
>>> +                                                     &must_be_nonzero0);
>>> +         int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
>>> +                                                     &must_be_nonzero1);
>>> +
>>> +         type = VR_RANGE;
>>> +         if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p)
>>> +           min = max = int_const_binop (code, vr0.max, vr1.max);
>>> +         else if (!int_cst_range0 && !int_cst_range1)
>>>            {
>>> -             if (min == NULL_TREE)
>>> -               min = build_int_cst (expr_type, 0);
>>> -             if (max == NULL_TREE || tree_int_cst_lt (vr1.max, max))
>>> -               max = vr1.max;
>>> +             set_value_range_to_varying (vr);
>>> +             return;
>>> +           }
>>> +         else if (code == BIT_AND_EXPR)
>>> +           {
>>> +             min = double_int_to_tree (expr_type,
>>> +                                       double_int_and (must_be_nonzero0,
>>> +                                                       must_be_nonzero1));
>>> +             max = double_int_to_tree (expr_type,
>>> +                                       double_int_and (may_be_nonzero0,
>>> +                                                       may_be_nonzero1));
>>> +             if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0)
>>> +               min = NULL_TREE;
>>> +             if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0)
>>> +               max = NULL_TREE;
>>> +             if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
>>> +               {
>>> +                 if (min == NULL_TREE)
>>> +                   min = build_int_cst (expr_type, 0);
>>> +                 if (max == NULL_TREE || tree_int_cst_lt (vr0.max, max))
>>> +                   max = vr0.max;
>>> +               }
>>> +             if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
>>> +               {
>>> +                 if (min == NULL_TREE)
>>> +                   min = build_int_cst (expr_type, 0);
>>> +                 if (max == NULL_TREE || tree_int_cst_lt (vr1.max, max))
>>> +                   max = vr1.max;
>>> +               }
>>> +           }
>>> +         else if (!int_cst_range0
>>> +                  || !int_cst_range1
>>> +                  || tree_int_cst_sgn (vr0.min) < 0
>>> +                  || tree_int_cst_sgn (vr1.min) < 0)
>>> +           {
>>> +             set_value_range_to_varying (vr);
>>> +             return;
>>>            }
>>> -       }
>>> -      else if (!int_cst_range0
>>> -              || !int_cst_range1
>>> -              || tree_int_cst_sgn (vr0.min) < 0
>>> -              || tree_int_cst_sgn (vr1.min) < 0)
>>> -       {
>>> -         set_value_range_to_varying (vr);
>>> -         return;
>>> -       }
>>> -      else
>>> -       {
>>> -         min = double_int_to_tree (expr_type,
>>> -                                   double_int_ior (must_be_nonzero0,
>>> -                                                   must_be_nonzero1));
>>> -         max = double_int_to_tree (expr_type,
>>> -                                   double_int_ior (may_be_nonzero0,
>>> -                                                   may_be_nonzero1));
>>> -         if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0)
>>> -           min = vr0.min;
>>>          else
>>> -           min = vrp_int_const_binop (MAX_EXPR, min, vr0.min);
>>> -         if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0)
>>> -           max = NULL_TREE;
>>> -         min = vrp_int_const_binop (MAX_EXPR, min, vr1.min);
>>> +           {
>>> +             min = double_int_to_tree (expr_type,
>>> +                                       double_int_ior (must_be_nonzero0,
>>> +                                                       must_be_nonzero1));
>>> +             max = double_int_to_tree (expr_type,
>>> +                                       double_int_ior (may_be_nonzero0,
>>> +                                                       may_be_nonzero1));
>>> +             if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0)
>>> +               min = vr0.min;
>>> +             else
>>> +               min = vrp_int_const_binop (MAX_EXPR, min, vr0.min);
>>> +             if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0)
>>> +               max = NULL_TREE;
>>> +             min = vrp_int_const_binop (MAX_EXPR, min, vr1.min);
>>> +           }
>>>        }
>>> -    }
>>> +     }
>>>   else
>>>     gcc_unreachable ();
>>>
>>> @@ -2806,7 +2812,7 @@ extract_range_from_unary_expr (value_ran
>>>      cannot easily determine a resulting range.  */
>>>   if (code == FIX_TRUNC_EXPR
>>>       || code == FLOAT_EXPR
>>> -      || code == BIT_NOT_EXPR
>>> +      || (code == BIT_NOT_EXPR && TYPE_PRECISION (type) != 1)
>>
>> Likewise - this looks unrelated to TRUTH_* code removal.  And I asked you
>> to instead properly handle BIT_NOT_EXPR completely (in a separate
>> patch).
>
> Well, I don't think it is unrelated to the truth-to-binary transition,
> but you are right that this check
> is dead code for now (but see my later comment on why it isn't, and on why
> we will see BIT_XOR_EXPR here; see the second part).  Disallowing
> BIT_XOR_EXPR here might not be a big deal, but we might lose some
> folding opportunities in VRP, which then get done by later passes
> anyway.
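
A short worked note on why the precision-1 case is the easy one: in a
Boolean (1-bit) type BIT_NOT just flips the single bit, so ranges map
cleanly, while wider types need the general handling asked for in a
separate patch.

    /* 1-bit type: ~x behaves like x ^ 1, so
         x in [0,0]  ->  ~x in [1,1]
         x in [1,1]  ->  ~x in [0,0]
         x in [0,1]  ->  ~x in [0,1]
       For a wider two's-complement type ~x is -1 - x, so the boolean
       shortcut above does not carry over.  */
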
>
>>>       || code == CONJ_EXPR)
>>>     {
>>>       /* We can still do constant propagation here.  */
>>> @@ -3300,10 +3306,7 @@ extract_range_from_assignment (value_ran
>>>     extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
>>>   else if (code == SSA_NAME)
>>>     extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
>>> -  else if (TREE_CODE_CLASS (code) == tcc_binary
>>> -          || code == TRUTH_AND_EXPR
>>> -          || code == TRUTH_OR_EXPR
>>> -          || code == TRUTH_XOR_EXPR)
>>> +  else if (TREE_CODE_CLASS (code) == tcc_binary)
>>>     extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
>>>                                    gimple_expr_type (stmt),
>>>                                    gimple_assign_rhs1 (stmt),
>>> @@ -3973,9 +3976,10 @@ build_assert_expr_for (tree cond, tree v
>>>       tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
>>>       assertion = gimple_build_assign (n, a);
>>>     }
>>> -  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
>>> +  else if (TREE_CODE (cond) == BIT_NOT_EXPR
>>> +          && TYPE_PRECISION (TREE_TYPE (cond)) == 1)
>>>     {
>>> -      /* Given !V, build the assignment N = false.  */
>>> +      /* Given ~V, build the assignment N = false.  */
>>>       tree op0 = TREE_OPERAND (cond, 0);
>>>       gcc_assert (op0 == v);
>>>       assertion = gimple_build_assign (n, boolean_false_node);
>>
>> See my previous comment.  This is dead code; if you want to do a cleanup,
>> inline it into its sole caller.
>>
>>> @@ -4516,11 +4520,9 @@ register_edge_assert_for_1 (tree op, enu
>>>                                              invert);
>>>     }
>>>   else if ((code == NE_EXPR
>>> -           && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR
>>> -               || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR))
>>> +           && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
>>>           || (code == EQ_EXPR
>>> -              && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR
>>> -                  || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)))
>>> +              && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
>>>     {
>>>       /* Recurse on each operand.  */
>>>       retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
>>> @@ -4528,7 +4530,8 @@ register_edge_assert_for_1 (tree op, enu
>>>       retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
>>>                                            code, e, bsi);
>>>     }
>>> -  else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR)
>>> +  else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
>>> +          && TYPE_PRECISION (TREE_TYPE (op)) == 1)
>>>     {
>>>       /* Recurse, flipping CODE.  */
>>>       code = invert_tree_comparison (code, false);
>>> @@ -4585,8 +4588,8 @@ register_edge_assert_for (tree name, edg
>>>      the value zero or one, then we may be able to assert values
>>>      for SSA_NAMEs which flow into COND.  */
>>>
>>> -  /* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining
>>> -     statement of NAME we can assert both operands of the TRUTH_AND_EXPR
>>> +  /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
>>> +     statement of NAME we can assert both operands of the BIT_AND_EXPR
>>>      have nonzero value.  */
>>>   if (((comp_code == EQ_EXPR && integer_onep (val))
>>>        || (comp_code == NE_EXPR && integer_zerop (val))))
>>> @@ -4594,8 +4597,7 @@ register_edge_assert_for (tree name, edg
>>>       gimple def_stmt = SSA_NAME_DEF_STMT (name);
>>>
>>>       if (is_gimple_assign (def_stmt)
>>> -         && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR
>>> -             || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR))
>>> +         && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
>>>        {
>>>          tree op0 = gimple_assign_rhs1 (def_stmt);
>>>          tree op1 = gimple_assign_rhs2 (def_stmt);
>>> @@ -4604,8 +4606,8 @@ register_edge_assert_for (tree name, edg
>>>        }
>>>     }
>>>
>>> -  /* In the case of NAME == 0 or NAME != 1, for TRUTH_OR_EXPR defining
>>> -     statement of NAME we can assert both operands of the TRUTH_OR_EXPR
>>> +  /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
>>> +     statement of NAME we can assert both operands of the BIT_IOR_EXPR
>>>      have zero value.  */
>>>   if (((comp_code == EQ_EXPR && integer_zerop (val))
>>>        || (comp_code == NE_EXPR && integer_onep (val))))
>>> @@ -4613,11 +4615,12 @@ register_edge_assert_for (tree name, edg
>>>       gimple def_stmt = SSA_NAME_DEF_STMT (name);
>>>
>>>       if (is_gimple_assign (def_stmt)
>>> -         && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR
>>> +         && ((gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
>>> +              && TYPE_PRECISION (TREE_TYPE (name)) == 1)
>>>              /* For BIT_IOR_EXPR only if NAME == 0 both operands have
>>>                 necessarily zero value.  */
>>>              || (comp_code == EQ_EXPR
>>> -                 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR))))
>>> +                 && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)))
>>
>> Again you ignored my previous comments - why do I even look at the
>> patches?  This code simplifies if you CSE the BIT_IOR_EXPR test
>> and move and adjust the comment.
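
A sketch of what that CSE could look like; this is only an illustration of
the suggestion, not committed code, and it reuses the names already visible
in the hunk above:

    if (is_gimple_assign (def_stmt)
        && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
        /* For a 1-bit NAME, NAME == 0 and NAME != 1 are equivalent;
           for wider types only NAME == 0 guarantees that both operands
           of the BIT_IOR_EXPR are zero.  */
        && (TYPE_PRECISION (TREE_TYPE (name)) == 1
            || comp_code == EQ_EXPR))
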
>>
>>>        {
>>>          tree op0 = gimple_assign_rhs1 (def_stmt);
>>>          tree op1 = gimple_assign_rhs2 (def_stmt);
>>> @@ -6772,7 +6775,7 @@ simplify_truth_ops_using_ranges (gimple_
>>>         return false;
>>>     }
>>>
>>> -  if (rhs_code == TRUTH_NOT_EXPR)
>>> +  if (rhs_code == BIT_NOT_EXPR && TYPE_PRECISION (TREE_TYPE (op0)) == 1)
>>>     {
>>
>> We never want to transform BIT_NOT_EXPR to BIT_XOR_EXPR, so
>> please instead remove this code and do not call
>> simplify_truth_ops_using_ranges for BIT_NOT_EXPRs.
>>
>>>       rhs_code = NE_EXPR;
>>>       op1 = build_int_cst (TREE_TYPE (op0), 1);
>>> @@ -6787,7 +6790,7 @@ simplify_truth_ops_using_ranges (gimple_
>>>           /* Exclude anything that should have been already folded.  */
>>>          if (rhs_code != EQ_EXPR
>>>              && rhs_code != NE_EXPR
>>> -             && rhs_code != TRUTH_XOR_EXPR)
>>> +             && rhs_code != BIT_XOR_EXPR)
>>
>> Sure not, we are not calling this function with BIT_XOR_EXPR either.
>>
>>>            return false;
>>>
>>>          if (!integer_zerop (op1)
>>> @@ -6799,6 +6802,8 @@ simplify_truth_ops_using_ranges (gimple_
>>>          if (rhs_code == EQ_EXPR)
>>>            {
>>>              rhs_code = NE_EXPR;
>>> +             /* We can use here TRUTH_NOT_EXPR for doing logical-not
>>> +                on constant.  */
>>>              op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1);
>>>            }
>>>        }
>>> @@ -6831,14 +6836,9 @@ simplify_truth_ops_using_ranges (gimple_
>>>       else
>>>        location = gimple_location (stmt);
>>>
>>> -      if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR)
>>> -        warning_at (location, OPT_Wstrict_overflow,
>>> -                   _("assuming signed overflow does not occur when "
>>> -                     "simplifying && or || to & or |"));
>>> -      else
>>> -        warning_at (location, OPT_Wstrict_overflow,
>>> -                   _("assuming signed overflow does not occur when "
>>> -                     "simplifying ==, != or ! to identity or ^"));
>>> +      warning_at (location, OPT_Wstrict_overflow,
>>> +                 _("assuming signed overflow does not occur when "
>>> +                   "simplifying ==, != or ! to identity or ^"));
>>>     }
>>>
>>>   need_conversion =
>>> @@ -6853,13 +6853,10 @@ simplify_truth_ops_using_ranges (gimple_
>>>
>>>   switch (rhs_code)
>>>     {
>>> -    case TRUTH_AND_EXPR:
>>> -      rhs_code = BIT_AND_EXPR;
>>> -      break;
>>> -    case TRUTH_OR_EXPR:
>>> -      rhs_code = BIT_IOR_EXPR;
>>> +    case BIT_AND_EXPR:
>>> +    case BIT_IOR_EXPR:
>>>       break;
>>> -    case TRUTH_XOR_EXPR:
>>> +    case BIT_XOR_EXPR:
>>
>> We do not call this function for BIT_*_EXPR.
>
> We have to, and we do.

We don't.  And we won't have to.

>  I agree that for now this code does not do
> much for BIT_IOR/BIT_AND.  But with result sinking into the
> type cast, it does quite a lot here.  Please see the second patch for
> why this BIT_*_EXPR code is anything but dead.

The second patch puts an unrelated simplification into this function.

> And in light of this, even the BIT_NOT_EXPR handling is anything but dead
> code (and should be transformed to XOR), so that we are able to handle cases
> like:
>
> int x, D3;
> _Bool D1, D2;
>
> D1 = (bool) x;
> D2 = ~x;
> D3 = (int) D2;
>
> and transform them to 'D3 = x ^ 1'

No, because that would be a wrong transformation.
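
Plugging concrete numbers into Kai's example shows why, reading the ~ as
applied to the int x (only the arithmetic here, nothing GCC-specific):

    /* Take x = 1:
         D2 = ~x       : ~1 = -2, which converted to _Bool is 1 (true),
         D3 = (int) D2 : 1,
       whereas the proposed  D3 = x ^ 1  gives 1 ^ 1 = 0.
       The two results differ, so the transformation is not valid.  */
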

>>>     case NE_EXPR:
>>>       if (integer_zerop (op1))
>>>        {
>>> @@ -7412,16 +7409,15 @@ simplify_stmt_using_ranges (gimple_stmt_
>>>
>>>       switch (rhs_code)
>>>        {
>>> +       case BIT_NOT_EXPR:
>>> +         if (TYPE_PRECISION (TREE_TYPE (rhs1)) != 1)
>>> +           break;
>>> +         /* Fall through.  */
>>
>> See above.
>
> See comment above.
>
>>>        case EQ_EXPR:
>>>        case NE_EXPR:
>>> -       case TRUTH_NOT_EXPR:
>>> -       case TRUTH_AND_EXPR:
>>> -       case TRUTH_OR_EXPR:
>>> -        case TRUTH_XOR_EXPR:
>>> -          /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR
>>> +          /* Transform EQ_EXPR, NE_EXPR, BIT_NOT_EXPR into BIT_XOR_EXPR
>>>             or identity if the RHS is zero or one, and the LHS are known
>>> -            to be boolean values.  Transform all TRUTH_*_EXPR into
>>> -             BIT_*_EXPR if both arguments are known to be boolean values.  */
>>> +            to be boolean values.  */
>>>          if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
>>>            return simplify_truth_ops_using_ranges (gsi, stmt);
>>>          break;
>>> @@ -7449,7 +7445,11 @@ simplify_stmt_using_ranges (gimple_stmt_
>>>             if all the bits being cleared are already cleared or
>>>             all the bits being set are already set.  */
>>>          if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
>>> -           return simplify_bit_ops_using_ranges (gsi, stmt);
>>> +           {
>>> +             if (simplify_truth_ops_using_ranges (gsi, stmt))
>>> +               return true;
>>> +             return simplify_bit_ops_using_ranges (gsi, stmt);
>>
>> Stale hunk I suppose.
>
> IMHO it is not stale.  In fact it is the jumping-off point for handling
> bitwise-binary boolean-typed expressions here, and it is essential for
> the second patch.

It becomes stale if you follow my earlier comments.

Richard.

>> Richard.
>>
>>> +           }
>>>          break;
>>>
>>>        CASE_CONVERT:
>>>
>
> Regards,
> Kai
>
