On Wed, Jun 18, 2014 at 6:55 AM, Thomas Preud'homme
<thomas.preudho...@arm.com> wrote:
>> From: Richard Biener [mailto:richard.guent...@gmail.com]
>> Sent: Wednesday, June 11, 2014 4:32 PM
>> >
>> >
>> > Is this OK for trunk? Does this bug qualify for a backport patch to
>> > 4.8 and 4.9 branches?
>>
>> This is ok for trunk and also for backporting (after a short while to
>> see if there is any fallout).
>
> Below is the backported patch for 4.8/4.9. Is this ok for both 4.8 and
> 4.9? If yes, how much more should I wait before committing?
>
> Tested on both 4.8 and 4.9 without regression in the testsuite after
> a bootstrap.

This is ok to commit now.

Thanks,
Richard.

> diff --git a/gcc/ChangeLog b/gcc/ChangeLog
> index 1e35bbe..0559b7f 100644
> --- a/gcc/ChangeLog
> +++ b/gcc/ChangeLog
> @@ -1,3 +1,16 @@
> +2014-06-12  Thomas Preud'homme  <thomas.preudho...@arm.com>
> +
> +       PR tree-optimization/61306
> +       * tree-ssa-math-opts.c (struct symbolic_number): Store type of
> +       expression instead of its size.
> +       (do_shift_rotate): Adapt to change in struct symbolic_number. Return
> +       false to prevent optimization when the result is unpredictable due to
> +       arithmetic right shift of a signed type whose highest byte is set.
> +       (verify_symbolic_number_p): Adapt to change in struct symbolic_number.
> +       (find_bswap_1): Likewise. Return NULL to prevent optimization when the
> +       result is unpredictable due to sign extension.
> +       (find_bswap): Adapt to change in struct symbolic_number.
> +
>  2014-06-12  Alan Modra  <amo...@gmail.com>
>
>         PR target/61300
> diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
> index 757cb74..139f23c 100644
> --- a/gcc/testsuite/ChangeLog
> +++ b/gcc/testsuite/ChangeLog
> @@ -1,3 +1,9 @@
> +2014-06-12  Thomas Preud'homme  <thomas.preudho...@arm.com>
> +
> +       * gcc.c-torture/execute/pr61306-1.c: New test.
> +       * gcc.c-torture/execute/pr61306-2.c: Likewise.
> +       * gcc.c-torture/execute/pr61306-3.c: Likewise.
> +
>  2014-06-11  Richard Biener  <rguent...@suse.de>
>
>         PR tree-optimization/61452
> diff --git a/gcc/testsuite/gcc.c-torture/execute/pr61306-1.c 
> b/gcc/testsuite/gcc.c-torture/execute/pr61306-1.c
> new file mode 100644
> index 0000000..ebc90a3
> --- /dev/null
> +++ b/gcc/testsuite/gcc.c-torture/execute/pr61306-1.c
> @@ -0,0 +1,39 @@
> +#ifdef __INT32_TYPE__
> +typedef __INT32_TYPE__ int32_t;
> +#else
> +typedef int int32_t;
> +#endif
> +
> +#ifdef __UINT32_TYPE__
> +typedef __UINT32_TYPE__ uint32_t;
> +#else
> +typedef unsigned uint32_t;
> +#endif
> +
> +#define __fake_const_swab32(x) ((uint32_t)(                  \
> +       (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) |    \
> +       (((uint32_t)(x) & (uint32_t)0x0000ff00UL) <<  8) |    \
> +       (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >>  8) |    \
> +       (( (int32_t)(x) &  (int32_t)0xff000000UL) >> 24)))
> +
> +/* Previous version of bswap optimization failed to consider sign extension
> +   and as a result would replace an expression *not* doing a bswap by a
> +   bswap.  */
> +
> +__attribute__ ((noinline, noclone)) uint32_t
> +fake_bswap32 (uint32_t in)
> +{
> +  return __fake_const_swab32 (in);
> +}
> +
> +int
> +main(void)
> +{
> +  if (sizeof (int32_t) * __CHAR_BIT__ != 32)
> +    return 0;
> +  if (sizeof (uint32_t) * __CHAR_BIT__ != 32)
> +    return 0;
> +  if (fake_bswap32 (0x87654321) != 0xffffff87)
> +    __builtin_abort ();
> +  return 0;
> +}
> diff --git a/gcc/testsuite/gcc.c-torture/execute/pr61306-2.c 
> b/gcc/testsuite/gcc.c-torture/execute/pr61306-2.c
> new file mode 100644
> index 0000000..886ecfd
> --- /dev/null
> +++ b/gcc/testsuite/gcc.c-torture/execute/pr61306-2.c
> @@ -0,0 +1,40 @@
> +#ifdef __INT16_TYPE__
> +typedef __INT16_TYPE__ int16_t;
> +#else
> +typedef short int16_t;
> +#endif
> +
> +#ifdef __UINT32_TYPE__
> +typedef __UINT32_TYPE__ uint32_t;
> +#else
> +typedef unsigned uint32_t;
> +#endif
> +
> +#define __fake_const_swab32(x) ((uint32_t)(                          \
> +       (((uint32_t)         (x) & (uint32_t)0x000000ffUL) << 24) |   \
> +       (((uint32_t)(int16_t)(x) & (uint32_t)0x00ffff00UL) <<  8) |   \
> +       (((uint32_t)         (x) & (uint32_t)0x00ff0000UL) >>  8) |   \
> +       (((uint32_t)         (x) & (uint32_t)0xff000000UL) >> 24)))
> +
> +
> +/* Previous version of bswap optimization failed to consider sign extension
> +   and as a result would replace an expression *not* doing a bswap by a
> +   bswap.  */
> +
> +__attribute__ ((noinline, noclone)) uint32_t
> +fake_bswap32 (uint32_t in)
> +{
> +  return __fake_const_swab32 (in);
> +}
> +
> +int
> +main(void)
> +{
> +  if (sizeof (uint32_t) * __CHAR_BIT__ != 32)
> +    return 0;
> +  if (sizeof (int16_t) * __CHAR_BIT__ != 16)
> +    return 0;
> +  if (fake_bswap32 (0x81828384) != 0xff838281)
> +    __builtin_abort ();
> +  return 0;
> +}
> diff --git a/gcc/testsuite/gcc.c-torture/execute/pr61306-3.c 
> b/gcc/testsuite/gcc.c-torture/execute/pr61306-3.c
> new file mode 100644
> index 0000000..6086e27
> --- /dev/null
> +++ b/gcc/testsuite/gcc.c-torture/execute/pr61306-3.c
> @@ -0,0 +1,13 @@
> +short a = -1;
> +int b;
> +char c;
> +
> +int
> +main ()
> +{
> +  c = a;
> +  b = a | c;
> +  if (b != -1)
> +    __builtin_abort ();
> +  return 0;
> +}
> diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
> index 9ff857c..2b656ae 100644
> --- a/gcc/tree-ssa-math-opts.c
> +++ b/gcc/tree-ssa-math-opts.c
> @@ -1620,7 +1620,7 @@ make_pass_cse_sincos (gcc::context *ctxt)
>
>  struct symbolic_number {
>    unsigned HOST_WIDEST_INT n;
> -  int size;
> +  tree type;
>  };
>
>  /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
> @@ -1632,13 +1632,15 @@ do_shift_rotate (enum tree_code code,
>                  struct symbolic_number *n,
>                  int count)
>  {
> +  int bitsize = TYPE_PRECISION (n->type);
> +
>    if (count % 8 != 0)
>      return false;
>
>    /* Zero out the extra bits of N in order to avoid them being shifted
>       into the significant bits.  */
> -  if (n->size < (int)sizeof (HOST_WIDEST_INT))
> -    n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
> +  if (bitsize < 8 * (int)sizeof (HOST_WIDEST_INT))
> +    n->n &= ((unsigned HOST_WIDEST_INT)1 << bitsize) - 1;
>
>    switch (code)
>      {
> @@ -1646,20 +1648,23 @@ do_shift_rotate (enum tree_code code,
>        n->n <<= count;
>        break;
>      case RSHIFT_EXPR:
> +      /* Arithmetic shift of signed type: result is dependent on the value.  
> */
> +      if (!TYPE_UNSIGNED (n->type) && (n->n & (0xff << (bitsize - 8))))
> +       return false;
>        n->n >>= count;
>        break;
>      case LROTATE_EXPR:
> -      n->n = (n->n << count) | (n->n >> ((n->size * BITS_PER_UNIT) - count));
> +      n->n = (n->n << count) | (n->n >> (bitsize - count));
>        break;
>      case RROTATE_EXPR:
> -      n->n = (n->n >> count) | (n->n << ((n->size * BITS_PER_UNIT) - count));
> +      n->n = (n->n >> count) | (n->n << (bitsize - count));
>        break;
>      default:
>        return false;
>      }
>    /* Zero unused bits for size.  */
> -  if (n->size < (int)sizeof (HOST_WIDEST_INT))
> -    n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
> +  if (bitsize < 8 * (int)sizeof (HOST_WIDEST_INT))
> +    n->n &= ((unsigned HOST_WIDEST_INT)1 << bitsize) - 1;
>    return true;
>  }
>
> @@ -1676,7 +1681,7 @@ verify_symbolic_number_p (struct symbolic_number *n, 
> gimple stmt)
>    if (TREE_CODE (lhs_type) != INTEGER_TYPE)
>      return false;
>
> -  if (TYPE_PRECISION (lhs_type) != n->size * BITS_PER_UNIT)
> +  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
>      return false;
>
>    return true;
> @@ -1733,20 +1738,23 @@ find_bswap_1 (gimple stmt, struct symbolic_number *n, 
> int limit)
>          to initialize the symbolic number.  */
>        if (!source_expr1)
>         {
> +         int size;
> +
>           /* Set up the symbolic number N by setting each byte to a
>              value between 1 and the byte size of rhs1.  The highest
>              order byte is set to n->size and the lowest order
>              byte to 1.  */
> -         n->size = TYPE_PRECISION (TREE_TYPE (rhs1));
> -         if (n->size % BITS_PER_UNIT != 0)
> +         n->type = TREE_TYPE (rhs1);
> +         size = TYPE_PRECISION (n->type);
> +         if (size % BITS_PER_UNIT != 0)
>             return NULL_TREE;
> -         n->size /= BITS_PER_UNIT;
> +         size /= BITS_PER_UNIT;
>           n->n = (sizeof (HOST_WIDEST_INT) < 8 ? 0 :
>                   (unsigned HOST_WIDEST_INT)0x08070605 << 32 | 0x04030201);
>
> -         if (n->size < (int)sizeof (HOST_WIDEST_INT))
> +         if (size < (int)sizeof (HOST_WIDEST_INT))
>             n->n &= ((unsigned HOST_WIDEST_INT)1 <<
> -                    (n->size * BITS_PER_UNIT)) - 1;
> +                    (size * BITS_PER_UNIT)) - 1;
>
>           source_expr1 = rhs1;
>         }
> @@ -1755,12 +1763,12 @@ find_bswap_1 (gimple stmt, struct symbolic_number *n, 
> int limit)
>         {
>         case BIT_AND_EXPR:
>           {
> -           int i;
> +           int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
>             unsigned HOST_WIDEST_INT val = widest_int_cst_value (rhs2);
>             unsigned HOST_WIDEST_INT tmp = val;
>
>             /* Only constants masking full bytes are allowed.  */
> -           for (i = 0; i < n->size; i++, tmp >>= BITS_PER_UNIT)
> +           for (i = 0; i < size; i++, tmp >>= BITS_PER_UNIT)
>               if ((tmp & 0xff) != 0 && (tmp & 0xff) != 0xff)
>                 return NULL_TREE;
>
> @@ -1776,19 +1784,28 @@ find_bswap_1 (gimple stmt, struct symbolic_number *n, 
> int limit)
>           break;
>         CASE_CONVERT:
>           {
> -           int type_size;
> +           int type_size, old_type_size;
> +           tree type;
>
> -           type_size = TYPE_PRECISION (gimple_expr_type (stmt));
> +           type = gimple_expr_type (stmt);
> +           type_size = TYPE_PRECISION (type);
>             if (type_size % BITS_PER_UNIT != 0)
>               return NULL_TREE;
>
> +           /* Sign extension: result is dependent on the value.  */
> +           old_type_size = TYPE_PRECISION (n->type);
> +           if (!TYPE_UNSIGNED (n->type)
> +               && type_size > old_type_size
> +               && n->n & (0xff << (old_type_size - 8)))
> +             return NULL_TREE;
> +
>             if (type_size / BITS_PER_UNIT < (int)(sizeof (HOST_WIDEST_INT)))
>               {
>                 /* If STMT casts to a smaller type mask out the bits not
>                    belonging to the target type.  */
>                 n->n &= ((unsigned HOST_WIDEST_INT)1 << type_size) - 1;
>               }
> -           n->size = type_size / BITS_PER_UNIT;
> +           n->type = type;
>           }
>           break;
>         default:
> @@ -1801,7 +1818,7 @@ find_bswap_1 (gimple stmt, struct symbolic_number *n, 
> int limit)
>
>    if (rhs_class == GIMPLE_BINARY_RHS)
>      {
> -      int i;
> +      int i, size;
>        struct symbolic_number n1, n2;
>        unsigned HOST_WIDEST_INT mask;
>        tree source_expr2;
> @@ -1825,11 +1842,12 @@ find_bswap_1 (gimple stmt, struct symbolic_number *n, 
> int limit)
>           source_expr2 = find_bswap_1 (rhs2_stmt, &n2, limit - 1);
>
>           if (source_expr1 != source_expr2
> -             || n1.size != n2.size)
> +             || TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
>             return NULL_TREE;
>
> -         n->size = n1.size;
> -         for (i = 0, mask = 0xff; i < n->size; i++, mask <<= BITS_PER_UNIT)
> +         n->type = n1.type;
> +         size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
> +         for (i = 0, mask = 0xff; i < size; i++, mask <<= BITS_PER_UNIT)
>             {
>               unsigned HOST_WIDEST_INT masked1, masked2;
>
> @@ -1868,7 +1886,7 @@ find_bswap (gimple stmt)
>
>    struct symbolic_number n;
>    tree source_expr;
> -  int limit;
> +  int limit, bitsize;
>
>    /* The last parameter determines the depth search limit.  It usually
>       correlates directly to the number of bytes to be touched.  We
> @@ -1883,13 +1901,14 @@ find_bswap (gimple stmt)
>      return NULL_TREE;
>
>    /* Zero out the extra bits of N and CMP.  */
> -  if (n.size < (int)sizeof (HOST_WIDEST_INT))
> +  bitsize = TYPE_PRECISION (n.type);
> +  if (bitsize < 8 * (int)sizeof (HOST_WIDEST_INT))
>      {
>        unsigned HOST_WIDEST_INT mask =
> -       ((unsigned HOST_WIDEST_INT)1 << (n.size * BITS_PER_UNIT)) - 1;
> +       ((unsigned HOST_WIDEST_INT)1 << bitsize) - 1;
>
>        n.n &= mask;
> -      cmp >>= (sizeof (HOST_WIDEST_INT) - n.size) * BITS_PER_UNIT;
> +      cmp >>= sizeof (HOST_WIDEST_INT) * BITS_PER_UNIT - bitsize;
>      }
>
>    /* A complete byte swap should make the symbolic number to start
>
> Best regards,
>
> Thomas
>
>

Reply via email to