At some point we grew a shift_truncation_mask hook, but we are not
using it everywhere we mask shift counts. This patch converts the
instances I found to use the hook.
Bernd
* simplify-rtx.c (simplify_const_binary_operation): Use the
shift_truncation_mask hook instead of performing modulo by
width. Compare against mode precision, not bitsize.
* combine.c (combine_simplify_rtx, simplify_shift_const_1):
Use shift_truncation_mask instead of constructing the value
manually.
Index: gcc/simplify-rtx.c
===================================================================
--- gcc/simplify-rtx.c.orig
+++ gcc/simplify-rtx.c
@@ -3704,8 +3704,8 @@ simplify_const_binary_operation (enum rt
shift_truncation_mask, since the shift might not be part of an
ashlM3, lshrM3 or ashrM3 instruction. */
if (SHIFT_COUNT_TRUNCATED)
- arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
- else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
+ arg1 &= targetm.shift_truncation_mask (mode);
+ else if (arg1 < 0 || arg1 >= GET_MODE_PRECISION (mode))
return 0;
val = (code == ASHIFT
Index: gcc/combine.c
===================================================================
--- gcc/combine.c.orig
+++ gcc/combine.c
@@ -5941,9 +5941,7 @@ combine_simplify_rtx (rtx x, enum machin
else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
SUBST (XEXP (x, 1),
force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
- ((unsigned HOST_WIDE_INT) 1
- << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
- - 1,
+ targetm.shift_truncation_mask (GET_MODE (x)),
0));
break;
@@ -9896,7 +9894,7 @@ simplify_shift_const_1 (enum rtx_code co
want to do this inside the loop as it makes it more difficult to
combine shifts. */
if (SHIFT_COUNT_TRUNCATED)
- orig_count &= GET_MODE_BITSIZE (mode) - 1;
+ orig_count &= targetm.shift_truncation_mask (mode);
/* If we were given an invalid count, don't do anything except exactly
what was requested. */