https://gcc.gnu.org/g:94ef5ce0a23707d5e9097a8f1c1d1fd54af9c58f
commit 94ef5ce0a23707d5e9097a8f1c1d1fd54af9c58f
Author: Alexandre Oliva <ol...@gnu.org>
Date:   Tue Dec 10 06:49:32 2024 -0300

    ifcombine field-merge: saturate align at load size

Diff:
---
 gcc/gimple-fold.cc | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index a31fc283d51b..8f7380928a4e 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -8204,16 +8204,21 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
 	 to be relative to a field of that size.  */
       first_bit = MIN (ll_bitpos, rl_bitpos);
       end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
+      HOST_WIDE_INT align = TYPE_ALIGN (TREE_TYPE (ll_inner));
+      /* Guard from types with wider-than-size alignment.  We must not widen the
+	 load beyond its total size.  This is rare.  */
+      while (align > BITS_PER_UNIT
+	     && known_gt ((unsigned HOST_WIDE_INT)align,
+			  tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (ll_inner)))))
+	align /= 2;
       if (get_best_mode (end_bit - first_bit, first_bit, 0, 0,
-			 TYPE_ALIGN (TREE_TYPE (ll_inner)), BITS_PER_WORD,
-			 volatilep, &lnmode))
+			 align, BITS_PER_WORD, volatilep, &lnmode))
 	l_split_load = false;
       else
 	{
 	  /* Consider the possibility of recombining loads if any of the
 	     fields straddles across an alignment boundary, so that either
 	     part can be loaded along with the other field.  */
-	  HOST_WIDE_INT align = TYPE_ALIGN (TREE_TYPE (ll_inner));
 	  HOST_WIDE_INT boundary
 	    = compute_split_boundary_from_align (align,
						 ll_bitpos, ll_bitsize,
						 rl_bitpos, rl_bitsize);