The following removes calls to fold_call_stmt, as that does not end up
exercising all the match.pd rules we now have for builtins.  Instead,
use gimple_fold_stmt_to_constant[_1].
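For reference, the replacement pattern looks roughly like the
following (a minimal sketch only; my_valueize and the surrounding
lines are illustrative, not part of the patch):

  /* Identity valueize hook; a pass with a lattice would map SSA
     names to their known values here instead.  */

  static tree
  my_valueize (tree t)
  {
    return t;
  }

  ...
  /* Fold STMT via the generic GIMPLE folders (and thus match.pd).  */
  tree val = gimple_fold_stmt_to_constant (stmt, my_valueize);
  if (val && is_gimple_min_invariant (val))
    /* STMT computes a constant; propagate it into all uses.  */
    replace_uses_by (gimple_get_lhs (stmt), val);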
It also removes some awkward manual re-folding code from
tree-ssa-threadedge.c and notes further cleanup opportunities around
its callback.

Bootstrapped and tested on x86_64-unknown-linux-gnu, applied to trunk.

Richard.

2015-10-26  Richard Biener  <rguent...@suse.de>

	* tree-object-size.c: Remove builtins.h include, include
	tree-cfg.h.
	(do_valueize): New function.
	(pass_object_sizes::execute): Use gimple_fold_stmt_to_constant
	and replace_uses_by.
	* tree-ssa-threadedge.c: Remove builtins.h include, include
	gimple-fold.h.
	(fold_assignment_stmt): Remove.
	(threadedge_valueize): New function.
	(record_temporary_equivalences_from_stmts): Use
	gimple_fold_stmt_to_constant_1, note additional cleanup
	opportunities.

Index: gcc/tree-object-size.c
===================================================================
*** gcc/tree-object-size.c	(revision 229311)
--- gcc/tree-object-size.c	(working copy)
*************** along with GCC; see the file COPYING3.
*** 36,42 ****
  #include "gimple-iterator.h"
  #include "tree-pass.h"
  #include "tree-ssa-propagate.h"
! #include "builtins.h"
  
  struct object_size_info
  {
--- 36,42 ----
  #include "gimple-iterator.h"
  #include "tree-pass.h"
  #include "tree-ssa-propagate.h"
! #include "tree-cfg.h"
  
  struct object_size_info
  {
*************** public:
*** 1231,1236 ****
--- 1231,1244 ----
  }; // class pass_object_sizes
  
+ /* Dummy valueize function.  */
+ 
+ static tree
+ do_valueize (tree t)
+ {
+   return t;
+ }
+ 
  unsigned int
  pass_object_sizes::execute (function *fun)
  {
*************** pass_object_sizes::execute (function *fu
*** 1287,1293 ****
  	      continue;
  	    }
  
! 	  result = fold_call_stmt (as_a <gcall *> (call), false);
  	  if (!result)
  	    {
  	      tree ost = gimple_call_arg (call, 1);
--- 1295,1305 ----
  	      continue;
  	    }
  
! 	  tree lhs = gimple_call_lhs (call);
! 	  if (!lhs)
! 	    continue;
! 
! 	  result = gimple_fold_stmt_to_constant (call, do_valueize);
  	  if (!result)
  	    {
  	      tree ost = gimple_call_arg (call, 1);
*************** pass_object_sizes::execute (function *fu
*** 1318,1339 ****
  	      fprintf (dump_file, "\n");
  	    }
  
- 	  tree lhs = gimple_call_lhs (call);
- 	  if (!lhs)
- 	    continue;
- 
  	  /* Propagate into all uses and fold those stmts.  */
! 	  gimple *use_stmt;
! 	  imm_use_iterator iter;
! 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
! 	    {
! 	      use_operand_p use_p;
! 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
! 		SET_USE (use_p, result);
! 	      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
! 	      fold_stmt (&gsi);
! 	      update_stmt (gsi_stmt (gsi));
! 	    }
  	}
      }
--- 1330,1337 ----
  	      fprintf (dump_file, "\n");
  	    }
  
  	  /* Propagate into all uses and fold those stmts.  */
! 	  replace_uses_by (lhs, result);
  	}
      }

Index: gcc/tree-ssa-threadedge.c
===================================================================
*** gcc/tree-ssa-threadedge.c	(revision 229311)
--- gcc/tree-ssa-threadedge.c	(working copy)
*************** along with GCC; see the file COPYING3.
*** 36,42 ****
  #include "tree-ssa-threadedge.h"
  #include "tree-ssa-threadbackward.h"
  #include "tree-ssa-dom.h"
! #include "builtins.h"
  
  /* To avoid code explosion due to jump threading, we limit the
     number of statements we are going to copy.  This variable
--- 36,42 ----
  #include "tree-ssa-threadedge.h"
  #include "tree-ssa-threadbackward.h"
  #include "tree-ssa-dom.h"
! #include "gimple-fold.h"
  
  /* To avoid code explosion due to jump threading, we limit the
     number of statements we are going to copy.  This variable
*************** record_temporary_equivalences_from_phis
*** 180,233 ****
    return true;
  }
  
! /* Fold the RHS of an assignment statement and return it as a tree.
!    May return NULL_TREE if no simplification is possible.  */
  
  static tree
! fold_assignment_stmt (gimple *stmt)
  {
!   enum tree_code subcode = gimple_assign_rhs_code (stmt);
! 
!   switch (get_gimple_rhs_class (subcode))
      {
!     case GIMPLE_SINGLE_RHS:
!       return fold (gimple_assign_rhs1 (stmt));
! 
!     case GIMPLE_UNARY_RHS:
!       {
! 	tree lhs = gimple_assign_lhs (stmt);
! 	tree op0 = gimple_assign_rhs1 (stmt);
! 	return fold_unary (subcode, TREE_TYPE (lhs), op0);
!       }
! 
!     case GIMPLE_BINARY_RHS:
!       {
! 	tree lhs = gimple_assign_lhs (stmt);
! 	tree op0 = gimple_assign_rhs1 (stmt);
! 	tree op1 = gimple_assign_rhs2 (stmt);
! 	return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
!       }
! 
!     case GIMPLE_TERNARY_RHS:
!       {
! 	tree lhs = gimple_assign_lhs (stmt);
! 	tree op0 = gimple_assign_rhs1 (stmt);
! 	tree op1 = gimple_assign_rhs2 (stmt);
! 	tree op2 = gimple_assign_rhs3 (stmt);
! 
! 	/* Sadly, we have to handle conditional assignments specially
! 	   here, because fold expects all the operands of an expression
! 	   to be folded before the expression itself is folded, but we
! 	   can't just substitute the folded condition here.  */
! 	if (gimple_assign_rhs_code (stmt) == COND_EXPR)
! 	  op0 = fold (op0);
! 
! 	return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
!       }
! 
!     default:
!       gcc_unreachable ();
      }
  }
  
  /* Try to simplify each statement in E->dest, ultimately leading to
--- 180,197 ----
    return true;
  }
  
! /* Valueize hook for gimple_fold_stmt_to_constant_1.  */
  
  static tree
! threadedge_valueize (tree t)
  {
!   if (TREE_CODE (t) == SSA_NAME)
      {
!       tree tem = SSA_NAME_VALUE (t);
!       if (tem)
! 	return tem;
      }
+   return t;
  }
  
  /* Try to simplify each statement in E->dest, ultimately leading to
*************** record_temporary_equivalences_from_stmts
*** 371,418 ****
        else
  	{
  	  /* A statement that is not a trivial copy or ASSERT_EXPR.
! 	     We're going to temporarily copy propagate the operands
! 	     and see if that allows us to simplify this statement.  */
! 	  tree *copy;
! 	  ssa_op_iter iter;
! 	  use_operand_p use_p;
! 	  unsigned int num, i = 0;
! 
! 	  num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
! 	  copy = XCNEWVEC (tree, num);
! 
! 	  /* Make a copy of the uses & vuses into USES_COPY, then cprop into
! 	     the operands.  */
! 	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
! 	    {
! 	      tree tmp = NULL;
! 	      tree use = USE_FROM_PTR (use_p);
! 
! 	      copy[i++] = use;
! 	      if (TREE_CODE (use) == SSA_NAME)
! 		tmp = SSA_NAME_VALUE (use);
! 	      if (tmp)
! 		SET_USE (use_p, tmp);
! 	    }
! 
! 	  /* Try to fold/lookup the new expression.  Inserting the
  	     expression into the hash table is unlikely to help.  */
! 	  if (is_gimple_call (stmt))
! 	    cached_lhs = fold_call_stmt (as_a <gcall *> (stmt), false);
! 	  else
! 	    cached_lhs = fold_assignment_stmt (stmt);
! 
  	  if (!cached_lhs
  	      || (TREE_CODE (cached_lhs) != SSA_NAME
  		  && !is_gimple_min_invariant (cached_lhs)))
! 	    cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);
! 	  /* Restore the statement's original uses/defs.  */
! 	  i = 0;
! 	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
! 	    SET_USE (use_p, copy[i++]);
! 	  free (copy);
  	}
  
      /* Record the context sensitive equivalence if we were able
--- 335,384 ----
        else
  	{
  	  /* A statement that is not a trivial copy or ASSERT_EXPR.
! 	     Try to fold the new expression.  Inserting the
  	     expression into the hash table is unlikely to help.  */
! 	  /* ??? The DOM callback below can be changed to setting
! 	     the mprts_hook around the call to thread_across_edge,
! 	     avoiding the use substitution.  The VRP hook should be
! 	     changed to properly valueize operands itself using
! 	     SSA_NAME_VALUE in addition to its own lattice.  */
! 	  cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
! 						       threadedge_valueize);
  	  if (!cached_lhs
  	      || (TREE_CODE (cached_lhs) != SSA_NAME
  		  && !is_gimple_min_invariant (cached_lhs)))
! 	    {
! 	      /* We're going to temporarily copy propagate the operands
! 		 and see if that allows us to simplify this statement.  */
! 	      tree *copy;
! 	      ssa_op_iter iter;
! 	      use_operand_p use_p;
! 	      unsigned int num, i = 0;
! 
! 	      num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
! 	      copy = XALLOCAVEC (tree, num);
! 
! 	      /* Make a copy of the uses & vuses into USES_COPY, then cprop into
! 		 the operands.  */
! 	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
! 		{
! 		  tree tmp = NULL;
! 		  tree use = USE_FROM_PTR (use_p);
! 
! 		  copy[i++] = use;
! 		  if (TREE_CODE (use) == SSA_NAME)
! 		    tmp = SSA_NAME_VALUE (use);
! 		  if (tmp)
! 		    SET_USE (use_p, tmp);
! 		}
! 	      cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);
! 	      /* Restore the statement's original uses/defs.  */
! 	      i = 0;
! 	      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
! 		SET_USE (use_p, copy[i++]);
! 	    }
  	}
  
      /* Record the context sensitive equivalence if we were able
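To illustrate what the valueize hook buys during threading, consider
a hypothetical situation (the statement and recorded value below are
made up for illustration, not taken from the patch):

  /* Suppose DOM has recorded the temporary equivalence
       SSA_NAME_VALUE (b_2) == 7
     and we reach the statement

       a_1 = b_2 + 1;

     threadedge_valueize maps b_2 to 7, so
     gimple_fold_stmt_to_constant_1 (stmt, threadedge_valueize)
     can fold the statement to 8 directly.  The old
     substitute-operands / fold / restore-operands sequence now
     only runs as a fallback feeding the simplify callback.  */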