------- Comment #7 from amonakov at gcc dot gnu dot org 2010-01-11 15:04 -------
Our previous patch (http://gcc.gnu.org/ml/gcc-patches/2009-12/msg01215.html)
did not fix the problem correctly, and the new testcase uncovers a flaw in that
implementation: we forgot to recompute the topological order when it is
invalidated in tidy_control_flow itself rather than in maybe_tidy_empty_bb,
which is called from that function.  Fixed by passing recompute_toporder_p to
the latter, on top of the previous patch, as below (the patch also makes
maybe_tidy_empty_bb static by moving its only caller into the same file).
	* sel-sched-ir.c (maybe_tidy_empty_bb): Make static.  Add new
	argument.  Update all callers.
	(purge_empty_blocks): Export and move from...
	* sel-sched.c (purge_empty_blocks): ... here.  Delete.
	* sel-sched-ir.h (maybe_tidy_empty_bb): Delete prototype.
	(purge_empty_blocks): Declare.
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index cffee1c..e20df17 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -3538,13 +3538,13 @@ sel_recompute_toporder (void)
}
/* Tidy the possibly empty block BB. */
-bool
-maybe_tidy_empty_bb (basic_block bb)
+static bool
+maybe_tidy_empty_bb (basic_block bb, bool recompute_toporder_p)
{
basic_block succ_bb, pred_bb;
edge e;
edge_iterator ei;
- bool rescan_p, recompute_toporder_p = false;
+ bool rescan_p;
/* Keep empty bb only if this block immediately precedes EXIT and
has incoming non-fallthrough edge, or it has no predecessors or
@@ -3630,7 +3630,7 @@ tidy_control_flow (basic_block xbb, bool full_tidying)
insn_t first, last;
/* First check whether XBB is empty. */
- changed = maybe_tidy_empty_bb (xbb);
+ changed = maybe_tidy_empty_bb (xbb, false);
if (changed || !full_tidying)
return changed;
@@ -3694,7 +3694,7 @@ tidy_control_flow (basic_block xbb, bool full_tidying)
that contained that jump, becomes empty too. In such case
remove it too. */
if (sel_bb_empty_p (xbb->prev_bb))
- changed = maybe_tidy_empty_bb (xbb->prev_bb);
+ changed = maybe_tidy_empty_bb (xbb->prev_bb, recompute_toporder_p);
else if (recompute_toporder_p)
sel_recompute_toporder ();
}
@@ -3702,6 +3702,24 @@ tidy_control_flow (basic_block xbb, bool full_tidying)
return changed;
}
+/* Purge meaningless empty blocks in the middle of a region. */
+void
+purge_empty_blocks (void)
+{
+ /* Do not attempt to delete preheader. */
+ int i = sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0))) ? 1 : 0;
+
+ while (i < current_nr_blocks)
+ {
+ basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
+
+ if (maybe_tidy_empty_bb (b, false))
+ continue;
+
+ i++;
+ }
+}
+
/* Rip-off INSN from the insn stream. When ONLY_DISCONNECT is true,
do not delete insn's data, because it will be later re-emitted.
Return true if we have removed some blocks afterwards. */
diff --git a/gcc/sel-sched-ir.h b/gcc/sel-sched-ir.h
index 317258c..b5121c0 100644
--- a/gcc/sel-sched-ir.h
+++ b/gcc/sel-sched-ir.h
@@ -1619,7 +1619,7 @@ extern bool tidy_control_flow (basic_block, bool);
extern void free_bb_note_pool (void);
extern void sel_remove_empty_bb (basic_block, bool, bool);
-extern bool maybe_tidy_empty_bb (basic_block bb);
+extern void purge_empty_blocks (void);
extern basic_block sel_split_edge (edge);
extern basic_block sel_create_recovery_block (insn_t);
extern void sel_merge_blocks (basic_block, basic_block);
diff --git a/gcc/sel-sched.c b/gcc/sel-sched.c
index 37be754..9271b80 100644
--- a/gcc/sel-sched.c
+++ b/gcc/sel-sched.c
@@ -6790,24 +6790,6 @@ setup_current_loop_nest (int rgn)
gcc_assert (LOOP_MARKED_FOR_PIPELINING_P (current_loop_nest));
}
-/* Purge meaningless empty blocks in the middle of a region. */
-static void
-purge_empty_blocks (void)
-{
- /* Do not attempt to delete preheader. */
- int i = sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0))) ? 1 : 0;
-
- while (i < current_nr_blocks)
- {
- basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
-
- if (maybe_tidy_empty_bb (b))
- continue;
-
- i++;
- }
-}
-
/* Compute instruction priorities for current region. */
static void
sel_compute_priorities (int rgn)
--
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=42245