On Friday 01 July 2005 01:05, Vladimir Makarov wrote:
> Andrey Belevantsev wrote:
> > Vladimir Makarov wrote:
> >> I'll look at this PR today.
> >
> > We looked at this issue today.  We think the problem is that the
> > proposed patch to sched_get_condition() treats conditional jumps
> > like COND_EXECs, but it doesn't fix the other places in sched-deps
> > where COND_EXECs are considered.  Maxim Kuvyrkov proposed the
> > attached patch, which allows gcc to bootstrap on ia64 and fixes the
> > testcase in the PR.
>
> It seems that COND_EXEC processing was added long ago, in 2000, with
> only Itanium in mind.  But other targets with conditional execution
> have different forms of jump insns, so Richard Earnshaw made a quick
> fix a year ago.
>
> At first glance the patch looks OK to me, but I am going to
> investigate this problem in more detail.  So I'll probably approve it
> next week (sorry, we have a Canadian holiday tomorrow).

BTW, this is what I have so far to fix Richard Earnshaw's problem.  It
just adds dependencies on all cond_exec insns to the jump at the end of
a basic block when doing intra-block scheduling.  The add_dependence
changes are necessary because it was literally impossible to add this
kind of dependency before: add_dependence simply refuses to add
dependencies between insns with mutually exclusive conditions.

I have not even tried to compile this yet, this is just to give you an
idea of what I want to do.  Does it look like a sane approach?
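To make the "mutually exclusive conditions" test concrete, here is a
tiny standalone model of it in plain C.  All the names below are made
up for illustration and are not the GCC internals; the real test works
on RTL condition expressions through conditions_mutex_p and
reversed_comparison_code.

#include <stdbool.h>
#include <stdio.h>

enum cmp_code { CMP_EQ, CMP_NE };

struct cond
{
  enum cmp_code code;	/* comparison operator */
  int reg;		/* condition register tested */
};

/* Reverse EQ <-> NE, standing in for what reversed_comparison_code
   does for the full set of RTL comparison codes.  */
static enum cmp_code
reverse_code (enum cmp_code code)
{
  return code == CMP_EQ ? CMP_NE : CMP_EQ;
}

/* Toy analogue of conditions_mutex_p: two conditions can never both
   be true when they test the same register with opposite codes.  */
static bool
conds_mutex_p (struct cond c1, struct cond c2)
{
  return c1.reg == c2.reg && reverse_code (c1.code) == c2.code;
}

int
main (void)
{
  struct cond then_arm = { CMP_EQ, 17 };	/* C  ? insn */
  struct cond else_arm = { CMP_NE, 17 };	/* !C ? insn */

  /* Prints 1: the two predicated insns can never both execute.  */
  printf ("%d\n", conds_mutex_p (then_arm, else_arm));
  return 0;
}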

Gr.
Steven

Index: sched-deps.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/sched-deps.c,v
retrieving revision 1.93
diff -u -3 -p -r1.93 sched-deps.c
--- sched-deps.c        25 Jun 2005 02:01:03 -0000      1.93
+++ sched-deps.c        30 Jun 2005 23:24:34 -0000
@@ -171,6 +171,7 @@ sched_get_condition (rtx insn)
   return 0;
 }
 
+
 /* Return nonzero if conditions COND1 and COND2 can never be both true.  */
 
 static int
@@ -184,6 +185,32 @@ conditions_mutex_p (rtx cond1, rtx cond2
     return 1;
   return 0;
 }
+
+/* Return true if INSN1 and INSN2 can never depend on one another because
+   the conditions under which they are executed are mutually exclusive.  */
+bool
+sched_insns_conditions_mutex_p (rtx insn1, rtx insn2)
+{
+  rtx cond1, cond2;
+
+  /* flow.c doesn't handle conditional lifetimes entirely correctly;
+     calls mess up the conditional lifetimes.  */
+  if (!CALL_P (insn1) && !CALL_P (insn2))
+    {
+      cond1 = sched_get_condition (insn1);
+      cond2 = sched_get_condition (insn2);
+      if (cond1 && cond2
+         && conditions_mutex_p (cond1, cond2)
+         /* Make sure first instruction doesn't affect condition of second
+            instruction if switched.  */
+         && !modified_in_p (cond1, insn2)
+         /* Make sure second instruction doesn't affect condition of first
+            instruction if switched.  */
+         && !modified_in_p (cond2, insn1))
+       return true;
+    }
+  return false;
+}
 
 /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
    LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the
@@ -207,26 +232,6 @@ add_dependence (rtx insn, rtx elem, enum
   if (NOTE_P (elem))
     return 0;
 
-  /* flow.c doesn't handle conditional lifetimes entirely correctly;
-     calls mess up the conditional lifetimes.  */
-  /* ??? add_dependence is the wrong place to be eliding dependencies,
-     as that forgets that the condition expressions themselves may
-     be dependent.  */
-  if (!CALL_P (insn) && !CALL_P (elem))
-    {
-      cond1 = sched_get_condition (insn);
-      cond2 = sched_get_condition (elem);
-      if (cond1 && cond2
-         && conditions_mutex_p (cond1, cond2)
-         /* Make sure first instruction doesn't affect condition of second
-            instruction if switched.  */
-         && !modified_in_p (cond1, elem)
-         /* Make sure second instruction doesn't affect condition of first
-            instruction if switched.  */
-         && !modified_in_p (cond2, insn))
-       return 0;
-    }
-
   present_p = 1;
 #ifdef INSN_SCHEDULING
   /* ??? No good way to tell from here whether we're doing interblock
@@ -348,7 +353,10 @@ static void
 add_dependence_list (rtx insn, rtx list, enum reg_note dep_type)
 {
   for (; list; list = XEXP (list, 1))
-    add_dependence (insn, XEXP (list, 0), dep_type);
+    {
+      if (! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
+       add_dependence (insn, XEXP (list, 0), dep_type);
+    }
 }
 
 /* Similar, but free *LISTP at the same time.  */
@@ -360,7 +368,8 @@ add_dependence_list_and_free (rtx insn, 
   for (list = *listp, *listp = NULL; list ; list = next)
     {
       next = XEXP (list, 1);
-      add_dependence (insn, XEXP (list, 0), dep_type);
+      if (! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
+       add_dependence (insn, XEXP (list, 0), dep_type);
       free_INSN_LIST_node (list);
     }
 }
@@ -393,7 +402,7 @@ delete_all_dependences (rtx insn)
 static void
 fixup_sched_groups (rtx insn)
 {
-  rtx link;
+  rtx link, prev_nonnote;
 
   for (link = LOG_LINKS (insn); link ; link = XEXP (link, 1))
     {
@@ -405,14 +414,17 @@ fixup_sched_groups (rtx insn)
          if (XEXP (link, 0) == i)
            goto next_link;
        } while (SCHED_GROUP_P (i));
-      add_dependence (i, XEXP (link, 0), REG_NOTE_KIND (link));
+      if (! sched_insns_conditions_mutex_p (i, XEXP (link, 0)))
+       add_dependence (i, XEXP (link, 0), REG_NOTE_KIND (link));
     next_link:;
     }
 
   delete_all_dependences (insn);
 
-  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote_insn (insn)))
-    add_dependence (insn, prev_nonnote_insn (insn), REG_DEP_ANTI);
+  prev_nonnote = prev_nonnote_insn (insn);
+  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
+      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
+    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
 }
 
 /* Process an insn's memory dependencies.  There are four kinds of
@@ -620,7 +632,8 @@ sched_analyze_1 (struct deps *deps, rtx 
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
-             if (anti_dependence (XEXP (pending_mem, 0), t))
+             if (anti_dependence (XEXP (pending_mem, 0), t)
+                 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
 
              pending = XEXP (pending, 1);
@@ -631,7 +644,8 @@ sched_analyze_1 (struct deps *deps, rtx 
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
-             if (output_dependence (XEXP (pending_mem, 0), t))
+             if (output_dependence (XEXP (pending_mem, 0), t)
+                 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
 
              pending = XEXP (pending, 1);
@@ -759,7 +773,8 @@ sched_analyze_2 (struct deps *deps, rtx 
        pending_mem = deps->pending_read_mems;
        while (pending)
          {
-           if (read_dependence (XEXP (pending_mem, 0), t))
+           if (read_dependence (XEXP (pending_mem, 0), t)
+               && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
              add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
 
            pending = XEXP (pending, 1);
@@ -771,16 +786,17 @@ sched_analyze_2 (struct deps *deps, rtx 
        while (pending)
          {
            if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
-                                t, rtx_varies_p))
-             add_dependence (insn, XEXP (pending, 0), 0);
+                                t, rtx_varies_p)
+               && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
+             add_dependence (insn, XEXP (pending, 0), REG_DEP_TRUE);
 
            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }
 
        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
-         if (!JUMP_P (XEXP (u, 0))
-             || deps_may_trap_p (x))
+         if ((! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
+             && ! sched_insns_conditions_mutex_p (insn, XEXP (u, 0)))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
 
        /* Always add these dependencies to pending_reads, since
@@ -966,7 +982,8 @@ sched_analyze_insn (struct deps *deps, r
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
-             add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
+             if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
+               add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }
@@ -975,7 +992,8 @@ sched_analyze_insn (struct deps *deps, r
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
-             if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
+             if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
+                 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
Index: sched-ebb.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/sched-ebb.c,v
retrieving revision 1.44
diff -u -3 -p -r1.44 sched-ebb.c
--- sched-ebb.c 25 Jun 2005 02:01:03 -0000      1.44
+++ sched-ebb.c 30 Jun 2005 23:24:34 -0000
@@ -454,7 +454,8 @@ add_deps_for_risky_insns (rtx head, rtx 
            /* We can not change the mode of the backward
               dependency because REG_DEP_ANTI has the lowest
               rank.  */
-           if (add_dependence (insn, prev, REG_DEP_ANTI))
+           if (! sched_insns_conditions_mutex_p (insn, prev)
+               && add_dependence (insn, prev, REG_DEP_ANTI))
              add_forward_dependence (prev, insn, REG_DEP_ANTI);
             break;
 
Index: sched-int.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/sched-int.h,v
retrieving revision 1.38
diff -u -3 -p -r1.38 sched-int.h
--- sched-int.h 25 Jun 2005 02:01:03 -0000      1.38
+++ sched-int.h 30 Jun 2005 23:24:35 -0000
@@ -331,6 +331,7 @@ enum INSN_TRAP_CLASS
 extern void print_insn (char *, rtx, int);
 
 /* Functions in sched-deps.c.  */
+extern bool sched_insns_conditions_mutex_p (rtx, rtx);
 extern int add_dependence (rtx, rtx, enum reg_note);
 extern void sched_analyze (struct deps *, rtx, rtx);
 extern void init_deps (struct deps *);
Index: sched-rgn.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/sched-rgn.c,v
retrieving revision 1.96
diff -u -3 -p -r1.96 sched-rgn.c
--- sched-rgn.c 25 Jun 2005 02:01:03 -0000      1.96
+++ sched-rgn.c 30 Jun 2005 23:24:35 -0000
@@ -1881,6 +1881,8 @@ add_branch_dependences (rtx head, rtx ta
      cc0 setters remain at the end because they can't be moved away from
      their cc0 user.
 
+     COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).
+
      Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
      are not moved before reload because we can wind up with register
      allocation failures.  */
@@ -1904,7 +1906,8 @@ add_branch_dependences (rtx head, rtx ta
        {
          if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
            {
-             add_dependence (last, insn, REG_DEP_ANTI);
+             if (! sched_insns_conditions_mutex_p (last, insn))
+               add_dependence (last, insn, REG_DEP_ANTI);
              INSN_REF_COUNT (insn)++;
            }
 
@@ -1930,9 +1933,61 @@ add_branch_dependences (rtx head, rtx ta
        if (INSN_REF_COUNT (insn) != 0)
          continue;
 
-       add_dependence (last, insn, REG_DEP_ANTI);
+       if (! sched_insns_conditions_mutex_p (last, insn))
+         add_dependence (last, insn, REG_DEP_ANTI);
        INSN_REF_COUNT (insn) = 1;
       }
+
+#ifdef HAVE_conditional_execution
+  /* Finally, if the block ends in a jump, and we are doing intra-block
+     scheduling, make sure that the branch depends on any COND_EXEC insns
+     inside the block to avoid moving the COND_EXECs past the branch insn.
+
+     We only have to do this after reload, because (1) before reload there
+     are no COND_EXEC insns, and (2) the region scheduler is an intra-block
+     scheduler after reload.
+
+     FIXME: We could in some cases move COND_EXEC insns past the branch if
+     this scheduler would be a little smarter.  Consider this code:
+
+               T = [addr]
+       C  ?    addr += 4
+       !C ?    X += 12
+       C  ?    T += 1
+       C  ?    jump foo
+
+     On a target with a one cycle stall on a memory access the optimal
+     sequence would be:
+
+               T = [addr]
+       C  ?    addr += 4
+       C  ?    T += 1
+       C  ?    jump foo
+       !C ?    X += 12
+
+     We don't want to put the 'X += 12' before the branch because it just
+     wastes a cycle of execution time when the branch is taken.
+
+     Note that in the example "!C" will always be true.  That is another
+     possible improvement for handling COND_EXECs in this scheduler: it
+     could remove always-true predicates.  */
+
+  if (!reload_completed || ! JUMP_P (tail))
+    return;
+
+  insn = tail;
+  while (insn != head)
+    {
+      insn = PREV_INSN (insn);
+
+      /* Note that we want to add this dependency even when
+        sched_insns_conditions_mutex_p returns true.  The whole point
+        is that we _want_ this dependency, even if these insns really
+        are independent.  */
+      if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
+       add_dependence (tail, insn, REG_DEP_ANTI);
+    }
+#endif
 }
 
 /* Data structures for the computation of data dependences in a regions.  We
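As an aside, the effect of that last sched-rgn.c hunk can be modeled in
isolation.  The following toy sketch (plain C, made-up names, not GCC
code) ties every predicated insn in a block to the block-ending jump,
which is exactly the constraint the new loop records as REG_DEP_ANTI
dependencies:

#include <stdbool.h>
#include <stdio.h>

struct toy_insn
{
  const char *text;
  bool is_cond_exec;
  bool depends_on_jump;	/* stands in for the REG_DEP_ANTI link */
};

/* Walk backwards from the jump that ends the block and tie every
   COND_EXEC insn to it, so no predicated insn can be scheduled
   below the branch.  */
static void
add_branch_deps_for_cond_execs (struct toy_insn *bb, int n)
{
  for (int i = n - 2; i >= 0; i--)	/* bb[n - 1] is the jump */
    if (bb[i].is_cond_exec)
      bb[i].depends_on_jump = true;
}

int
main (void)
{
  struct toy_insn bb[] = {
    { "T = [addr]",     false, false },
    { "C  ? addr += 4", true,  false },
    { "!C ? X += 12",   true,  false },
    { "C  ? T += 1",    true,  false },
    { "C  ? jump foo",  false, false },	/* block-ending jump */
  };

  add_branch_deps_for_cond_execs (bb, 5);
  for (int i = 0; i < 5; i++)
    printf ("%-16s anti-dep on jump: %d\n",
            bb[i].text, bb[i].depends_on_jump);
  return 0;
}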
