Here we have a testcase that affects both the C++ memory model and
transactional memory.
[Hans, this is caused by the same problem that is causing the
speculative register promotion issue you and Torvald pointed me at].
In the following testcase (adapted from the PR), the loop invariant
motion pass caches a pre-existing value for g_2, and then performs a
store to g_2 on every path, causing a store data race:
/* Testcase adapted from the PR: LIM caches the pre-existing value of
   g_2 and then stores to g_2 on every exit path, creating a store that
   the abstract machine never performs when g_1 is true.  */
int g_1 = 1;
int g_2 = 0;
int func_1(void)
{
int l;
for (l = 0; l < 1234; l++)
{
if (g_1)
return l;
else
g_2 = 0; /* Store to g_2 should only happen if !g_1.  */
}
return 999;
}
This gets transformed into something like:
g_2_lsm = g_2;
if (g_1) {
g_2 = g_2_lsm; // boo! hiss!
return 0;
} else {
g_2_lsm = 0;
g_2 = g_2_lsm;
}
The spurious write to g_2 could cause a data race.
Andrew has suggested verifying that the store to g_2 was actually
different than on entry, and letting PHI copy propagation optimize the
redundant comparisons. Like this:
g_2_lsm = g_2;
if (g_1) {
if (g_2_lsm != g_2) // hopefully optimized away
g_2 = g_2_lsm; // hopefully optimized away
return 0;
} else {
g_2_lsm = 0;
if (g_2_lsm != g_2) // hopefully optimized away
g_2 = g_2_lsm;
}
...which PHI copy propagation and/or friends should be able to optimize.
For that matter, regardless of the memory model, the proposed solution
would improve the existing code by removing the extra store (in this
contrived test case anyhow).
Anyone see a problem with this approach (see attached patch)?
Assuming the unlikely scenario that everyone likes this :), I have two
problems that I would like answered. But feel free to ignore if the
approach is a no go.
1. No pass can figure out the equality (or inequality) of g_2_lsm and
g_2. In the PR, Richard mentions that both FRE/PRE can figure this out,
but they are not run after store motion. DOM should also be able to,
but apparently does not :(. Tips?
2. The GIMPLE_CONDs I create in this patch are currently causing
problems with complex floats/doubles. do_jump is complaining that it
can't compare them, so I assume it is too late (in tree-ssa-loop-im.c)
to compare complex values since complex lowering has already happened
(??). Is there a more canonical way of creating a GIMPLE_COND that may
contain complex floats at this stage?
Thanks folks.
* tree-ssa-loop-im.c (execute_sm): Guard store motion with a
conditional.
* opts.c (finish_options): Do not allow store or load data races
with -fgnu-tm.
Index: tree-ssa-loop-im.c
===================================================================
--- tree-ssa-loop-im.c (revision 186108)
+++ tree-ssa-loop-im.c (working copy)
@@ -1999,8 +1999,59 @@ execute_sm (struct loop *loop, VEC (edge
FOR_EACH_VEC_ELT (edge, exits, i, ex)
{
- store = gimple_build_assign (unshare_expr (ref->mem), tmp_var);
- gsi_insert_on_edge (ex, store);
+ basic_block new_bb, then_bb, old_dest;
+ edge then_old_edge;
+ gimple_stmt_iterator gsi;
+ gimple stmt;
+ tree t1;
+
+ if (PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
+ {
+ store = gimple_build_assign (unshare_expr (ref->mem), tmp_var);
+ gsi_insert_on_edge (ex, store);
+ }
+ else
+ {
+ old_dest = ex->dest;
+ new_bb = split_edge (ex);
+ then_bb = create_empty_bb (new_bb);
+ if (current_loops && new_bb->loop_father)
+ add_bb_to_loop (then_bb, new_bb->loop_father);
+
+ gsi = gsi_start_bb (new_bb);
+ t1 = make_rename_temp (TREE_TYPE (ref->mem), NULL);
+ stmt = gimple_build_assign (t1, unshare_expr (ref->mem));
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+ stmt = gimple_build_cond (NE_EXPR, t1, tmp_var,
+ NULL_TREE, NULL_TREE);
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+
+ gsi = gsi_start_bb (then_bb);
+ store = gimple_build_assign (unshare_expr (ref->mem), tmp_var);
+ gsi_insert_after (&gsi, store, GSI_CONTINUE_LINKING);
+
+ make_edge (new_bb, then_bb, EDGE_TRUE_VALUE);
+ make_edge (new_bb, old_dest, EDGE_FALSE_VALUE);
+ then_old_edge = make_edge (then_bb, old_dest, EDGE_FALLTHRU);
+ set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
+
+ for (gsi = gsi_start_phis (old_dest); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple phi = gsi_stmt (gsi);
+ unsigned i;
+
+ for (i = 0; i < gimple_phi_num_args (phi); i++)
+ if (gimple_phi_arg_edge (phi, i)->src == new_bb)
+ {
+ tree arg = gimple_phi_arg_def (phi, i);
+ add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
+ update_stmt (phi);
+ }
+ }
+ /* Remove the original fall through edge. This was the
+ single_succ_edge (new_bb). */
+ EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
+ }
}
}
Index: opts.c
===================================================================
--- opts.c (revision 186108)
+++ opts.c (working copy)
@@ -663,8 +663,16 @@ finish_options (struct gcc_options *opts
opts->x_flag_toplevel_reorder = 0;
}
- if (opts->x_flag_tm && opts->x_flag_non_call_exceptions)
- sorry ("transactional memory is not supported with non-call exceptions");
+ if (opts->x_flag_tm)
+ {
+ if (opts->x_flag_non_call_exceptions)
+ sorry ("transactional memory is not supported with non-call exceptions");
+
+ set_param_value ("allow-load-data-races", 0,
+ opts->x_param_values, opts_set->x_param_values);
+ set_param_value ("allow-store-data-races", 0,
+ opts->x_param_values, opts_set->x_param_values);
+ }
/* -Wmissing-noreturn is alias for -Wsuggest-attribute=noreturn. */
if (opts->x_warn_missing_noreturn)