On Fri, Jun 26, 2015 at 01:07:09PM +0100, Matthew Wahab wrote:
> 
> This patch backports the changes made to strengthen the barriers emitted for
> the __sync fetch-and-op/op-and-fetch builtins.
> 
> The trunk patch submission is at
> https://gcc.gnu.org/ml/gcc-patches/2015-05/msg01989.html
> The commit is at https://gcc.gnu.org/ml/gcc-cvs/2015-06/msg00076.html
> 
> Tested the series for aarch64-none-linux-gnu with check-gcc.
> 
> Ok for the branch?
> Matthew
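For readers picking this up from the archives: the builtins in question are the
legacy __sync fetch-and-op/op-and-fetch primitives, which are documented as full
memory barriers. A minimal illustration of the forms involved (illustrative only,
not taken from the patch or its testsuite):

  /* Illustration only: the legacy __sync forms affected by this backport.  */
  #include <stdint.h>

  int64_t
  fetch_then_add (int64_t *p, int64_t v)
  {
    return __sync_fetch_and_add (p, v);   /* returns the old value */
  }

  int64_t
  add_then_fetch (int64_t *p, int64_t v)
  {
    return __sync_add_and_fetch (p, v);   /* returns the new value */
  }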

OK.

Thanks,
James

> 
> 2015-06-26  Matthew Wahab  <matthew.wa...@arm.com>
> 
>       Backport from trunk.
>       2015-06-01  Matthew Wahab  <matthew.wa...@arm.com>
> 
>       PR target/65697
>       * config/aarch64/aarch64.c (aarch64_emit_post_barrier): New.
>       (aarch64_split_atomic_op): Check for __sync memory models, emit
>       appropriate initial loads and final barriers.
> 

> From d6d3351b4547d0ad52e4d7e9955fafdced11491a Mon Sep 17 00:00:00 2001
> From: mwahab <mwahab@138bc75d-0d04-0410-961f-82ee72b054a4>
> Date: Mon, 1 Jun 2015 15:18:19 +0000
> Subject: [PATCH 2/4] [Aarch64][5.1] Strengthen barriers for sync-fetch-op
>  builtin.
> 
>         PR target/65697
>       * config/aarch64/aarch64.c (aarch64_emit_post_barrier): New.
>       (aarch64_split_atomic_op): Check for __sync memory models, emit
>       appropriate initial loads and final barriers.
> 
> git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@223983 138bc75d-0d04-0410-961f-82ee72b054a4
> 
> Conflicts:
>       gcc/ChangeLog
>       gcc/config/aarch64/aarch64.c
> 
> Change-Id: I45600c4dd0002b4c2d48de36d695c83581fe50da
> ---
>  gcc/config/aarch64/aarch64.c | 31 ++++++++++++++++++++++++++++++-
>  1 file changed, 30 insertions(+), 1 deletion(-)
> 
> diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
> index b8b37b8..708fc23 100644
> --- a/gcc/config/aarch64/aarch64.c
> +++ b/gcc/config/aarch64/aarch64.c
> @@ -9066,6 +9066,23 @@ aarch64_expand_compare_and_swap (rtx operands[])
>    emit_insn (gen_rtx_SET (VOIDmode, bval, x));
>  }
>  
> +/* Emit a barrier, that is appropriate for memory model MODEL, at the end of a
> +   sequence implementing an atomic operation.  */
> +
> +static void
> +aarch64_emit_post_barrier (enum memmodel model)
> +{
> +  const enum memmodel base_model = memmodel_base (model);
> +
> +  if (is_mm_sync (model)
> +      && (base_model == MEMMODEL_ACQUIRE
> +       || base_model == MEMMODEL_ACQ_REL
> +       || base_model == MEMMODEL_SEQ_CST))
> +    {
> +      emit_insn (gen_mem_thread_fence (GEN_INT (MEMMODEL_SEQ_CST)));
> +    }
> +}
> +
>  /* Split a compare and swap pattern.  */
>  
>  void
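A note on aarch64_emit_post_barrier above (illustrative commentary, not part of
the patch): for the __sync variants of the ACQUIRE/ACQ_REL/SEQ_CST models it
emits a full memory fence (dmb ish on AArch64), which is what gives these
builtins their documented full-barrier behaviour. A hedged sketch of the
guarantee, using a made-up flag variable:

  /* Illustration only: __sync builtins are documented as full barriers, so
     the store to 'ready' must not become visible before the atomic update.
     The trailing fence emitted by aarch64_emit_post_barrier enforces this
     for the __sync memory models.  */
  #include <stdint.h>

  extern volatile int ready;            /* hypothetical flag */

  void
  publish (int64_t *counter)
  {
    __sync_add_and_fetch (counter, 1);  /* full barrier per the __sync contract */
    ready = 1;                          /* must not be hoisted above the add */
  }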
> @@ -9128,6 +9145,8 @@ aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
>  {
>    machine_mode mode = GET_MODE (mem);
>    machine_mode wmode = (mode == DImode ? DImode : SImode);
> +  const enum memmodel model = memmodel_from_int (INTVAL (model_rtx));
> +  const bool is_sync = is_mm_sync (model);
>    rtx_code_label *label;
>    rtx x;
>  
> @@ -9142,7 +9161,13 @@ aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
>      old_out = new_out;
>    value = simplify_gen_subreg (wmode, value, mode, 0);
>  
> -  aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
> +  /* The initial load can be relaxed for a __sync operation since a final
> +     barrier will be emitted to stop code hoisting.  */
> +  if (is_sync)
> +    aarch64_emit_load_exclusive (mode, old_out, mem,
> +                              GEN_INT (MEMMODEL_RELAXED));
> +  else
> +    aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
>  
>    switch (code)
>      {
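On the relaxed initial load above (commentary, not part of the patch): with the
final barrier in place, the load-exclusive no longer needs acquire semantics.
Roughly, the expected expansion of a __sync fetch-and-op after this change is
sketched in the comment below (illustrative; actual registers and labels will
differ):

  /* Illustrative sketch only, not generated output:

       .Lretry:
         ldxr   x1, [x0]        // relaxed load-exclusive
         add    x2, x1, x3      // the atomic operation
         stlxr  w4, x2, [x0]    // store-exclusive with release semantics
         cbnz   w4, .Lretry     // retry if the exclusive store failed
         dmb    ish             // final barrier (aarch64_emit_post_barrier)  */
  #include <stdint.h>

  int64_t
  sync_fetch_add (int64_t *p, int64_t v)
  {
    return __sync_fetch_and_add (p, v);
  }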
> @@ -9178,6 +9203,10 @@ aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
>    x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
>                           gen_rtx_LABEL_REF (Pmode, label), pc_rtx);
>    aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
> +
> +  /* Emit any final barrier needed for a __sync operation.  */
> +  if (is_sync)
> +    aarch64_emit_post_barrier (model);
>  }
>  
>  static void
> -- 
> 1.9.1
> 
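For completeness, a hedged way to eyeball the effect of the final-barrier hunk
(these probe functions are illustrative, not part of the patch or its testsuite):
building them for aarch64 with -O2 -S should show a dmb ish after each
exclusive-access loop.

  /* Not part of the patch: small probes for inspecting generated code.  */
  #include <stdint.h>

  uint32_t
  and_fetch (uint32_t *p, uint32_t mask)
  {
    return __sync_and_and_fetch (p, mask);
  }

  uint32_t
  fetch_or (uint32_t *p, uint32_t mask)
  {
    return __sync_fetch_and_or (p, mask);
  }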
