Wilco Dijkstra <wilco.dijks...@arm.com> writes:
> Allow use of SVE immediates when generating AdvSIMD code and SVE is available.
> First check for a valid AdvSIMD immediate, and if SVE is available, try using
> an SVE move or bitmask immediate.
>
> Passes bootstrap & regress, OK for commit?
>
> gcc/ChangeLog:
>
>         * config/aarch64/aarch64-simd.md (ior<mode>3<vczle><vczbe>):
>         Use aarch64_reg_or_orr_imm predicate.  Combine SVE/AdvSIMD immediates
>         and use aarch64_output_simd_orr_imm.
>         * config/aarch64/aarch64.cc (struct simd_immediate_info): Add SVE_MOV
>         enum.
>         (aarch64_sve_valid_immediate): Use SVE_MOV for SVE move immediates.
>         (aarch64_simd_valid_imm): Enable SVE SIMD immediates when possible.
>         (aarch64_output_simd_imm): Support emitting SVE SIMD immediates. 
>         * config/aarch64/predicates.md (aarch64_orr_imm_sve_advsimd): Remove.
>
> gcc/testsuite/ChangeLog:
>
>         * gcc.target/aarch64/sve/acle/asm/insr_s64.c: Allow SVE MOV imm.
>         * gcc.target/aarch64/sve/acle/asm/insr_u64.c: Likewise.

Previously we allowed a move into a GPR and an INSR from there, but I agree
that we shouldn't continue to allow that now that it isn't used.  It's
better to "defend" the lack of a cross-file transfer.
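
Concretely (register numbers just for illustration), the insr_1_s64 case
in the test update below goes from a GPR immediate plus a cross-file
transfer:

        mov     x0, #1
        insr    z0.d, x0

to keeping the constant in the vector register file:

        mov     z1.d, #1
        insr    z0.d, d1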

The patch also has the effect of turning things like:

typedef int v4si __attribute__((vector_size(16)));
v4si f() { return (v4si) { 0xffc, 0xffc, 0xffc, 0xffc }; }

from:

        adrp    x0, .LC0
        ldr     q0, [x0, #:lo12:.LC0]
        ret
        ...
.LC0:
        .word   4092
        .word   4092
        .word   4092
        .word   4092

to:

        mov     z0.s, #4092
        ret

I think we should have some tests for that too, again to "defend"
the improvement.

OK with a test along those lines (for a few different variations).
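
For illustration, one such test could look roughly like this (the file
name, options and exact scan patterns are placeholders rather than part
of the patch):

/* { dg-do compile } */
/* { dg-options "-O2 -march=armv8.2-a+sve" } */

typedef int v4si __attribute__((vector_size(16)));

v4si f() { return (v4si) { 0xffc, 0xffc, 0xffc, 0xffc }; }

/* { dg-final { scan-assembler {\tmov\tz[0-9]+\.s, #4092\n} } } */
/* { dg-final { scan-assembler-not {\tldr\tq[0-9]+} } } */

Similar variants for other element sizes, and for values that are only
representable as SVE bitmask immediates, would cover the other new paths.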

Thanks,
Richard

>         * gcc.target/aarch64/sve/fneg-abs_1.c: Update to check for ORRI.
>         * gcc.target/aarch64/sve/fneg-abs_2.c: Likewise.
>
> ---
>
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 6eeb5aa4871eceabb8e46e52bd63f0aa634b9f3d..2e9f30b9bf50eec7a575f4e5037d3350f7ebc95a 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -1135,13 +1135,11 @@ (define_insn "and<mode>3<vczle><vczbe>"
>  (define_insn "ior<mode>3<vczle><vczbe>"
>    [(set (match_operand:VDQ_I 0 "register_operand")
>       (ior:VDQ_I (match_operand:VDQ_I 1 "register_operand")
> -                (match_operand:VDQ_I 2 "aarch64_orr_imm_sve_advsimd")))]
> +                (match_operand:VDQ_I 2 "aarch64_reg_or_orr_imm")))]
>    "TARGET_SIMD"
> -  {@ [ cons: =0 , 1 , 2; attrs: arch ]
> -     [ w        , w , w  ; simd      ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
> -     [ w        , 0 , vsl; sve       ] orr\t%Z0.<Vetype>, %Z0.<Vetype>, #%2
> -     [ w        , 0 , Do ; simd      ] \
> -       << aarch64_output_simd_orr_imm (operands[2], <bitsize>);
> +  {@ [ cons: =0 , 1 , 2  ]
> +     [ w        , w , w  ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
> +     [ w        , 0 , Do ] << aarch64_output_simd_orr_imm (operands[2], <bitsize>);
>    }
>    [(set_attr "type" "neon_logic<q>")]
>  )
> diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
> index d38345770ebab15cf872c24b3ec8ab8cc5cce3e7..7c656476c4974529ae71a6d73328a0cd68dd5ef8 100644
> --- a/gcc/config/aarch64/aarch64.cc
> +++ b/gcc/config/aarch64/aarch64.cc
> @@ -140,7 +140,7 @@ enum simd_immediate_check {
>  /* Information about a legitimate vector immediate operand.  */
>  struct simd_immediate_info
>  {
> -  enum insn_type { MOV, MVN, INDEX, PTRUE };
> +  enum insn_type { MOV, MVN, INDEX, PTRUE, SVE_MOV };
>    enum modifier_type { LSL, MSL };
>  
>    simd_immediate_info () {}
> @@ -22987,14 +22987,16 @@ aarch64_sve_valid_immediate (unsigned HOST_WIDE_INT val64,
>       {
>         /* DUP with no shift.  */
>         if (info)
> -         *info = simd_immediate_info (mode, val);
> +         *info = simd_immediate_info (mode, val,
> +                                      simd_immediate_info::SVE_MOV);
>         return true;
>       }
>        if ((val & 0xff) == 0 && IN_RANGE (val, -0x8000, 0x7f00))
>       {
>         /* DUP with LSL #8.  */
>         if (info)
> -         *info = simd_immediate_info (mode, val);
> +         *info = simd_immediate_info (mode, val,
> +                                      simd_immediate_info::SVE_MOV);
>         return true;
>       }
>      }
> @@ -23002,7 +23004,7 @@ aarch64_sve_valid_immediate (unsigned HOST_WIDE_INT val64,
>      {
>        /* DUPM.  */
>        if (info)
> -     *info = simd_immediate_info (mode, val);
> +     *info = simd_immediate_info (mode, val, simd_immediate_info::SVE_MOV);
>        return true;
>      }
>    return false;
> @@ -23209,8 +23211,13 @@ aarch64_simd_valid_imm (rtx op, simd_immediate_info *info,
>  
>    if (vec_flags & VEC_SVE_DATA)
>      return aarch64_sve_valid_immediate (val64, info, which);
> -  else
> -    return aarch64_advsimd_valid_immediate (val64, info, which);
> +
> +  if (aarch64_advsimd_valid_immediate (val64, info, which))
> +    return true;
> +
> +  if (TARGET_SVE)
> +    return aarch64_sve_valid_immediate (val64, info, which);
> +  return false;
>  }
>  
>  /* Return true if OP is a valid SIMD move immediate for SVE or AdvSIMD.  */
> @@ -25391,6 +25398,14 @@ aarch64_output_simd_imm (rtx const_vector, unsigned width,
>         return templ;
>       }
>  
> +      if (info.insn == simd_immediate_info::SVE_MOV)
> +     {
> +       gcc_assert (TARGET_SVE);
> +       snprintf (templ, sizeof (templ), "mov\t%%Z0.%c, #" HOST_WIDE_INT_PRINT_DEC,
> +                 element_char, INTVAL (info.u.mov.value));
> +       return templ;
> +     }
> +
>        mnemonic = info.insn == simd_immediate_info::MVN ? "mvni" : "movi";
>        shift_op = (info.u.mov.modifier == simd_immediate_info::MSL
>                 ? "msl" : "lsl");
> @@ -25410,8 +25425,18 @@ aarch64_output_simd_imm (rtx const_vector, unsigned width,
>    else
>      {
>        /* AARCH64_CHECK_ORR or AARCH64_CHECK_AND.  */
> -      mnemonic = info.insn == simd_immediate_info::MVN ? "bic" : "orr";
> -      if (info.u.mov.shift)
> +      mnemonic = "orr";
> +      if (which == AARCH64_CHECK_AND)
> +     mnemonic = info.insn == simd_immediate_info::MVN ? "bic" : "and";
> +
> +      if (info.insn == simd_immediate_info::SVE_MOV)
> +     {
> +       gcc_assert (TARGET_SVE);
> +       snprintf (templ, sizeof (templ), "%s\t%%Z0.%c, %%Z0.%c, "
> +                 HOST_WIDE_INT_PRINT_DEC, mnemonic, element_char,
> +                 element_char, INTVAL (info.u.mov.value));
> +     }
> +      else if (info.u.mov.shift)
>       snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, #"
>                 HOST_WIDE_INT_PRINT_DEC ", %s #%d", mnemonic, lane_count,
>                 element_char, UINTVAL (info.u.mov.value), "lsl",
> diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
> index 0a171387b1a73b85db0ae2ccbc788a3d7f28a082..2c18af94b8eca7a7985a238a4de8c5d0b3766acb 100644
> --- a/gcc/config/aarch64/predicates.md
> +++ b/gcc/config/aarch64/predicates.md
> @@ -943,11 +943,6 @@ (define_predicate "aarch64_sve_logical_operand"
>    (ior (match_operand 0 "register_operand")
>         (match_operand 0 "aarch64_sve_logical_immediate")))
>  
> -(define_predicate "aarch64_orr_imm_sve_advsimd"
> -  (ior (match_operand 0 "aarch64_reg_or_orr_imm")
> -       (and (match_test "TARGET_SVE")
> -         (match_operand 0 "aarch64_sve_logical_operand"))))
> -
>  (define_predicate "aarch64_sve_gather_offset_b"
>    (ior (match_operand 0 "register_operand")
>         (match_operand 0 "aarch64_sve_gather_immediate_b")))
> diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_s64.c
> index 32cdc8263d194729e4a89023c7602c7e3b80d022..6f36f32415ac92c2638c317844d8e62ecda7e484 100644
> --- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_s64.c
> +++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_s64.c
> @@ -43,8 +43,8 @@ TEST_UNIFORM_Z (insr_0_s64_untied, svint64_t,
>  /*
>  ** insr_1_s64:
>  ** (
> -**   mov     (x[0-9]+), #?1
> -**   insr    z0\.d, \1
> +**   mov     z([0-9]+)\.d, #?1
> +**   insr    z0\.d, d\1
>  ** |
>  **   movi    v([0-9]+)\.2d, 0x1
>  **   insr    z0\.d, d\2
> diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_u64.c
> index ab23f677d4fc93487affc2c9095e38df36371a4b..f92059a97f576f9d4e8a03cbfcac0985c1baa489 100644
> --- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_u64.c
> +++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/insr_u64.c
> @@ -43,8 +43,8 @@ TEST_UNIFORM_Z (insr_0_u64_untied, svuint64_t,
>  /*
>  ** insr_1_u64:
>  ** (
> -**   mov     (x[0-9]+), #?1
> -**   insr    z0\.d, \1
> +**   mov     z([0-9]+)\.d, #?1
> +**   insr    z0\.d, d\1
>  ** |
>  **   movi    v([0-9]+)\.2d, 0x1
>  **   insr    z0\.d, d\2
> diff --git a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_1.c b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_1.c
> index a8b27199ff83d0eebadfc7dcf03f94e1229d76b8..03560008fda16b1d7c62fe2daaed8cad98127827 100644
> --- a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_1.c
> +++ b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_1.c
> @@ -6,7 +6,7 @@
>  
>  /*
>  ** t1:
> -**   orr     z[0-9]+.s, z[0-9]+.s, #-2147483648
> +**   orr     v0.2s, #?128, lsl #?24
>  **   ret
>  */
>  float32x2_t t1 (float32x2_t a)
> @@ -16,7 +16,7 @@ float32x2_t t1 (float32x2_t a)
>  
>  /*
>  ** t2:
> -**   orr     z[0-9]+.s, z[0-9]+.s, #-2147483648
> +**   orr     v0.4s, #?128, lsl #?24
>  **   ret
>  */
>  float32x4_t t2 (float32x4_t a)
> @@ -26,7 +26,7 @@ float32x4_t t2 (float32x4_t a)
>  
>  /*
>  ** t3:
> -**   orr     z[0-9]+.d, z[0-9]+.d, #-9223372036854775808
> +**   orr     z[0-9]+.d, z[0-9]+.d, #?-9223372036854775808
>  **   ret
>  */
>  float64x2_t t3 (float64x2_t a)
> diff --git a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
> index 19a7695e605bc8aced486a9c450d1cdc6be4691a..fe08fe31fe87aab4a7ce8497d05488a42fe9ae21 100644
> --- a/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
> +++ b/gcc/testsuite/gcc.target/aarch64/sve/fneg-abs_2.c
> @@ -7,7 +7,7 @@
>  
>  /*
>  ** f1:
> -**   orr     z0.s, z0.s, #-2147483648
> +**   orr     v0.2s, #?128, lsl #?24
>  **   ret
>  */
>  float32_t f1 (float32_t a)
> @@ -17,7 +17,7 @@ float32_t f1 (float32_t a)
>  
>  /*
>  ** f2:
> -**   orr     z0.d, z0.d, #-9223372036854775808
> +**   orr     z0.d, z0.d, #?-9223372036854775808
>  **   ret
>  */
>  float64_t f2 (float64_t a)
