Wilco Dijkstra <wilco.dijks...@arm.com> writes:
> Clean up the various interfaces related to SIMD immediate generation.
> Introduce new functions that make it clear which operation (AND, ORR, MOV)
> we are testing for rather than guessing the final instruction.  Reduce the
> use of overly long names and remove unused and default parameters for
> clarity.  No changes to internals or generated code.
>
> Passes regress & bootstrap, OK for commit?

Nice cleanup!  OK with the obvious fix-ups after Tamar's patch.

Thanks,
Richard

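For reference, a minimal before/after sketch of a typical call site, using
the names introduced by the patch below (the surrounding code and the
`asm_out' variable are illustrative only, not taken from the tree):

    /* Before: one validity check and one output routine, parameterised
       by simd_immediate_check (defaulting to AARCH64_CHECK_MOV).  */
    if (aarch64_simd_valid_immediate (op, NULL, AARCH64_CHECK_ORR))
      asm_out = aarch64_output_simd_mov_immediate (op, 128, AARCH64_CHECK_ORR);

    /* After: the operation is explicit in the function name.  */
    if (aarch64_simd_valid_orr_imm (op))
      asm_out = aarch64_output_simd_orr_imm (op, 128);
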
>
> gcc/ChangeLog:
>
>         * config/aarch64/aarch64-protos.h (enum simd_immediate_check):
>         Move to aarch64.cc.
>         (aarch64_output_simd_mov_immediate): Remove.
>         (aarch64_output_simd_mov_imm): New prototype.
>         (aarch64_output_simd_orr_imm): Likewise.
>         (aarch64_output_simd_and_imm): Likewise.
>         (aarch64_simd_valid_immediate): Remove.
>         (aarch64_simd_valid_and_imm): New prototype.
>         (aarch64_simd_valid_mov_imm): Likewise.
>         (aarch64_simd_valid_orr_imm): Likewise.
>         * config/aarch64/aarch64-simd.md: Use aarch64_output_simd_mov_imm.
>         * config/aarch64/aarch64.cc (enum simd_immediate_check): Moved
>         from aarch64-protos.h.  Use AARCH64_CHECK_AND rather than
>         AARCH64_CHECK_BIC.
>         (aarch64_expand_sve_const_vector): Use aarch64_simd_valid_mov_imm.
>         (aarch64_expand_mov_immediate): Likewise.
>         (aarch64_can_const_movi_rtx_p): Likewise.
>         (aarch64_secondary_reload): Likewise.
>         (aarch64_legitimate_constant_p): Likewise.
>         (aarch64_advsimd_valid_immediate): Simplify checks on 'which' param.
>         (aarch64_sve_valid_immediate): Add extra param for move vs logical.
>         (aarch64_simd_valid_immediate): Rename to aarch64_simd_valid_imm.
>         (aarch64_simd_valid_mov_imm): New function.
>         (aarch64_simd_valid_orr_imm): Likewise.
>         (aarch64_simd_valid_and_imm): Likewise.
>         (aarch64_mov_operand_p): Use aarch64_simd_valid_mov_imm.
>         (aarch64_simd_scalar_immediate_valid_for_move): Likewise.
>         (aarch64_simd_make_constant): Likewise.
>         (aarch64_expand_vector_init_fallback): Likewise.
>         (aarch64_output_simd_mov_immediate): Rename to
>         aarch64_output_simd_imm.
>         (aarch64_output_simd_orr_imm): New function.
>         (aarch64_output_simd_and_imm): Likewise.
>         (aarch64_output_simd_mov_imm): Likewise.
>         (aarch64_output_scalar_simd_mov_immediate): Use
>         aarch64_output_simd_mov_imm.
>         (aarch64_output_sve_mov_immediate): Use aarch64_simd_valid_imm.
>         (aarch64_output_sve_ptrues): Likewise.
>         * config/aarch64/constraints.md (Do): Use aarch64_simd_valid_orr_imm.
>         (Db): Use aarch64_simd_valid_and_imm.
>         * config/aarch64/predicates.md (aarch64_reg_or_bic_imm): Use
>         aarch64_simd_valid_orr_imm.
>         (aarch64_reg_or_and_imm): Use aarch64_simd_valid_and_imm.
>
> ---
>
> diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
> index d03c1fe798b2ccc2258b8581473a6eb7dc4af850..e789ca9358341363b976988f01d7c7c7aa88cfe4 100644
> --- a/gcc/config/aarch64/aarch64-protos.h
> +++ b/gcc/config/aarch64/aarch64-protos.h
> @@ -665,16 +665,6 @@ enum aarch64_extra_tuning_flags
>    AARCH64_EXTRA_TUNE_ALL = (1u << AARCH64_EXTRA_TUNE_index_END) - 1
>  };
>  
> -/* Enum to distinguish which type of check is to be done in
> -   aarch64_simd_valid_immediate.  This is used as a bitmask where
> -   AARCH64_CHECK_MOV has both bits set.  Thus AARCH64_CHECK_MOV will
> -   perform all checks.  Adding new types would require changes accordingly.  */
> -enum simd_immediate_check {
> -  AARCH64_CHECK_ORR  = 1 << 0,
> -  AARCH64_CHECK_BIC  = 1 << 1,
> -  AARCH64_CHECK_MOV  = AARCH64_CHECK_ORR | AARCH64_CHECK_BIC
> -};
> -
>  extern struct tune_params aarch64_tune_params;
>  
>  /* The available SVE predicate patterns, known in the ACLE as "svpattern".  */
> @@ -834,8 +824,10 @@ char *aarch64_output_sve_rdvl (rtx);
>  char *aarch64_output_sve_addvl_addpl (rtx);
>  char *aarch64_output_sve_vector_inc_dec (const char *, rtx);
>  char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
> -char *aarch64_output_simd_mov_immediate (rtx, unsigned,
> -                     enum simd_immediate_check w = AARCH64_CHECK_MOV);
> +char *aarch64_output_simd_mov_imm (rtx, unsigned);
> +char *aarch64_output_simd_orr_imm (rtx, unsigned);
> +char *aarch64_output_simd_and_imm (rtx, unsigned);
> +
>  char *aarch64_output_sve_mov_immediate (rtx);
>  char *aarch64_output_sve_ptrues (rtx);
>  bool aarch64_pad_reg_upward (machine_mode, const_tree, bool);
> @@ -849,8 +841,9 @@ bool aarch64_pars_overlap_p (rtx, rtx);
>  bool aarch64_simd_scalar_immediate_valid_for_move (rtx, scalar_int_mode);
>  bool aarch64_simd_shift_imm_p (rtx, machine_mode, bool);
>  bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
> -bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
> -                     enum simd_immediate_check w = AARCH64_CHECK_MOV);
> +bool aarch64_simd_valid_and_imm (rtx);
> +bool aarch64_simd_valid_mov_imm (rtx);
> +bool aarch64_simd_valid_orr_imm (rtx);
>  bool aarch64_valid_sysreg_name_p (const char *);
>  const char *aarch64_retrieve_sysreg (const char *, bool, bool);
>  rtx aarch64_check_zero_based_sve_index_immediate (rtx);
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 11d405ed640f7937f985c4bae43ecd634a096604..6eeb5aa4871eceabb8e46e52bd63f0aa634b9f3d 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -160,7 +160,7 @@ (define_insn_and_split "*aarch64_simd_mov<VDMOV:mode>"
>       [?r, w ; neon_to_gp<q>      , *        , *] fmov\t%x0, %d1
>       [?w, r ; f_mcr              , *        , *] fmov\t%d0, %1
>       [?r, r ; mov_reg            , *        , *] mov\t%0, %1
> -     [w , Dn; neon_move<q>       , simd     , *] << aarch64_output_simd_mov_immediate (operands[1], 64);
> +     [w , Dn; neon_move<q>       , simd     , *] << aarch64_output_simd_mov_imm (operands[1], 64);
>       [w , Dz; f_mcr              , *        , *] fmov\t%d0, xzr
>       [w , Dx; neon_move          , simd     , 8] #
>    }
> @@ -189,7 +189,7 @@ (define_insn_and_split "*aarch64_simd_mov<VQMOV:mode>"
>       [?r , w ; multiple           , *   , 8] #
>       [?w , r ; multiple           , *   , 8] #
>       [?r , r ; multiple           , *   , 8] #
> -     [w  , Dn; neon_move<q>       , simd, 4] << aarch64_output_simd_mov_immediate (operands[1], 128);
> +     [w  , Dn; neon_move<q>       , simd, 4] << aarch64_output_simd_mov_imm (operands[1], 128);
>       [w  , Dz; fmov               , *   , 4] fmov\t%d0, xzr
>       [w  , Dx; neon_move          , simd, 8] #
>    }
> @@ -1122,11 +1122,11 @@ (define_insn "fabd<mode>3<vczle><vczbe>"
>  (define_insn "and<mode>3<vczle><vczbe>"
>    [(set (match_operand:VDQ_I 0 "register_operand")
>       (and:VDQ_I (match_operand:VDQ_I 1 "register_operand")
> -                (match_operand:VDQ_I 2 "aarch64_reg_or_bic_imm")))]
> +                (match_operand:VDQ_I 2 "aarch64_reg_or_and_imm")))]
>    "TARGET_SIMD"
>    {@ [ cons: =0 , 1 , 2   ]
>       [ w        , w , w   ] and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
> -     [ w        , 0 , Db  ] << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, AARCH64_CHECK_BIC);
> +     [ w        , 0 , Db  ] << aarch64_output_simd_and_imm (operands[2], <bitsize>);
>    }
>    [(set_attr "type" "neon_logic<q>")]
>  )
> @@ -1141,8 +1141,7 @@ (define_insn "ior<mode>3<vczle><vczbe>"
>       [ w        , w , w  ; simd      ] orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
>       [ w        , 0 , vsl; sve       ] orr\t%Z0.<Vetype>, %Z0.<Vetype>, #%2
>       [ w        , 0 , Do ; simd      ] \
> -       << aarch64_output_simd_mov_immediate (operands[2], <bitsize>, \
> -                                          AARCH64_CHECK_ORR);
> +       << aarch64_output_simd_orr_imm (operands[2], <bitsize>);
>    }
>    [(set_attr "type" "neon_logic<q>")]
>  )
> diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
> index 102680a0efca1ce928e6945033c01cfb68a65152..d38345770ebab15cf872c24b3ec8ab8cc5cce3e7 100644
> --- a/gcc/config/aarch64/aarch64.cc
> +++ b/gcc/config/aarch64/aarch64.cc
> @@ -129,6 +129,14 @@ constexpr auto AARCH64_STATE_SHARED = 1U << 0;
>  constexpr auto AARCH64_STATE_IN = 1U << 1;
>  constexpr auto AARCH64_STATE_OUT = 1U << 2;
>  
> +/* Enum to distinguish which type of check is to be done in
> +   aarch64_simd_valid_imm.  */
> +enum simd_immediate_check {
> +  AARCH64_CHECK_MOV,
> +  AARCH64_CHECK_ORR,
> +  AARCH64_CHECK_AND
> +};
> +
>  /* Information about a legitimate vector immediate operand.  */
>  struct simd_immediate_info
>  {
> @@ -5657,7 +5665,7 @@ aarch64_expand_sve_const_vector (rtx target, rtx src)
>         builder.quick_push (CONST_VECTOR_ENCODED_ELT (src, srci));
>       }
>        rtx vq_src = builder.build ();
> -      if (aarch64_simd_valid_immediate (vq_src, NULL))
> +      if (aarch64_simd_valid_mov_imm (vq_src))
>       {
>         vq_src = force_reg (vq_mode, vq_src);
>         return aarch64_expand_sve_dupq (target, mode, vq_src);
> @@ -6169,8 +6177,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
>           }
>       }
>  
> -      if (GET_CODE (imm) == HIGH
> -       || aarch64_simd_valid_immediate (imm, NULL))
> +      if (GET_CODE (imm) == HIGH || aarch64_simd_valid_mov_imm (imm))
>       {
>         emit_insn (gen_rtx_SET (dest, imm));
>         return;
> @@ -11112,7 +11119,7 @@ aarch64_can_const_movi_rtx_p (rtx x, machine_mode mode)
>    vmode = aarch64_simd_container_mode (imode, width);
>    rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, ival);
>  
> -  return aarch64_simd_valid_immediate (v_op, NULL);
> +  return aarch64_simd_valid_mov_imm (v_op);
>  }
>  
>  
> @@ -12893,7 +12900,7 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
>    unsigned int vec_flags = aarch64_classify_vector_mode (mode);
>    if (reg_class_subset_p (rclass, FP_REGS)
>        && !((REG_P (x) && HARD_REGISTER_P (x))
> -        || aarch64_simd_valid_immediate (x, NULL))
> +        || aarch64_simd_valid_mov_imm (x))
>        && mode != VNx16QImode
>        && (vec_flags & VEC_SVE_DATA)
>        && ((vec_flags & VEC_PARTIAL) || BYTES_BIG_ENDIAN))
> @@ -15479,7 +15486,7 @@ cost_plus:
>      case CONST_VECTOR:
>       {
>         /* Load using MOVI/MVNI.  */
> -       if (aarch64_simd_valid_immediate (x, NULL))
> +       if (aarch64_simd_valid_mov_imm (x))
>           *cost = extra_cost->vect.movi;
>         else /* Load using constant pool.  */
>           *cost = extra_cost->ldst.load;
> @@ -21155,7 +21162,7 @@ aarch64_legitimate_constant_p (machine_mode mode, rtx x)
>       ??? It would be possible (but complex) to handle rematerialization
>       of other constants via secondary reloads.  */
>    if (!GET_MODE_SIZE (mode).is_constant ())
> -    return aarch64_simd_valid_immediate (x, NULL);
> +    return aarch64_simd_valid_mov_imm (x);
>  
>    /* Otherwise, accept any CONST_VECTOR that, if all else fails, can at
>       least be forced to memory and loaded from there.  */
> @@ -22909,12 +22916,12 @@ aarch64_advsimd_valid_immediate (unsigned HOST_WIDE_INT val64,
>  
>    if (val32 == (val64 >> 32))
>      {
> -      if ((which & AARCH64_CHECK_ORR) != 0
> +      if ((which == AARCH64_CHECK_MOV || which == AARCH64_CHECK_ORR)
>         && aarch64_advsimd_valid_immediate_hs (val32, info, which,
>                                                simd_immediate_info::MOV))
>       return true;
>  
> -      if ((which & AARCH64_CHECK_BIC) != 0
> +      if ((which == AARCH64_CHECK_MOV || which == AARCH64_CHECK_AND)
>         && aarch64_advsimd_valid_immediate_hs (~val32, info, which,
>                                                simd_immediate_info::MVN))
>       return true;
> @@ -22955,7 +22962,8 @@ aarch64_advsimd_valid_immediate (unsigned HOST_WIDE_INT val64,
>  
>  static bool
>  aarch64_sve_valid_immediate (unsigned HOST_WIDE_INT val64,
> -                          simd_immediate_info *info)
> +                          simd_immediate_info *info,
> +                          enum simd_immediate_check which)
>  {
>    scalar_int_mode mode = DImode;
>    unsigned int val32 = val64 & 0xffffffff;
> @@ -22972,19 +22980,23 @@ aarch64_sve_valid_immediate (unsigned HOST_WIDE_INT val64,
>       }
>      }
>    HOST_WIDE_INT val = trunc_int_for_mode (val64, mode);
> -  if (IN_RANGE (val, -0x80, 0x7f))
> -    {
> -      /* DUP with no shift.  */
> -      if (info)
> -     *info = simd_immediate_info (mode, val);
> -      return true;
> -    }
> -  if ((val & 0xff) == 0 && IN_RANGE (val, -0x8000, 0x7f00))
> +
> +  if (which == AARCH64_CHECK_MOV)
>      {
> -      /* DUP with LSL #8.  */
> -      if (info)
> -     *info = simd_immediate_info (mode, val);
> -      return true;
> +      if (IN_RANGE (val, -0x80, 0x7f))
> +     {
> +       /* DUP with no shift.  */
> +       if (info)
> +         *info = simd_immediate_info (mode, val);
> +       return true;
> +     }
> +      if ((val & 0xff) == 0 && IN_RANGE (val, -0x8000, 0x7f00))
> +     {
> +       /* DUP with LSL #8.  */
> +       if (info)
> +         *info = simd_immediate_info (mode, val);
> +       return true;
> +     }
>      }
>    if (aarch64_bitmask_imm (val64, mode))
>      {
> @@ -23070,9 +23082,9 @@ aarch64_sve_pred_valid_immediate (rtx x, simd_immediate_info *info)
>  /* Return true if OP is a valid SIMD immediate for the operation
>     described by WHICH.  If INFO is nonnull, use it to describe valid
>     immediates.  */
> -bool
> -aarch64_simd_valid_immediate (rtx op, simd_immediate_info *info,
> -                           enum simd_immediate_check which)
> +static bool
> +aarch64_simd_valid_imm (rtx op, simd_immediate_info *info,
> +                     enum simd_immediate_check which)
>  {
>    machine_mode mode = GET_MODE (op);
>    unsigned int vec_flags = aarch64_classify_vector_mode (mode);
> @@ -23196,11 +23208,32 @@ aarch64_simd_valid_immediate (rtx op, simd_immediate_info *info,
>             << (i * BITS_PER_UNIT));
>  
>    if (vec_flags & VEC_SVE_DATA)
> -    return aarch64_sve_valid_immediate (val64, info);
> +    return aarch64_sve_valid_immediate (val64, info, which);
>    else
>      return aarch64_advsimd_valid_immediate (val64, info, which);
>  }
>  
> +/* Return true if OP is a valid SIMD move immediate for SVE or AdvSIMD.  */
> +bool
> +aarch64_simd_valid_mov_imm (rtx op)
> +{
> +  return aarch64_simd_valid_imm (op, NULL, AARCH64_CHECK_MOV);
> +}
> +
> +/* Return true if OP is a valid SIMD orr immediate for SVE or AdvSIMD.  */
> +bool
> +aarch64_simd_valid_orr_imm (rtx op)
> +{
> +  return aarch64_simd_valid_imm (op, NULL, AARCH64_CHECK_ORR);
> +}
> +
> +/* Return true if OP is a valid SIMD and immediate for SVE or AdvSIMD.  */
> +bool
> +aarch64_simd_valid_and_imm (rtx op)
> +{
> +  return aarch64_simd_valid_imm (op, NULL, AARCH64_CHECK_AND);
> +}
> +
>  /* Check whether X is a VEC_SERIES-like constant that starts at 0 and
>     has a step in the range of INDEX.  Return the index expression if so,
>     otherwise return null.  */
> @@ -23264,7 +23297,7 @@ aarch64_mov_operand_p (rtx x, machine_mode mode)
>         && GET_MODE (x) != VNx16BImode)
>       return false;
>  
> -      return aarch64_simd_valid_immediate (x, NULL);
> +      return aarch64_simd_valid_mov_imm (x);
>      }
>  
>    /* Remove UNSPEC_SALT_ADDR before checking symbol reference.  */
> @@ -23365,7 +23398,7 @@ aarch64_simd_scalar_immediate_valid_for_move (rtx op, scalar_int_mode mode)
>  
>    vmode = aarch64_simd_container_mode (mode, 64);
>    rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op));
> -  return aarch64_simd_valid_immediate (op_v, NULL);
> +  return aarch64_simd_valid_mov_imm (op_v);
>  }
>  
>  /* Construct and return a PARALLEL RTX vector with elements numbering the
> @@ -23845,7 +23878,7 @@ aarch64_simd_make_constant (rtx vals)
>      gcc_unreachable ();
>  
>    if (const_vec != NULL_RTX
> -      && aarch64_simd_valid_immediate (const_vec, NULL))
> +      && aarch64_simd_valid_mov_imm (const_vec))
>      /* Load using MOVI/MVNI.  */
>      return const_vec;
>    else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX)
> @@ -24050,7 +24083,7 @@ aarch64_expand_vector_init_fallback (rtx target, rtx vals)
>        /* Load constant part of vector.  We really don't care what goes into the
>        parts we will overwrite, but we're more likely to be able to load the
>        constant efficiently if it has fewer, larger, repeating parts
> -      (see aarch64_simd_valid_immediate).  */
> +      (see aarch64_simd_valid_imm).  */
>        for (int i = 0; i < n_elts; i++)
>       {
>         rtx x = XVECEXP (vals, 0, i);
> @@ -25298,12 +25331,11 @@ aarch64_float_const_representable_p (rtx x)
>    return (exponent >= 0 && exponent <= 7);
>  }
>  
> -/* Returns the string with the instruction for AdvSIMD MOVI, MVNI, ORR or BIC
> -   immediate with a CONST_VECTOR of MODE and WIDTH.  WHICH selects whether to
> -   output MOVI/MVNI, ORR or BIC immediate.  */
> +/* Returns the string with the instruction for the SIMD immediate
> +   CONST_VECTOR of WIDTH bits.  WHICH selects a move, and (bic) or orr.  */
>  char*
> -aarch64_output_simd_mov_immediate (rtx const_vector, unsigned width,
> -                                enum simd_immediate_check which)
> +aarch64_output_simd_imm (rtx const_vector, unsigned width,
> +                      enum simd_immediate_check which)
>  {
>    bool is_valid;
>    static char templ[40];
> @@ -25314,11 +25346,7 @@ aarch64_output_simd_mov_immediate (rtx const_vector, unsigned width,
>  
>    struct simd_immediate_info info;
>  
> -  /* This will return true to show const_vector is legal for use as either
> -     a AdvSIMD MOVI instruction (or, implicitly, MVNI), ORR or BIC immediate.
> -     It will also update INFO to show how the immediate should be generated.
> -     WHICH selects whether to check for MOVI/MVNI, ORR or BIC.  */
> -  is_valid = aarch64_simd_valid_immediate (const_vector, &info, which);
> +  is_valid = aarch64_simd_valid_imm (const_vector, &info, which);
>    gcc_assert (is_valid);
>  
>    element_char = sizetochar (GET_MODE_BITSIZE (info.elt_mode));
> @@ -25381,7 +25409,7 @@ aarch64_output_simd_mov_immediate (rtx const_vector, unsigned width,
>      }
>    else
>      {
> -      /* For AARCH64_CHECK_BIC and AARCH64_CHECK_ORR.  */
> +      /* AARCH64_CHECK_ORR or AARCH64_CHECK_AND.  */
>        mnemonic = info.insn == simd_immediate_info::MVN ? "bic" : "orr";
>        if (info.u.mov.shift)
>       snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, #"
> @@ -25396,6 +25424,30 @@ aarch64_output_simd_mov_immediate (rtx const_vector, unsigned width,
>    return templ;
>  }
>  
> +/* Returns the string with the ORR instruction for the SIMD immediate
> +   CONST_VECTOR of WIDTH bits.  */
> +char*
> +aarch64_output_simd_orr_imm (rtx const_vector, unsigned width)
> +{
> +  return aarch64_output_simd_imm (const_vector, width, AARCH64_CHECK_ORR);
> +}
> +
> +/* Returns the string with the AND/BIC instruction for the SIMD immediate
> +   CONST_VECTOR of WIDTH bits.  */
> +char*
> +aarch64_output_simd_and_imm (rtx const_vector, unsigned width)
> +{
> +  return aarch64_output_simd_imm (const_vector, width, AARCH64_CHECK_AND);
> +}
> +
> +/* Returns the string with the MOV instruction for the SIMD immediate
> +   CONST_VECTOR of WIDTH bits.  */
> +char*
> +aarch64_output_simd_mov_imm (rtx const_vector, unsigned width)
> +{
> +  return aarch64_output_simd_imm (const_vector, width, AARCH64_CHECK_MOV);
> +}
> +
>  char*
>  aarch64_output_scalar_simd_mov_immediate (rtx immediate, scalar_int_mode mode)
>  {
> @@ -25417,7 +25469,7 @@ aarch64_output_scalar_simd_mov_immediate (rtx immediate, scalar_int_mode mode)
>  
>    vmode = aarch64_simd_container_mode (mode, width);
>    rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (immediate));
> -  return aarch64_output_simd_mov_immediate (v_op, width);
> +  return aarch64_output_simd_mov_imm (v_op, width);
>  }
>  
>  /* Return the output string to use for moving immediate CONST_VECTOR
> @@ -25429,8 +25481,9 @@ aarch64_output_sve_mov_immediate (rtx const_vector)
>    static char templ[40];
>    struct simd_immediate_info info;
>    char element_char;
> +  bool is_valid;
>  
> -  bool is_valid = aarch64_simd_valid_immediate (const_vector, &info);
> +  is_valid = aarch64_simd_valid_imm (const_vector, &info, AARCH64_CHECK_MOV);
>    gcc_assert (is_valid);
>  
>    element_char = sizetochar (GET_MODE_BITSIZE (info.elt_mode));
> @@ -25499,9 +25552,10 @@ char *
>  aarch64_output_sve_ptrues (rtx const_unspec)
>  {
>    static char templ[40];
> -
>    struct simd_immediate_info info;
> -  bool is_valid = aarch64_simd_valid_immediate (const_unspec, &info);
> +  bool is_valid;
> +
> +  is_valid = aarch64_simd_valid_imm (const_unspec, &info, AARCH64_CHECK_MOV);
>    gcc_assert (is_valid && info.insn == simd_immediate_info::PTRUE);
>  
>    char element_char = sizetochar (GET_MODE_BITSIZE (info.elt_mode));
> diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
> index f491e4bd6a069bc9f8a3c71bdec14496c862a94d..3f9fd92a1911e0b18a163dc6c4c2c97c871458e0 100644
> --- a/gcc/config/aarch64/constraints.md
> +++ b/gcc/config/aarch64/constraints.md
> @@ -464,21 +464,19 @@ (define_constraint "Do"
>    "@internal
>     A constraint that matches vector of immediates for orr."
>   (and (match_code "const_vector")
> -      (match_test "aarch64_simd_valid_immediate (op, NULL,
> -                                              AARCH64_CHECK_ORR)")))
> +      (match_test "aarch64_simd_valid_orr_imm (op)")))
>  
>  (define_constraint "Db"
>    "@internal
> -   A constraint that matches vector of immediates for bic."
> +   A constraint that matches vector of immediates for and/bic."
>   (and (match_code "const_vector")
> -      (match_test "aarch64_simd_valid_immediate (op, NULL,
> -                                              AARCH64_CHECK_BIC)")))
> +      (match_test "aarch64_simd_valid_and_imm (op)")))
>  
>  (define_constraint "Dn"
>    "@internal
>   A constraint that matches vector of immediates."
>   (and (match_code "const,const_vector")
> -      (match_test "aarch64_simd_valid_immediate (op, NULL)")))
> +      (match_test "aarch64_simd_valid_mov_imm (op)")))
>  
>  (define_constraint "Dh"
>    "@internal
> diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
> index 8f3aab2272c62d5dcc06dfd14fd00067e4db6b8e..0a171387b1a73b85db0ae2ccbc788a3d7f28a082 100644
> --- a/gcc/config/aarch64/predicates.md
> +++ b/gcc/config/aarch64/predicates.md
> @@ -118,14 +118,12 @@ (define_predicate "aarch64_reg_zero_or_m1_or_1"
>  (define_predicate "aarch64_reg_or_orr_imm"
>     (ior (match_operand 0 "register_operand")
>       (and (match_code "const_vector")
> -          (match_test "aarch64_simd_valid_immediate (op, NULL,
> -                                                     AARCH64_CHECK_ORR)"))))
> +          (match_test "aarch64_simd_valid_orr_imm (op)"))))
>  
> -(define_predicate "aarch64_reg_or_bic_imm"
> +(define_predicate "aarch64_reg_or_and_imm"
>     (ior (match_operand 0 "register_operand")
>       (and (match_code "const_vector")
> -          (match_test "aarch64_simd_valid_immediate (op, NULL,
> -                                                     AARCH64_CHECK_BIC)"))))
> +          (match_test "aarch64_simd_valid_and_imm (op)"))))
>  
>  (define_predicate "aarch64_fp_compare_operand"
>    (ior (match_operand 0 "register_operand")
