Claudio Bantaloukas <claudio.bantalou...@arm.com> writes:
> This patch adds the following intrinsics:
> - svcvt1_bf16[_mf8]_fpm
> - svcvt1_f16[_mf8]_fpm
> - svcvt2_bf16[_mf8]_fpm
> - svcvt2_f16[_mf8]_fpm
> - svcvtlt1_bf16[_mf8]_fpm
> - svcvtlt1_f16[_mf8]_fpm
> - svcvtlt2_bf16[_mf8]_fpm
> - svcvtlt2_f16[_mf8]_fpm
> - svcvtn_mf8[_f16_x2]_fpm (unpredicated)
> - svcvtnb_mf8[_f32_x2]_fpm
> - svcvtnt_mf8[_f32_x2]_fpm
>
> The underlying instructions are only available when SVE2 is enabled and the PE
> is not in streaming SVE mode. They are also available when SME2 is enabled and
> the PE is in streaming SVE mode.
>
> gcc/
>       * config/aarch64/aarch64-sve-builtins-shapes.cc
>       (parse_signature): Add an fpm_t (uint64_t) argument to functions that
>       set the fpm register.
>       (unary_convert_narrowxn_fpm_def): New class.
>       (unary_convert_narrowxn_fpm): New shape.
>       (unary_convertxn_fpm_def): New class.
>       (unary_convertxn_fpm): New shape.
>       * config/aarch64/aarch64-sve-builtins-shapes.h
>       (unary_convert_narrowxn_fpm): Declare.
>       (unary_convertxn_fpm): Likewise.
>       * config/aarch64/aarch64-sve-builtins-sve2.cc
>       (svcvt_fp8_impl): New class.
>       (svcvtn_impl): Handle fp8 cases.
>       (svcvt1, svcvt2, svcvtlt1, svcvtlt2): Add new FUNCTION.
>       (svcvtnb): Likewise.
>       * config/aarch64/aarch64-sve-builtins-sve2.def
>       (svcvt1, svcvt2, svcvtlt1, svcvtlt2): Add new DEF_SVE_FUNCTION_GS.
>       (svcvtn): Likewise.
>       (svcvtnb, svcvtnt): Likewise.
>       * config/aarch64/aarch64-sve-builtins-sve2.h
>       (svcvt1, svcvt2, svcvtlt1, svcvtlt2, svcvtnb, svcvtnt): Declare.
>       * config/aarch64/aarch64-sve-builtins.cc
>       (TYPES_cvt_mf8, TYPES_cvtn_mf8, TYPES_cvtnx_mf8): Add new types arrays.
>       (function_builder::get_name): Append _fpm to functions that set fpmr.
>       (function_resolver::check_gp_argument): Deal with the fpm_t argument.
>       (function_expander::use_exact_insn): Set the fpm register before
>       calling the insn if the function warrants it.
>       * config/aarch64/aarch64-sve2.md (@aarch64_sve2_fp8_cvt): Add new.
>       (@aarch64_sve2_fp8_cvtn): Likewise.
>       (@aarch64_sve2_fp8_cvtnb): Likewise.
>       (@aarch64_sve_cvtnt): Likewise.
>       * config/aarch64/aarch64.h (TARGET_SSVE_FP8): Add new.
>       * config/aarch64/iterators.md
>       (VNx8SF_ONLY, SVE_FULL_HFx2): New mode iterators.
>       (UNSPEC_F1CVT, UNSPEC_F1CVTLT, UNSPEC_F2CVT, UNSPEC_F2CVTLT): Add new.
>       (UNSPEC_FCVTNB, UNSPEC_FCVTNT): Likewise.
>       (UNSPEC_FP8FCVTN): Likewise.
>       (FP8CVT_UNS, fp8_cvt_uns_op): Likewise.
>
> gcc/testsuite/
>
>       * gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
>       (TEST_DUAL_Z): Add fpm0 argument.
>       * gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowxn_fpm_1.c:
>       Add new tests.
>       * gcc.target/aarch64/sve/acle/general-c/unary_convertxn_fpm_1.c:
>       Likewise.
>       * gcc.target/aarch64/sve2/acle/asm/cvt_mf8.c: Likewise.
>       * gcc.target/aarch64/sve2/acle/asm/cvtlt_mf8.c: Likewise.
>       * gcc.target/aarch64/sve2/acle/asm/cvtn_mf8.c: Likewise.
>       * lib/target-supports.exp: Add aarch64_asm_fp8_ok check.
> ---
>  .../aarch64/aarch64-sve-builtins-shapes.cc    | 74 +++++++++++++++++++
>  .../aarch64/aarch64-sve-builtins-shapes.h     |  2 +
>  .../aarch64/aarch64-sve-builtins-sve2.cc      | 28 ++++++-
>  .../aarch64/aarch64-sve-builtins-sve2.def     | 12 +++
>  .../aarch64/aarch64-sve-builtins-sve2.h       |  6 ++
>  gcc/config/aarch64/aarch64-sve-builtins.cc    | 30 +++++++-
>  gcc/config/aarch64/aarch64-sve2.md            | 52 +++++++++++++
>  gcc/config/aarch64/aarch64.h                  |  5 ++
>  gcc/config/aarch64/iterators.md               | 25 +++++++
>  .../aarch64/sve/acle/asm/test_sve_acle.h      |  2 +-
>  .../general-c/unary_convert_narrowxn_fpm_1.c  | 38 ++++++++++
>  .../acle/general-c/unary_convertxn_fpm_1.c    | 60 +++++++++++++++
>  .../aarch64/sve2/acle/asm/cvt_mf8.c           | 48 ++++++++++++
>  .../aarch64/sve2/acle/asm/cvtlt_mf8.c         | 47 ++++++++++++
>  .../aarch64/sve2/acle/asm/cvtn_mf8.c          | 59 +++++++++++++++
>  gcc/testsuite/lib/target-supports.exp         |  2 +-
>  16 files changed, 485 insertions(+), 5 deletions(-)
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowxn_fpm_1.c
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convertxn_fpm_1.c
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvt_mf8.c
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtlt_mf8.c
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtn_mf8.c
>
> diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
> index 51f7cfdf96f..f08c377f5e4 100644
> --- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
> +++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
> [...]
> @@ -4316,6 +4318,44 @@ struct unary_convert_narrowt_def : public overloaded_base<1>
>  };
>  SHAPE (unary_convert_narrowt)
>  
> +/* sv<t0>x<g0>_t svfoo_t0[_t1_g]_fpm(sv<t0>x<g0>_t, sv<t1>x<g1>_t, fpm_t)

There's not really a g0 or g1.  Since the result and first input are
both single vectors, I think this should be:

/* sv<t0>_t svfoo_t0[_t1_g]_fpm(sv<t0>_t, sv<t1>x<g>_t, fpm_t)

> +
> +   Similar to unary_convert_narrowt but for tuple arguments with support for
> +   modal floating point.  */
> +struct unary_convert_narrowxn_fpm_def : public overloaded_base<1>

How about calling it "unary_convertxn_narrowt..." instead?
I think the "t" for "top" is worth keeping, since it explains the
extra leading argument.

Also, I think in general we should try to reuse existing shapes for _fpm
and adjust the resolve functions based on the fpm suffix (like you do
for unary_convert, thanks).  So how about removing the "_fpm" suffix
and instead:

> +{
> +  bool
> +  explicit_group_suffix_p () const override
> +  {
> +    return false;
> +  }
> +
> +  bool
> +  has_merge_argument_p (const function_instance &, unsigned int) const 
> override
> +  {
> +    return true;
> +  }
> +
> +  void
> +  build (function_builder &b, const function_group_info &group) const 
> override
> +  {
> +    b.add_overloaded_functions (group, MODE_none);
> +    build_all (b, "v0,v0,t1", group, MODE_none);
> +  }
> +  
> +  tree
> +  resolve (function_resolver &r) const override
> +  {

assert here that r.fpm_mode == FPM_set.

The prototype above would then be:

/* sv<t0>_t svfoo_t0[_t1_g](sv<t0>_t, sv<t1>x<g>_t)

with the _fpm suffix and fpm_t being added mechanically, as for predication.

> +    sve_type type;
> +    if (!r.check_num_arguments (3) || !(type = r.infer_sve_type (1))
> +     || !r.require_derived_scalar_type (2, TYPE_unsigned, 64))

Formatting nit, sorry, but: there should be one condition per line if
the condition spans multiple lines.

More importantly, I think the scalar test should instead be:

        || !r.require_scalar_type (2, "uint64_t")

There are two reasons:

(1) The scalar type is a fixed part of the signature, rather than being
    derived from previous parameters.

(2) The usual C rules for handling uint64_t arguments should apply.
    It's not an error to pass (say) a 128-bit integer to a uint64_t.
    It's not even an error to pass a uint32_t (although it would be
    a conditional warning).

Same comment for the other resolve function.

> +      return error_mark_node;
> +
> +    return r.resolve_to (r.mode_suffix_id, type);
> +  }
> +};
> +SHAPE (unary_convert_narrowxn_fpm)
> +
>  /* sv<t0>x<g0>_t svfoo_t0[_t1_g](sv<t1>x<g1>_t)
>  
>     where the target type <t0> must be specified explicitly but the
> @@ -4348,6 +4388,40 @@ struct unary_convertxn_def : public unary_convert_def
>  };
>  SHAPE (unary_convertxn)
>  
> +/* sv<t0>x<g0>_t svfoo_t0[_t1_g]_fpm(sv<t1>x<g1>_t, fpm_t)

Applying the same comments here would give:

/* sv<t0>_t svfoo_t0[_t1_g](sv<t1>x<g>_t)

> +
> +   where the target type <t0> must be specified explicitly but the
> +   source type <t1> can be inferred.
> +
> +   Functions with a group suffix are unpredicated. */
> +struct unary_convertxn_fpm_def : public unary_convert_def

Just removing the _fpm here would create a clash with the existing shape,
so how about "unary_convertxn_narrow"?

> +{
> +  bool explicit_group_suffix_p () const override { return false; }
> +
> +  void
> +  build (function_builder &b, const function_group_info &group) const 
> override
> +  {
> +    b.add_overloaded_functions (group, MODE_none);
> +    build_all (b, "v0,t1", group, MODE_none);
> +  }
> +
> +  tree
> +  resolve (function_resolver &r) const override
> +  {
> +    if (r.pred != PRED_none)
> +      return unary_convert_def::resolve (r);

I think we can leave this out until an intrinsic needs it.

The scheme above would mean asserting r.fpm_mode == FPM_set here too.

> +
> +    sve_type type;
> +    if (!r.check_num_arguments (2)
> +     || !(type = r.infer_sve_type (0))
> +     || !r.require_derived_scalar_type (1, TYPE_unsigned, 64))
> +      return error_mark_node;
> +
> +    return r.resolve_to (r.mode_suffix_id, type);
> +  }
> +};
> +SHAPE (unary_convertxn_fpm)
> +
>  /* sv<t0>_t svfoo[_t0](sv<t0:half>_t).  */
>  struct unary_long_def : public overloaded_base<0>
>  {
> diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.h b/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
> index ea87240518d..ddb1f720c65 100644
> --- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
> +++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
> @@ -217,7 +217,9 @@ namespace aarch64_sve
>      extern const function_shape *const unary;
>      extern const function_shape *const unary_convert;
>      extern const function_shape *const unary_convert_narrowt;
> +    extern const function_shape *const unary_convert_narrowxn_fpm;
>      extern const function_shape *const unary_convertxn;
> +    extern const function_shape *const unary_convertxn_fpm;
>      extern const function_shape *const unary_long;
>      extern const function_shape *const unary_n;
>      extern const function_shape *const unary_narrowb;
> diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc b/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
> index d09b75b60c2..28a5b60d4a2 100644
> --- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
> +++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
> [...]
>  class svcvtn_impl : public function_base
>  {
>  public:
>    rtx
>    expand (function_expander &e) const override
>    {
> -    return e.use_exact_insn (code_for_aarch64_sve_cvtn (e.result_mode ()));
> +    if (e.fpm_mode == FPM_set)
> +      return e
> +       .use_exact_insn (code_for_aarch64_sve2_fp8_cvtn (GET_MODE (e.args
> +                                                                      [0])));
> +    else
> +      return e.use_exact_insn (code_for_aarch64_sve_cvtn (e.result_mode ()));

Given the awkward line lengths, I think this would be easier to read as:

  insn_code icode;
  if (e.fpm_mode == FPM_set)
    icode = code_for_aarch64_sve2_fp8_cvtn (GET_MODE (e.args[0]));
  else
    icode = code_for_aarch64_sve_cvtn (e.result_mode ());
  return e.use_exact_insn (icode);

or:

  insn_code icode = (e.fpm_mode == FPM_set
                     ? code_for_aarch64_sve2_fp8_cvtn (GET_MODE (e.args[0]))
                     : code_for_aarch64_sve_cvtn (e.result_mode ()));
  return e.use_exact_insn (icode);

or something like that.

>    }
>  };
>  
> [...]
> @@ -4124,6 +4144,12 @@ function_expander::use_exact_insn (insn_code icode)
>      }
>    for (unsigned int i = 0; i < nops; ++i)
>      add_input_operand (icode, args[i]);
> +  if (fpm_mode == FPM_set)
> +    {
> +      // The last element of these functions is always an fpm_t

Formatting nit, sorry, but: the de facto convention is to use /* ... */
comments in files that currently do so.  More importantly...

> +      gcc_assert(args.last()->mode == DImode);
> +      emit_move_insn (gen_rtx_REG (DImode, FPM_REGNUM), args.last ());
> +    }

...could we do this in function_expander::expand?  It seems like something
that applies mechanically to all _fpm functions, regardless of how they
generate the main instruction.

>    return generate_insn (icode);
>  }
>  
> diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
> index ac27124fb74..629523e7a45 100644
> --- a/gcc/config/aarch64/aarch64-sve2.md
> +++ b/gcc/config/aarch64/aarch64-sve2.md
> @@ -2676,6 +2676,14 @@ (define_insn "@aarch64_<optab>_lane_<mode>"
>  ;; ---- [FP<-FP] Widening conversions
>  ;; -------------------------------------------------------------------------
>  ;; Includes:
> +;; - BF1CVT
> +;; - BF1CVTLT
> +;; - BF2CVT
> +;; - BF2CVTLT
> +;; - F1CVT
> +;; - F1CVTLT
> +;; - F2CVT
> +;; - F2CVTLT
>  ;; - FCVTLT
>  ;; -------------------------------------------------------------------------
>  
> @@ -2741,6 +2749,17 @@ (define_insn "*cond_<sve_fp_op><mode>_strict"
>    "<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Ventype>"
>  )
>  
> +(define_insn "@aarch64_sve2_fp8_cvt_<fp8_cvt_uns_op><SVE_FULL_HF:mode>"
> +  [(set (match_operand:SVE_FULL_HF 0 "register_operand" "=w")
> +     (unspec:SVE_FULL_HF
> +       [(match_operand:VNx16QI_ONLY 1 "register_operand" "w")
> +       (reg:DI FPM_REGNUM)]
> +       FP8CVT_UNS)
> +       )]

Formatting nit: the ")]" should be on the previous line.  (The pattern
below is right.)

There's no need to use the VNx16QI_ONLY iterator, since nothing is
parameterised on it.  It should be enough to do:

(define_insn "@aarch64_sve2_fp8_cvt_<fp8_cvt_uns_op><mode>"
  [(set (match_operand:SVE_FULL_HF 0 "register_operand" "=w")
        (unspec:SVE_FULL_HF
          [(match_operand:VNx16QI 1 "register_operand" "w")
           (reg:DI FPM_REGNUM)]
          FP8CVT_UNS))]

Same for some of the other patterns (but @aarch64_sve_cvtnt<mode>
already does it this way and looks good as-is).

> +  "TARGET_SSVE_FP8"
> +  "<b><fp8_cvt_uns_op>\t%0.h, %1.b"
> +)
> +
>  ;; -------------------------------------------------------------------------
>  ;; ---- [FP<-FP] Narrowing conversions
>  ;; -------------------------------------------------------------------------
> @@ -2865,6 +2884,8 @@ (define_insn "@aarch64_sve2_cvtxnt<mode>"
>  ;; - BFCVTN
>  ;; - FCVT
>  ;; - FCVTN
> +;; - FCVTNB
> +;; - FCVTNT
>  ;; -------------------------------------------------------------------------
>  
>  (define_insn "truncvnx8sf<mode>2"
> @@ -2884,6 +2905,37 @@ (define_insn "@aarch64_sve_cvtn<mode>"
>    "<b>fcvtn\t%0.h, %1"
>  )
>  
> +(define_insn "@aarch64_sve2_fp8_cvtn_<SVE_FULL_HFx2:mode>"
> +  [(set (match_operand:VNx16QI_ONLY 0 "register_operand" "=w")
> +     (unspec:VNx16QI_ONLY
> +       [(match_operand:SVE_FULL_HFx2 1 "aligned_register_operand" "Uw2")
> +        (reg:DI FPM_REGNUM)]
> +       UNSPEC_FP8FCVTN))]
> +  "TARGET_SSVE_FP8"
> +  "<b>fcvtn\t%0.b, %1"
> +)
> +
> +(define_insn "@aarch64_sve2_fp8_cvtnb_<VNx8SF_ONLY:mode>"
> +  [(set (match_operand:VNx16QI_ONLY 0 "register_operand" "=w")
> +     (unspec:VNx16QI_ONLY
> +       [(match_operand:VNx8SF_ONLY 1 "aligned_register_operand" "Uw2")
> +        (reg:DI FPM_REGNUM)]
> +       UNSPEC_FCVTNB))]
> +  "TARGET_SSVE_FP8"
> +  "fcvtnb\t%0.b, %1"
> +)
> +
> +(define_insn "@aarch64_sve_cvtnt<mode>"
> +  [(set (match_operand:VNx16QI_ONLY 0 "register_operand" "=w")
> +     (unspec:VNx16QI_ONLY
> +       [(match_operand:VNx16QI_ONLY 1 "register_operand" "0")
> +        (match_operand:VNx8SF 2 "aligned_register_operand" "Uw2")
> +        (reg:DI FPM_REGNUM)]
> +       UNSPEC_FCVTNT))]
> +  "TARGET_SSVE_FP8"
> +  "fcvtnt\t%0.b, %2"
> +)
> +
>  ;; -------------------------------------------------------------------------
>  ;; ---- [FP<-INT] Multi-vector conversions
>  ;; -------------------------------------------------------------------------
> [...]
> diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtn_mf8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtn_mf8.c
> new file mode 100644
> index 00000000000..82188f8ca9a
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/cvtn_mf8.c
> @@ -0,0 +1,59 @@
> +/* { dg-additional-options "-march=armv8.5-a+sve2+bf16+fp8" } */
> +/* { dg-require-effective-target aarch64_asm_fp8_ok }  */
> +/* { dg-require-effective-target aarch64_asm_bf16_ok }  */
> +/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
> +
> +/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
> +
> +#include "test_sve_acle.h"
> +
> +/*
> +** cvtn_mf8_f16_x2_fpm:
> +**   msr     fpmr, x2
> +**   fcvtn   z0\.b, {z4\.h(?:, | - )z5\.h}
> +**   ret
> +*/
> +TEST_DUAL_Z (cvtn_mf8_f16_x2_fpm, svmfloat8_t, svfloat16x2_t,
> +        z0 = svcvtn_mf8_f16_x2_fpm (z4, fpm0),
> +        z0 = svcvtn_mf8_fpm (z4, fpm0))
> +
> +/*
> +** cvtn_mf8_bf16_x2_fpm:
> +**   msr     fpmr, x2
> +**   bfcvtn  z0\.b, {z4\.h(?:, | - )z5\.h}
> +**   ret
> +*/
> +TEST_DUAL_Z (cvtn_mf8_bf16_x2_fpm, svmfloat8_t, svbfloat16x2_t,
> +        z0 = svcvtn_mf8_bf16_x2_fpm (z4, fpm0),
> +        z0 = svcvtn_mf8_fpm (z4, fpm0))
> +
> +/*
> +** cvtnb_mf8_f32_x2_fpm:
> +**   msr     fpmr, x2
> +**   fcvtnb  z0\.b, {z4\.s(?:, | - )z5\.s}
> +**   ret
> +*/
> +TEST_DUAL_Z (cvtnb_mf8_f32_x2_fpm, svmfloat8_t, svfloat32x2_t,
> +        z0 = svcvtnb_mf8_f32_x2_fpm (z4, fpm0),
> +        z0 = svcvtnb_mf8_fpm (z4, fpm0))
> +
> +/*
> +** cvtnt_mf8_f32_x2_fpm_untied:
> +**   msr     fpmr, x2
> +**   fcvtnt  z1\.b, {z4\.s(?:, | - )z5\.s}
> +**   mov     z0.d, z1.d
> +**   ret
> +*/
> +TEST_DUAL_Z (cvtnt_mf8_f32_x2_fpm_untied, svmfloat8_t, svfloat32x2_t,
> +        z0 = svcvtnt_mf8_f32_x2_fpm (z1, z4, fpm0),
> +        z0 = svcvtnt_mf8_fpm (z1, z4, fpm0))
> +
> +/*
> +** cvtnt_mf8_f32_x2_fpm_tied:
> +**   msr     fpmr, x2
> +**   fcvtnt  z0\.b, {z4\.s(?:, | - )z5\.s}
> +**   ret
> +*/
> +TEST_DUAL_Z (cvtnt_mf8_f32_x2_fpm_tied, svmfloat8_t, svfloat32x2_t,
> +        z0 = svcvtnt_mf8_f32_x2_fpm (z0, z4, fpm0),
> +        z0 = svcvtnt_mf8_fpm (z0, z4, fpm0))

The convention so far has been to name the files after the first part
of the intrinsic name, which would mean putting the svcvtnb and svcvtnt
tests in their own files.

Looks good to me otherwise.

Thanks,
Richard

> diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
> index 0c2fd83f45c..ec4e6be1a12 100644
> --- a/gcc/testsuite/lib/target-supports.exp
> +++ b/gcc/testsuite/lib/target-supports.exp
> @@ -12121,7 +12121,7 @@ proc check_effective_target_aarch64_tiny { } {
>  
>  foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve"
>                         "i8mm" "f32mm" "f64mm" "bf16" "sb" "sve2" "ls64"
> -                       "sme" "sme-i16i64" "sme2" } {
> +                       "sme" "sme-i16i64" "sme2" "fp8" } {
>      eval [string map [list FUNC $aarch64_ext] {
>       proc check_effective_target_aarch64_asm_FUNC_ok { } {
>         if { [istarget aarch64*-*-*] } {

Reply via email to