https://gcc.gnu.org/g:1a6a8cb1a72b282c418cd143b132de6e67b5d62b

commit r15-4261-g1a6a8cb1a72b282c418cd143b132de6e67b5d62b
Author: Saurabh Jha <saurabh....@arm.com>
Date:   Wed Sep 25 22:08:33 2024 +0000

    aarch64: Add SVE2 faminmax intrinsics
    
    The AArch64 FEAT_FAMINMAX extension introduces instructions for
    computing the floating-point absolute maximum and minimum of two
    vectors element-wise.
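
    In scalar terms, each result element is the larger (respectively
    smaller) of the absolute values of the corresponding input elements.
    A rough C model for a single float element (illustrative only; the
    architected NaN and signed-zero handling is not reproduced exactly):

      #include <math.h>

      /* Illustrative scalar models, not the exact architected semantics.  */
      static inline float famax_elem (float a, float b)
      {
        return fmaxf (fabsf (a), fabsf (b));   /* larger absolute value */
      }

      static inline float famin_elem (float a, float b)
      {
        return fminf (fabsf (a), fabsf (b));   /* smaller absolute value */
      }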
    
    This patch introduces the SVE2 faminmax intrinsics. The intrinsics of
    this extension are exposed as the following builtin functions (a short
    usage sketch follows the list):
    * sva[max|min]_[m|x|z]
    * sva[max|min]_[f16|f32|f64]_[m|x|z]
    * sva[max|min]_n_[f16|f32|f64]_[m|x|z]
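
    A minimal usage sketch (the function name and argument choices are
    illustrative; the target pragma mirrors the new tests):

      #include <arm_sve.h>

      #pragma GCC target "+sve+faminmax"

      svfloat32_t
      use_amax (svbool_t pg, svfloat32_t a, svfloat32_t b, float c)
      {
        /* Vector form with merging (_m) predication: inactive elements
           are taken from the first data argument A.  */
        svfloat32_t t = svamax_f32_m (pg, a, b);
        /* _n form with "don't care" (_x) predication: the scalar operand
           is broadcast across the vector.  */
        return svamax_n_f32_x (pg, t, c);
      }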
    
    gcc/ChangeLog:
    
            * config/aarch64/aarch64-sve-builtins-base.cc
            (svamax): New function base for absolute maximum.
            (svamin): New function base for absolute minimum.
            * config/aarch64/aarch64-sve-builtins-base.def
            (REQUIRED_EXTENSIONS): Gate the new intrinsics on SVE and
            FAMINMAX.
            (svamax): New absolute maximum entry.
            (svamin): New absolute minimum entry.
            * config/aarch64/aarch64-sve-builtins-base.h: Declare function
            bases for the new intrinsics.
            * config/aarch64/aarch64.h
            (TARGET_SVE_FAMINMAX): New macro, true when both SVE and
            FAMINMAX are enabled.
            * config/aarch64/iterators.md: New unspecs, iterators, and
            attributes for the new intrinsics.
    
    gcc/testsuite/ChangeLog:
    
            * gcc.target/aarch64/sve2/acle/asm/amax_f16.c: New test.
            * gcc.target/aarch64/sve2/acle/asm/amax_f32.c: New test.
            * gcc.target/aarch64/sve2/acle/asm/amax_f64.c: New test.
            * gcc.target/aarch64/sve2/acle/asm/amin_f16.c: New test.
            * gcc.target/aarch64/sve2/acle/asm/amin_f32.c: New test.
            * gcc.target/aarch64/sve2/acle/asm/amin_f64.c: New test.

Diff:
---
 gcc/config/aarch64/aarch64-sve-builtins-base.cc    |   4 +
 gcc/config/aarch64/aarch64-sve-builtins-base.def   |   5 +
 gcc/config/aarch64/aarch64-sve-builtins-base.h     |   2 +
 gcc/config/aarch64/aarch64.h                       |   1 +
 gcc/config/aarch64/iterators.md                    |  18 +-
 .../gcc.target/aarch64/sve2/acle/asm/amax_f16.c    | 431 +++++++++++++++++++++
 .../gcc.target/aarch64/sve2/acle/asm/amax_f32.c    | 431 +++++++++++++++++++++
 .../gcc.target/aarch64/sve2/acle/asm/amax_f64.c    | 431 +++++++++++++++++++++
 .../gcc.target/aarch64/sve2/acle/asm/amin_f16.c    | 431 +++++++++++++++++++++
 .../gcc.target/aarch64/sve2/acle/asm/amin_f32.c    | 431 +++++++++++++++++++++
 .../gcc.target/aarch64/sve2/acle/asm/amin_f64.c    | 431 +++++++++++++++++++++
 11 files changed, 2615 insertions(+), 1 deletion(-)

diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index 4b33585d9814..b189818d6430 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -3071,6 +3071,10 @@ FUNCTION (svadrb, svadr_bhwd_impl, (0))
 FUNCTION (svadrd, svadr_bhwd_impl, (3))
 FUNCTION (svadrh, svadr_bhwd_impl, (1))
 FUNCTION (svadrw, svadr_bhwd_impl, (2))
+FUNCTION (svamax, cond_or_uncond_unspec_function,
+         (UNSPEC_COND_FAMAX, UNSPEC_FAMAX))
+FUNCTION (svamin, cond_or_uncond_unspec_function,
+         (UNSPEC_COND_FAMIN, UNSPEC_FAMIN))
 FUNCTION (svand, rtx_code_function, (AND, AND))
 FUNCTION (svandv, reduction, (UNSPEC_ANDV))
 FUNCTION (svasr, rtx_code_function, (ASHIFTRT, ASHIFTRT))
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.def b/gcc/config/aarch64/aarch64-sve-builtins-base.def
index 65fcba915866..95e04e4393d2 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.def
@@ -379,3 +379,8 @@ DEF_SVE_FUNCTION (svzip2q, binary, all_data, none)
 DEF_SVE_FUNCTION (svld1ro, load_replicate, all_data, implicit)
 DEF_SVE_FUNCTION (svmmla, mmla, d_float, none)
 #undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS AARCH64_FL_SVE | AARCH64_FL_FAMINMAX
+DEF_SVE_FUNCTION (svamax, binary_opt_single_n, all_float, mxz)
+DEF_SVE_FUNCTION (svamin, binary_opt_single_n, all_float, mxz)
+#undef REQUIRED_EXTENSIONS
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.h b/gcc/config/aarch64/aarch64-sve-builtins-base.h
index 5bbf3569c4b4..978cf7013f92 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.h
@@ -37,6 +37,8 @@ namespace aarch64_sve
     extern const function_base *const svadrd;
     extern const function_base *const svadrh;
     extern const function_base *const svadrw;
+    extern const function_base *const svamax;
+    extern const function_base *const svamin;
     extern const function_base *const svand;
     extern const function_base *const svandv;
     extern const function_base *const svasr;
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 030cffb17606..593319fd4723 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -472,6 +472,7 @@ constexpr auto AARCH64_FL_DEFAULT_ISA_MODE ATTRIBUTE_UNUSED
 /* Floating Point Absolute Maximum/Minimum extension instructions are
    enabled through +faminmax.  */
 #define TARGET_FAMINMAX AARCH64_HAVE_ISA (FAMINMAX)
+#define TARGET_SVE_FAMINMAX (TARGET_SVE && TARGET_FAMINMAX)
 
 /* Prefer different predicate registers for the output of a predicated
    operation over re-using an existing input predicate.  */
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 1322193b027c..a04f9f9eb3f9 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -840,6 +840,8 @@
     UNSPEC_COND_CMPNE_WIDE ; Used in aarch64-sve.md.
     UNSPEC_COND_FABS   ; Used in aarch64-sve.md.
     UNSPEC_COND_FADD   ; Used in aarch64-sve.md.
+    UNSPEC_COND_FAMAX  ; Used in aarch64-sve.md.
+    UNSPEC_COND_FAMIN  ; Used in aarch64-sve.md.
     UNSPEC_COND_FCADD90        ; Used in aarch64-sve.md.
     UNSPEC_COND_FCADD270 ; Used in aarch64-sve.md.
     UNSPEC_COND_FCMEQ  ; Used in aarch64-sve.md.
@@ -3084,6 +3086,8 @@
 
 (define_int_iterator SVE_COND_FP_BINARY
   [UNSPEC_COND_FADD
+   (UNSPEC_COND_FAMAX "TARGET_SVE_FAMINMAX")
+   (UNSPEC_COND_FAMIN "TARGET_SVE_FAMINMAX")
    UNSPEC_COND_FDIV
    UNSPEC_COND_FMAX
    UNSPEC_COND_FMAXNM
@@ -3123,7 +3127,9 @@
                                            UNSPEC_COND_SMIN])
 
 (define_int_iterator SVE_COND_FP_BINARY_REG
-  [UNSPEC_COND_FDIV
+  [(UNSPEC_COND_FAMAX "TARGET_SVE_FAMINMAX")
+   (UNSPEC_COND_FAMIN "TARGET_SVE_FAMINMAX")
+   UNSPEC_COND_FDIV
    UNSPEC_COND_FMULX])
 
 (define_int_iterator SVE_COND_FCADD [UNSPEC_COND_FCADD90
@@ -3700,6 +3706,8 @@
                        (UNSPEC_ZIP2Q "zip2q")
                        (UNSPEC_COND_FABS "abs")
                        (UNSPEC_COND_FADD "add")
+                       (UNSPEC_COND_FAMAX "famax")
+                       (UNSPEC_COND_FAMIN "famin")
                        (UNSPEC_COND_FCADD90 "cadd90")
                        (UNSPEC_COND_FCADD270 "cadd270")
                        (UNSPEC_COND_FCMLA "fcmla")
@@ -4236,6 +4244,8 @@
                            (UNSPEC_FTSSEL "ftssel")
                            (UNSPEC_COND_FABS "fabs")
                            (UNSPEC_COND_FADD "fadd")
+                           (UNSPEC_COND_FAMAX "famax")
+                           (UNSPEC_COND_FAMIN "famin")
                            (UNSPEC_COND_FCVTLT "fcvtlt")
                            (UNSPEC_COND_FCVTX "fcvtx")
                            (UNSPEC_COND_FDIV "fdiv")
@@ -4262,6 +4272,8 @@
                            (UNSPEC_COND_SMIN "fminnm")])
 
 (define_int_attr sve_fp_op_rev [(UNSPEC_COND_FADD "fadd")
+                               (UNSPEC_COND_FAMAX "famax")
+                               (UNSPEC_COND_FAMIN "famin")
                                (UNSPEC_COND_FDIV "fdivr")
                                (UNSPEC_COND_FMAX "fmax")
                                (UNSPEC_COND_FMAXNM "fmaxnm")
@@ -4400,6 +4412,8 @@
 ;; <optab><mode>3 pattern.
 (define_int_attr sve_pred_fp_rhs1_operand
   [(UNSPEC_COND_FADD "register_operand")
+   (UNSPEC_COND_FAMAX "register_operand")
+   (UNSPEC_COND_FAMIN "register_operand")
    (UNSPEC_COND_FDIV "register_operand")
    (UNSPEC_COND_FMAX "register_operand")
    (UNSPEC_COND_FMAXNM "register_operand")
@@ -4415,6 +4429,8 @@
 ;; <optab><mode>3 pattern.
 (define_int_attr sve_pred_fp_rhs2_operand
   [(UNSPEC_COND_FADD "aarch64_sve_float_arith_with_sub_operand")
+   (UNSPEC_COND_FAMAX "register_operand")
+   (UNSPEC_COND_FAMIN "register_operand")
    (UNSPEC_COND_FDIV "register_operand")
    (UNSPEC_COND_FMAX "aarch64_sve_float_maxmin_operand")
    (UNSPEC_COND_FMAXNM "aarch64_sve_float_maxmin_operand")
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f16.c
new file mode 100644
index 000000000000..3d99e4bd92d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f16.c
@@ -0,0 +1,431 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amax_f16_m_tied1:
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_m_tied1, svfloat16_t,
+               z0 = svamax_f16_m (p0, z0, z1),
+               z0 = svamax_m (p0, z0, z1))
+
+/*
+** amax_f16_m_tied2:
+**     mov     (z[0-9]+)\.d, z0\.d
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, \1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_m_tied2, svfloat16_t,
+               z0 = svamax_f16_m (p0, z1, z0),
+               z0 = svamax_m (p0, z1, z0))
+
+/*
+** amax_f16_m_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z2\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_m_untied, svfloat16_t,
+               z0 = svamax_f16_m (p0, z1, z2),
+               z0 = svamax_m (p0, z1, z2))
+
+/*
+** amax_h4_f16_m_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     famax   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_m_tied1, svfloat16_t, __fp16,
+                z0 = svamax_n_f16_m (p0, z0, d4),
+                z0 = svamax_m (p0, z0, d4))
+
+/*
+** amax_h4_f16_m_untied:
+**     mov     (z[0-9]+\.h), h4
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_m_untied, svfloat16_t, __fp16,
+                z0 = svamax_n_f16_m (p0, z1, d4),
+                z0 = svamax_m (p0, z1, d4))
+
+/*
+** amax_0_f16_m_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_m_tied1, svfloat16_t,
+               z0 = svamax_n_f16_m (p0, z0, 0),
+               z0 = svamax_m (p0, z0, 0))
+
+/*
+** amax_0_f16_m_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_m_untied, svfloat16_t,
+               z0 = svamax_n_f16_m (p0, z1, 0),
+               z0 = svamax_m (p0, z1, 0))
+
+/*
+** amax_1_f16_m_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_m_tied1, svfloat16_t,
+               z0 = svamax_n_f16_m (p0, z0, 1),
+               z0 = svamax_m (p0, z0, 1))
+
+/*
+** amax_1_f16_m_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_m_untied, svfloat16_t,
+               z0 = svamax_n_f16_m (p0, z1, 1),
+               z0 = svamax_m (p0, z1, 1))
+
+/*
+** amax_2_f16_m:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_m, svfloat16_t,
+               z0 = svamax_n_f16_m (p0, z0, 2),
+               z0 = svamax_m (p0, z0, 2))
+
+/*
+** amax_f16_z_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_z_tied1, svfloat16_t,
+               z0 = svamax_f16_z (p0, z0, z1),
+               z0 = svamax_z (p0, z0, z1))
+
+/*
+** amax_f16_z_tied2:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_z_tied2, svfloat16_t,
+               z0 = svamax_f16_z (p0, z1, z0),
+               z0 = svamax_z (p0, z1, z0))
+
+/*
+** amax_f16_z_untied:
+** (
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z2\.h
+** |
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_z_untied, svfloat16_t,
+               z0 = svamax_f16_z (p0, z1, z2),
+               z0 = svamax_z (p0, z1, z2))
+
+/*
+** amax_h4_f16_z_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_z_tied1, svfloat16_t, __fp16,
+                z0 = svamax_n_f16_z (p0, z0, d4),
+                z0 = svamax_z (p0, z0, d4))
+
+/*
+** amax_h4_f16_z_untied:
+**     mov     (z[0-9]+\.h), h4
+** (
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, \1
+** |
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_z_untied, svfloat16_t, __fp16,
+                z0 = svamax_n_f16_z (p0, z1, d4),
+                z0 = svamax_z (p0, z1, d4))
+
+/*
+** amax_0_f16_z_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_z_tied1, svfloat16_t,
+               z0 = svamax_n_f16_z (p0, z0, 0),
+               z0 = svamax_z (p0, z0, 0))
+
+/*
+** amax_0_f16_z_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_z_untied, svfloat16_t,
+               z0 = svamax_n_f16_z (p0, z1, 0),
+               z0 = svamax_z (p0, z1, 0))
+
+/*
+** amax_1_f16_z_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_z_tied1, svfloat16_t,
+               z0 = svamax_n_f16_z (p0, z0, 1),
+               z0 = svamax_z (p0, z0, 1))
+
+/*
+** amax_1_f16_z_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_z_untied, svfloat16_t,
+               z0 = svamax_n_f16_z (p0, z1, 1),
+               z0 = svamax_z (p0, z1, 1))
+
+/*
+** amax_2_f16_z:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_z, svfloat16_t,
+               z0 = svamax_n_f16_z (p0, z0, 2),
+               z0 = svamax_z (p0, z0, 2))
+
+/*
+** amax_f16_x_tied1:
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_x_tied1, svfloat16_t,
+               z0 = svamax_f16_x (p0, z0, z1),
+               z0 = svamax_x (p0, z0, z1))
+
+/*
+** amax_f16_x_tied2:
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_x_tied2, svfloat16_t,
+               z0 = svamax_f16_x (p0, z1, z0),
+               z0 = svamax_x (p0, z1, z0))
+
+/*
+** amax_f16_x_untied:
+** (
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z2\.h
+** |
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f16_x_untied, svfloat16_t,
+               z0 = svamax_f16_x (p0, z1, z2),
+               z0 = svamax_x (p0, z1, z2))
+
+/*
+** amax_h4_f16_x_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     famax   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_x_tied1, svfloat16_t, __fp16,
+                z0 = svamax_n_f16_x (p0, z0, d4),
+                z0 = svamax_x (p0, z0, d4))
+
+/*
+** amax_h4_f16_x_untied:
+**     mov     z0\.h, h4
+**     famax   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_h4_f16_x_untied, svfloat16_t, __fp16,
+                z0 = svamax_n_f16_x (p0, z1, d4),
+                z0 = svamax_x (p0, z1, d4))
+
+/*
+** amax_0_f16_x_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z0, 0),
+               z0 = svamax_x (p0, z0, 0))
+
+/*
+** amax_0_f16_x_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z1, 0),
+               z0 = svamax_x (p0, z1, 0))
+
+/*
+** amax_1_f16_x_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z0, 1),
+               z0 = svamax_x (p0, z0, 1))
+
+/*
+** amax_1_f16_x_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z1, 1),
+               z0 = svamax_x (p0, z1, 1))
+
+/*
+** amax_2_f16_x_tied1:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z0, 2),
+               z0 = svamax_x (p0, z0, 2))
+
+/*
+** amax_2_f16_x_untied:
+**     ...
+**     famax   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (p0, z1, 2),
+               z0 = svamax_x (p0, z1, 2))
+
+/*
+** ptrue_amax_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f16_x_tied1, svfloat16_t,
+               z0 = svamax_f16_x (svptrue_b16 (), z0, z1),
+               z0 = svamax_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_amax_f16_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f16_x_tied2, svfloat16_t,
+               z0 = svamax_f16_x (svptrue_b16 (), z1, z0),
+               z0 = svamax_x (svptrue_b16 (), z1, z0))
+
+/*
+** ptrue_amax_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f16_x_untied, svfloat16_t,
+               z0 = svamax_f16_x (svptrue_b16 (), z1, z2),
+               z0 = svamax_x (svptrue_b16 (), z1, z2))
+
+/*
+** ptrue_amax_0_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z0, 0),
+               z0 = svamax_x (svptrue_b16 (), z0, 0))
+
+/*
+** ptrue_amax_0_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z1, 0),
+               z0 = svamax_x (svptrue_b16 (), z1, 0))
+
+/*
+** ptrue_amax_1_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z0, 1),
+               z0 = svamax_x (svptrue_b16 (), z0, 1))
+
+/*
+** ptrue_amax_1_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z1, 1),
+               z0 = svamax_x (svptrue_b16 (), z1, 1))
+
+/*
+** ptrue_amax_2_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f16_x_tied1, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z0, 2),
+               z0 = svamax_x (svptrue_b16 (), z0, 2))
+
+/*
+** ptrue_amax_2_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f16_x_untied, svfloat16_t,
+               z0 = svamax_n_f16_x (svptrue_b16 (), z1, 2),
+               z0 = svamax_x (svptrue_b16 (), z1, 2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f32.c
new file mode 100644
index 000000000000..686996625fba
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f32.c
@@ -0,0 +1,431 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amax_f32_m_tied1:
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_m_tied1, svfloat32_t,
+               z0 = svamax_f32_m (p0, z0, z1),
+               z0 = svamax_m (p0, z0, z1))
+
+/*
+** amax_f32_m_tied2:
+**     mov     (z[0-9]+)\.d, z0\.d
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, \1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_m_tied2, svfloat32_t,
+               z0 = svamax_f32_m (p0, z1, z0),
+               z0 = svamax_m (p0, z1, z0))
+
+/*
+** amax_f32_m_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z2\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_m_untied, svfloat32_t,
+               z0 = svamax_f32_m (p0, z1, z2),
+               z0 = svamax_m (p0, z1, z2))
+
+/*
+** amax_s4_f32_m_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     famax   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_m_tied1, svfloat32_t, float,
+                z0 = svamax_n_f32_m (p0, z0, d4),
+                z0 = svamax_m (p0, z0, d4))
+
+/*
+** amax_s4_f32_m_untied:
+**     mov     (z[0-9]+\.s), s4
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_m_untied, svfloat32_t, float,
+                z0 = svamax_n_f32_m (p0, z1, d4),
+                z0 = svamax_m (p0, z1, d4))
+
+/*
+** amax_0_f32_m_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_m_tied1, svfloat32_t,
+               z0 = svamax_n_f32_m (p0, z0, 0),
+               z0 = svamax_m (p0, z0, 0))
+
+/*
+** amax_0_f32_m_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_m_untied, svfloat32_t,
+               z0 = svamax_n_f32_m (p0, z1, 0),
+               z0 = svamax_m (p0, z1, 0))
+
+/*
+** amax_1_f32_m_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_m_tied1, svfloat32_t,
+               z0 = svamax_n_f32_m (p0, z0, 1),
+               z0 = svamax_m (p0, z0, 1))
+
+/*
+** amax_1_f32_m_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_m_untied, svfloat32_t,
+               z0 = svamax_n_f32_m (p0, z1, 1),
+               z0 = svamax_m (p0, z1, 1))
+
+/*
+** amax_2_f32_m:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_m, svfloat32_t,
+               z0 = svamax_n_f32_m (p0, z0, 2),
+               z0 = svamax_m (p0, z0, 2))
+
+/*
+** amax_f32_z_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_z_tied1, svfloat32_t,
+               z0 = svamax_f32_z (p0, z0, z1),
+               z0 = svamax_z (p0, z0, z1))
+
+/*
+** amax_f32_z_tied2:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_z_tied2, svfloat32_t,
+               z0 = svamax_f32_z (p0, z1, z0),
+               z0 = svamax_z (p0, z1, z0))
+
+/*
+** amax_f32_z_untied:
+** (
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z2\.s
+** |
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_z_untied, svfloat32_t,
+               z0 = svamax_f32_z (p0, z1, z2),
+               z0 = svamax_z (p0, z1, z2))
+
+/*
+** amax_s4_f32_z_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_z_tied1, svfloat32_t, float,
+                z0 = svamax_n_f32_z (p0, z0, d4),
+                z0 = svamax_z (p0, z0, d4))
+
+/*
+** amax_s4_f32_z_untied:
+**     mov     (z[0-9]+\.s), s4
+** (
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, \1
+** |
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_z_untied, svfloat32_t, float,
+                z0 = svamax_n_f32_z (p0, z1, d4),
+                z0 = svamax_z (p0, z1, d4))
+
+/*
+** amax_0_f32_z_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_z_tied1, svfloat32_t,
+               z0 = svamax_n_f32_z (p0, z0, 0),
+               z0 = svamax_z (p0, z0, 0))
+
+/*
+** amax_0_f32_z_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_z_untied, svfloat32_t,
+               z0 = svamax_n_f32_z (p0, z1, 0),
+               z0 = svamax_z (p0, z1, 0))
+
+/*
+** amax_1_f32_z_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_z_tied1, svfloat32_t,
+               z0 = svamax_n_f32_z (p0, z0, 1),
+               z0 = svamax_z (p0, z0, 1))
+
+/*
+** amax_1_f32_z_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_z_untied, svfloat32_t,
+               z0 = svamax_n_f32_z (p0, z1, 1),
+               z0 = svamax_z (p0, z1, 1))
+
+/*
+** amax_2_f32_z:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_z, svfloat32_t,
+               z0 = svamax_n_f32_z (p0, z0, 2),
+               z0 = svamax_z (p0, z0, 2))
+
+/*
+** amax_f32_x_tied1:
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_x_tied1, svfloat32_t,
+               z0 = svamax_f32_x (p0, z0, z1),
+               z0 = svamax_x (p0, z0, z1))
+
+/*
+** amax_f32_x_tied2:
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_x_tied2, svfloat32_t,
+               z0 = svamax_f32_x (p0, z1, z0),
+               z0 = svamax_x (p0, z1, z0))
+
+/*
+** amax_f32_x_untied:
+** (
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z2\.s
+** |
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f32_x_untied, svfloat32_t,
+               z0 = svamax_f32_x (p0, z1, z2),
+               z0 = svamax_x (p0, z1, z2))
+
+/*
+** amax_s4_f32_x_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     famax   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_x_tied1, svfloat32_t, float,
+                z0 = svamax_n_f32_x (p0, z0, d4),
+                z0 = svamax_x (p0, z0, d4))
+
+/*
+** amax_s4_f32_x_untied:
+**     mov     z0\.s, s4
+**     famax   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_s4_f32_x_untied, svfloat32_t, float,
+                z0 = svamax_n_f32_x (p0, z1, d4),
+                z0 = svamax_x (p0, z1, d4))
+
+/*
+** amax_0_f32_x_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z0, 0),
+               z0 = svamax_x (p0, z0, 0))
+
+/*
+** amax_0_f32_x_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z1, 0),
+               z0 = svamax_x (p0, z1, 0))
+
+/*
+** amax_1_f32_x_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z0, 1),
+               z0 = svamax_x (p0, z0, 1))
+
+/*
+** amax_1_f32_x_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z1, 1),
+               z0 = svamax_x (p0, z1, 1))
+
+/*
+** amax_2_f32_x_tied1:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z0, 2),
+               z0 = svamax_x (p0, z0, 2))
+
+/*
+** amax_2_f32_x_untied:
+**     ...
+**     famax   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (p0, z1, 2),
+               z0 = svamax_x (p0, z1, 2))
+
+/*
+** ptrue_amax_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f32_x_tied1, svfloat32_t,
+               z0 = svamax_f32_x (svptrue_b32 (), z0, z1),
+               z0 = svamax_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_amax_f32_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f32_x_tied2, svfloat32_t,
+               z0 = svamax_f32_x (svptrue_b32 (), z1, z0),
+               z0 = svamax_x (svptrue_b32 (), z1, z0))
+
+/*
+** ptrue_amax_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f32_x_untied, svfloat32_t,
+               z0 = svamax_f32_x (svptrue_b32 (), z1, z2),
+               z0 = svamax_x (svptrue_b32 (), z1, z2))
+
+/*
+** ptrue_amax_0_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z0, 0),
+               z0 = svamax_x (svptrue_b32 (), z0, 0))
+
+/*
+** ptrue_amax_0_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z1, 0),
+               z0 = svamax_x (svptrue_b32 (), z1, 0))
+
+/*
+** ptrue_amax_1_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z0, 1),
+               z0 = svamax_x (svptrue_b32 (), z0, 1))
+
+/*
+** ptrue_amax_1_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z1, 1),
+               z0 = svamax_x (svptrue_b32 (), z1, 1))
+
+/*
+** ptrue_amax_2_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f32_x_tied1, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z0, 2),
+               z0 = svamax_x (svptrue_b32 (), z0, 2))
+
+/*
+** ptrue_amax_2_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f32_x_untied, svfloat32_t,
+               z0 = svamax_n_f32_x (svptrue_b32 (), z1, 2),
+               z0 = svamax_x (svptrue_b32 (), z1, 2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f64.c
new file mode 100644
index 000000000000..e0f0ac32e54d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amax_f64.c
@@ -0,0 +1,431 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amax_f64_m_tied1:
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_m_tied1, svfloat64_t,
+               z0 = svamax_f64_m (p0, z0, z1),
+               z0 = svamax_m (p0, z0, z1))
+
+/*
+** amax_f64_m_tied2:
+**     mov     (z[0-9]+\.d), z0\.d
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_m_tied2, svfloat64_t,
+               z0 = svamax_f64_m (p0, z1, z0),
+               z0 = svamax_m (p0, z1, z0))
+
+/*
+** amax_f64_m_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z2\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_m_untied, svfloat64_t,
+               z0 = svamax_f64_m (p0, z1, z2),
+               z0 = svamax_m (p0, z1, z2))
+
+/*
+** amax_d4_f64_m_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     famax   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_m_tied1, svfloat64_t, double,
+                z0 = svamax_n_f64_m (p0, z0, d4),
+                z0 = svamax_m (p0, z0, d4))
+
+/*
+** amax_d4_f64_m_untied:
+**     mov     (z[0-9]+\.d), d4
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_m_untied, svfloat64_t, double,
+                z0 = svamax_n_f64_m (p0, z1, d4),
+                z0 = svamax_m (p0, z1, d4))
+
+/*
+** amax_0_f64_m_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_m_tied1, svfloat64_t,
+               z0 = svamax_n_f64_m (p0, z0, 0),
+               z0 = svamax_m (p0, z0, 0))
+
+/*
+** amax_0_f64_m_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_m_untied, svfloat64_t,
+               z0 = svamax_n_f64_m (p0, z1, 0),
+               z0 = svamax_m (p0, z1, 0))
+
+/*
+** amax_1_f64_m_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_m_tied1, svfloat64_t,
+               z0 = svamax_n_f64_m (p0, z0, 1),
+               z0 = svamax_m (p0, z0, 1))
+
+/*
+** amax_1_f64_m_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_m_untied, svfloat64_t,
+               z0 = svamax_n_f64_m (p0, z1, 1),
+               z0 = svamax_m (p0, z1, 1))
+
+/*
+** amax_2_f64_m:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_m, svfloat64_t,
+               z0 = svamax_n_f64_m (p0, z0, 2),
+               z0 = svamax_m (p0, z0, 2))
+
+/*
+** amax_f64_z_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_z_tied1, svfloat64_t,
+               z0 = svamax_f64_z (p0, z0, z1),
+               z0 = svamax_z (p0, z0, z1))
+
+/*
+** amax_f64_z_tied2:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_z_tied2, svfloat64_t,
+               z0 = svamax_f64_z (p0, z1, z0),
+               z0 = svamax_z (p0, z1, z0))
+
+/*
+** amax_f64_z_untied:
+** (
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z2\.d
+** |
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_z_untied, svfloat64_t,
+               z0 = svamax_f64_z (p0, z1, z2),
+               z0 = svamax_z (p0, z1, z2))
+
+/*
+** amax_d4_f64_z_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_z_tied1, svfloat64_t, double,
+                z0 = svamax_n_f64_z (p0, z0, d4),
+                z0 = svamax_z (p0, z0, d4))
+
+/*
+** amax_d4_f64_z_untied:
+**     mov     (z[0-9]+\.d), d4
+** (
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, \1
+** |
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_z_untied, svfloat64_t, double,
+                z0 = svamax_n_f64_z (p0, z1, d4),
+                z0 = svamax_z (p0, z1, d4))
+
+/*
+** amax_0_f64_z_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_z_tied1, svfloat64_t,
+               z0 = svamax_n_f64_z (p0, z0, 0),
+               z0 = svamax_z (p0, z0, 0))
+
+/*
+** amax_0_f64_z_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_z_untied, svfloat64_t,
+               z0 = svamax_n_f64_z (p0, z1, 0),
+               z0 = svamax_z (p0, z1, 0))
+
+/*
+** amax_1_f64_z_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_z_tied1, svfloat64_t,
+               z0 = svamax_n_f64_z (p0, z0, 1),
+               z0 = svamax_z (p0, z0, 1))
+
+/*
+** amax_1_f64_z_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_z_untied, svfloat64_t,
+               z0 = svamax_n_f64_z (p0, z1, 1),
+               z0 = svamax_z (p0, z1, 1))
+
+/*
+** amax_2_f64_z:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_z, svfloat64_t,
+               z0 = svamax_n_f64_z (p0, z0, 2),
+               z0 = svamax_z (p0, z0, 2))
+
+/*
+** amax_f64_x_tied1:
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_x_tied1, svfloat64_t,
+               z0 = svamax_f64_x (p0, z0, z1),
+               z0 = svamax_x (p0, z0, z1))
+
+/*
+** amax_f64_x_tied2:
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_x_tied2, svfloat64_t,
+               z0 = svamax_f64_x (p0, z1, z0),
+               z0 = svamax_x (p0, z1, z0))
+
+/*
+** amax_f64_x_untied:
+** (
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z2\.d
+** |
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amax_f64_x_untied, svfloat64_t,
+               z0 = svamax_f64_x (p0, z1, z2),
+               z0 = svamax_x (p0, z1, z2))
+
+/*
+** amax_d4_f64_x_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     famax   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_x_tied1, svfloat64_t, double,
+                z0 = svamax_n_f64_x (p0, z0, d4),
+                z0 = svamax_x (p0, z0, d4))
+
+/*
+** amax_d4_f64_x_untied:
+**     mov     z0\.d, d4
+**     famax   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_ZD (amax_d4_f64_x_untied, svfloat64_t, double,
+                z0 = svamax_n_f64_x (p0, z1, d4),
+                z0 = svamax_x (p0, z1, d4))
+
+/*
+** amax_0_f64_x_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z0, 0),
+               z0 = svamax_x (p0, z0, 0))
+
+/*
+** amax_0_f64_x_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_0_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z1, 0),
+               z0 = svamax_x (p0, z1, 0))
+
+/*
+** amax_1_f64_x_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z0, 1),
+               z0 = svamax_x (p0, z0, 1))
+
+/*
+** amax_1_f64_x_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_1_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z1, 1),
+               z0 = svamax_x (p0, z1, 1))
+
+/*
+** amax_2_f64_x_tied1:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z0, 2),
+               z0 = svamax_x (p0, z0, 2))
+
+/*
+** amax_2_f64_x_untied:
+**     ...
+**     famax   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amax_2_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (p0, z1, 2),
+               z0 = svamax_x (p0, z1, 2))
+
+/*
+** ptrue_amax_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f64_x_tied1, svfloat64_t,
+               z0 = svamax_f64_x (svptrue_b64 (), z0, z1),
+               z0 = svamax_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_amax_f64_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f64_x_tied2, svfloat64_t,
+               z0 = svamax_f64_x (svptrue_b64 (), z1, z0),
+               z0 = svamax_x (svptrue_b64 (), z1, z0))
+
+/*
+** ptrue_amax_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_f64_x_untied, svfloat64_t,
+               z0 = svamax_f64_x (svptrue_b64 (), z1, z2),
+               z0 = svamax_x (svptrue_b64 (), z1, z2))
+
+/*
+** ptrue_amax_0_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z0, 0),
+               z0 = svamax_x (svptrue_b64 (), z0, 0))
+
+/*
+** ptrue_amax_0_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_0_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z1, 0),
+               z0 = svamax_x (svptrue_b64 (), z1, 0))
+
+/*
+** ptrue_amax_1_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z0, 1),
+               z0 = svamax_x (svptrue_b64 (), z0, 1))
+
+/*
+** ptrue_amax_1_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_1_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z1, 1),
+               z0 = svamax_x (svptrue_b64 (), z1, 1))
+
+/*
+** ptrue_amax_2_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f64_x_tied1, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z0, 2),
+               z0 = svamax_x (svptrue_b64 (), z0, 2))
+
+/*
+** ptrue_amax_2_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amax_2_f64_x_untied, svfloat64_t,
+               z0 = svamax_n_f64_x (svptrue_b64 (), z1, 2),
+               z0 = svamax_x (svptrue_b64 (), z1, 2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f16.c
new file mode 100644
index 000000000000..f93aed6cba53
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f16.c
@@ -0,0 +1,431 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amin_f16_m_tied1:
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_m_tied1, svfloat16_t,
+               z0 = svamin_f16_m (p0, z0, z1),
+               z0 = svamin_m (p0, z0, z1))
+
+/*
+** amin_f16_m_tied2:
+**     mov     (z[0-9]+)\.d, z0\.d
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, \1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_m_tied2, svfloat16_t,
+               z0 = svamin_f16_m (p0, z1, z0),
+               z0 = svamin_m (p0, z1, z0))
+
+/*
+** amin_f16_m_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z2\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_m_untied, svfloat16_t,
+               z0 = svamin_f16_m (p0, z1, z2),
+               z0 = svamin_m (p0, z1, z2))
+
+/*
+** amin_h4_f16_m_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     famin   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_m_tied1, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_m (p0, z0, d4),
+                z0 = svamin_m (p0, z0, d4))
+
+/*
+** amin_h4_f16_m_untied:
+**     mov     (z[0-9]+\.h), h4
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_m_untied, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_m (p0, z1, d4),
+                z0 = svamin_m (p0, z1, d4))
+
+/*
+** amin_0_f16_m_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_m_tied1, svfloat16_t,
+               z0 = svamin_n_f16_m (p0, z0, 0),
+               z0 = svamin_m (p0, z0, 0))
+
+/*
+** amin_0_f16_m_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_m_untied, svfloat16_t,
+               z0 = svamin_n_f16_m (p0, z1, 0),
+               z0 = svamin_m (p0, z1, 0))
+
+/*
+** amin_1_f16_m_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_m_tied1, svfloat16_t,
+               z0 = svamin_n_f16_m (p0, z0, 1),
+               z0 = svamin_m (p0, z0, 1))
+
+/*
+** amin_1_f16_m_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_m_untied, svfloat16_t,
+               z0 = svamin_n_f16_m (p0, z1, 1),
+               z0 = svamin_m (p0, z1, 1))
+
+/*
+** amin_2_f16_m:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_m, svfloat16_t,
+               z0 = svamin_n_f16_m (p0, z0, 2),
+               z0 = svamin_m (p0, z0, 2))
+
+/*
+** amin_f16_z_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_z_tied1, svfloat16_t,
+               z0 = svamin_f16_z (p0, z0, z1),
+               z0 = svamin_z (p0, z0, z1))
+
+/*
+** amin_f16_z_tied2:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_z_tied2, svfloat16_t,
+               z0 = svamin_f16_z (p0, z1, z0),
+               z0 = svamin_z (p0, z1, z0))
+
+/*
+** amin_f16_z_untied:
+** (
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z2\.h
+** |
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_z_untied, svfloat16_t,
+               z0 = svamin_f16_z (p0, z1, z2),
+               z0 = svamin_z (p0, z1, z2))
+
+/*
+** amin_h4_f16_z_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_z_tied1, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_z (p0, z0, d4),
+                z0 = svamin_z (p0, z0, d4))
+
+/*
+** amin_h4_f16_z_untied:
+**     mov     (z[0-9]+\.h), h4
+** (
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, \1
+** |
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_z_untied, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_z (p0, z1, d4),
+                z0 = svamin_z (p0, z1, d4))
+
+/*
+** amin_0_f16_z_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_z_tied1, svfloat16_t,
+               z0 = svamin_n_f16_z (p0, z0, 0),
+               z0 = svamin_z (p0, z0, 0))
+
+/*
+** amin_0_f16_z_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_z_untied, svfloat16_t,
+               z0 = svamin_n_f16_z (p0, z1, 0),
+               z0 = svamin_z (p0, z1, 0))
+
+/*
+** amin_1_f16_z_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_z_tied1, svfloat16_t,
+               z0 = svamin_n_f16_z (p0, z0, 1),
+               z0 = svamin_z (p0, z0, 1))
+
+/*
+** amin_1_f16_z_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_z_untied, svfloat16_t,
+               z0 = svamin_n_f16_z (p0, z1, 1),
+               z0 = svamin_z (p0, z1, 1))
+
+/*
+** amin_2_f16_z:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_z, svfloat16_t,
+               z0 = svamin_n_f16_z (p0, z0, 2),
+               z0 = svamin_z (p0, z0, 2))
+
+/*
+** amin_f16_x_tied1:
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_x_tied1, svfloat16_t,
+               z0 = svamin_f16_x (p0, z0, z1),
+               z0 = svamin_x (p0, z0, z1))
+
+/*
+** amin_f16_x_tied2:
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_x_tied2, svfloat16_t,
+               z0 = svamin_f16_x (p0, z1, z0),
+               z0 = svamin_x (p0, z1, z0))
+
+/*
+** amin_f16_x_untied:
+** (
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z2\.h
+** |
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f16_x_untied, svfloat16_t,
+               z0 = svamin_f16_x (p0, z1, z2),
+               z0 = svamin_x (p0, z1, z2))
+
+/*
+** amin_h4_f16_x_tied1:
+**     mov     (z[0-9]+\.h), h4
+**     famin   z0\.h, p0/m, z0\.h, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_x_tied1, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_x (p0, z0, d4),
+                z0 = svamin_x (p0, z0, d4))
+
+/*
+** amin_h4_f16_x_untied:
+**     mov     z0\.h, h4
+**     famin   z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_h4_f16_x_untied, svfloat16_t, __fp16,
+                z0 = svamin_n_f16_x (p0, z1, d4),
+                z0 = svamin_x (p0, z1, d4))
+
+/*
+** amin_0_f16_x_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z0, 0),
+               z0 = svamin_x (p0, z0, 0))
+
+/*
+** amin_0_f16_x_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z1, 0),
+               z0 = svamin_x (p0, z1, 0))
+
+/*
+** amin_1_f16_x_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z0, 1),
+               z0 = svamin_x (p0, z0, 1))
+
+/*
+** amin_1_f16_x_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z1, 1),
+               z0 = svamin_x (p0, z1, 1))
+
+/*
+** amin_2_f16_x_tied1:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z0, 2),
+               z0 = svamin_x (p0, z0, 2))
+
+/*
+** amin_2_f16_x_untied:
+**     ...
+**     famin   z0\.h, p0/m, z0\.h, z[0-9]+\.h
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (p0, z1, 2),
+               z0 = svamin_x (p0, z1, 2))
+
+/*
+** ptrue_amin_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f16_x_tied1, svfloat16_t,
+               z0 = svamin_f16_x (svptrue_b16 (), z0, z1),
+               z0 = svamin_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_amin_f16_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f16_x_tied2, svfloat16_t,
+               z0 = svamin_f16_x (svptrue_b16 (), z1, z0),
+               z0 = svamin_x (svptrue_b16 (), z1, z0))
+
+/*
+** ptrue_amin_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f16_x_untied, svfloat16_t,
+               z0 = svamin_f16_x (svptrue_b16 (), z1, z2),
+               z0 = svamin_x (svptrue_b16 (), z1, z2))
+
+/*
+** ptrue_amin_0_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z0, 0),
+               z0 = svamin_x (svptrue_b16 (), z0, 0))
+
+/*
+** ptrue_amin_0_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z1, 0),
+               z0 = svamin_x (svptrue_b16 (), z1, 0))
+
+/*
+** ptrue_amin_1_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z0, 1),
+               z0 = svamin_x (svptrue_b16 (), z0, 1))
+
+/*
+** ptrue_amin_1_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z1, 1),
+               z0 = svamin_x (svptrue_b16 (), z1, 1))
+
+/*
+** ptrue_amin_2_f16_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f16_x_tied1, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z0, 2),
+               z0 = svamin_x (svptrue_b16 (), z0, 2))
+
+/*
+** ptrue_amin_2_f16_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f16_x_untied, svfloat16_t,
+               z0 = svamin_n_f16_x (svptrue_b16 (), z1, 2),
+               z0 = svamin_x (svptrue_b16 (), z1, 2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f32.c
new file mode 100644
index 000000000000..cc1a343160eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f32.c
@@ -0,0 +1,431 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amin_f32_m_tied1:
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_m_tied1, svfloat32_t,
+               z0 = svamin_f32_m (p0, z0, z1),
+               z0 = svamin_m (p0, z0, z1))
+
+/*
+** amin_f32_m_tied2:
+**     mov     (z[0-9]+)\.d, z0\.d
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, \1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_m_tied2, svfloat32_t,
+               z0 = svamin_f32_m (p0, z1, z0),
+               z0 = svamin_m (p0, z1, z0))
+
+/*
+** amin_f32_m_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z2\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_m_untied, svfloat32_t,
+               z0 = svamin_f32_m (p0, z1, z2),
+               z0 = svamin_m (p0, z1, z2))
+
+/*
+** amin_s4_f32_m_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     famin   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_m_tied1, svfloat32_t, float,
+                z0 = svamin_n_f32_m (p0, z0, d4),
+                z0 = svamin_m (p0, z0, d4))
+
+/*
+** amin_s4_f32_m_untied:
+**     mov     (z[0-9]+\.s), s4
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_m_untied, svfloat32_t, float,
+                z0 = svamin_n_f32_m (p0, z1, d4),
+                z0 = svamin_m (p0, z1, d4))
+
+/*
+** amin_0_f32_m_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_m_tied1, svfloat32_t,
+               z0 = svamin_n_f32_m (p0, z0, 0),
+               z0 = svamin_m (p0, z0, 0))
+
+/*
+** amin_0_f32_m_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_m_untied, svfloat32_t,
+               z0 = svamin_n_f32_m (p0, z1, 0),
+               z0 = svamin_m (p0, z1, 0))
+
+/*
+** amin_1_f32_m_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_m_tied1, svfloat32_t,
+               z0 = svamin_n_f32_m (p0, z0, 1),
+               z0 = svamin_m (p0, z0, 1))
+
+/*
+** amin_1_f32_m_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_m_untied, svfloat32_t,
+               z0 = svamin_n_f32_m (p0, z1, 1),
+               z0 = svamin_m (p0, z1, 1))
+
+/*
+** amin_2_f32_m:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_m, svfloat32_t,
+               z0 = svamin_n_f32_m (p0, z0, 2),
+               z0 = svamin_m (p0, z0, 2))
+
+/*
+** amin_f32_z_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_z_tied1, svfloat32_t,
+               z0 = svamin_f32_z (p0, z0, z1),
+               z0 = svamin_z (p0, z0, z1))
+
+/*
+** amin_f32_z_tied2:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_z_tied2, svfloat32_t,
+               z0 = svamin_f32_z (p0, z1, z0),
+               z0 = svamin_z (p0, z1, z0))
+
+/*
+** amin_f32_z_untied:
+** (
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z2\.s
+** |
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_z_untied, svfloat32_t,
+               z0 = svamin_f32_z (p0, z1, z2),
+               z0 = svamin_z (p0, z1, z2))
+
+/*
+** amin_s4_f32_z_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_z_tied1, svfloat32_t, float,
+                z0 = svamin_n_f32_z (p0, z0, d4),
+                z0 = svamin_z (p0, z0, d4))
+
+/*
+** amin_s4_f32_z_untied:
+**     mov     (z[0-9]+\.s), s4
+** (
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, \1
+** |
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_z_untied, svfloat32_t, float,
+                z0 = svamin_n_f32_z (p0, z1, d4),
+                z0 = svamin_z (p0, z1, d4))
+
+/*
+** amin_0_f32_z_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_z_tied1, svfloat32_t,
+               z0 = svamin_n_f32_z (p0, z0, 0),
+               z0 = svamin_z (p0, z0, 0))
+
+/*
+** amin_0_f32_z_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_z_untied, svfloat32_t,
+               z0 = svamin_n_f32_z (p0, z1, 0),
+               z0 = svamin_z (p0, z1, 0))
+
+/*
+** amin_1_f32_z_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_z_tied1, svfloat32_t,
+               z0 = svamin_n_f32_z (p0, z0, 1),
+               z0 = svamin_z (p0, z0, 1))
+
+/*
+** amin_1_f32_z_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_z_untied, svfloat32_t,
+               z0 = svamin_n_f32_z (p0, z1, 1),
+               z0 = svamin_z (p0, z1, 1))
+
+/*
+** amin_2_f32_z:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_z, svfloat32_t,
+               z0 = svamin_n_f32_z (p0, z0, 2),
+               z0 = svamin_z (p0, z0, 2))
+
+/*
+** amin_f32_x_tied1:
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_x_tied1, svfloat32_t,
+               z0 = svamin_f32_x (p0, z0, z1),
+               z0 = svamin_x (p0, z0, z1))
+
+/*
+** amin_f32_x_tied2:
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_x_tied2, svfloat32_t,
+               z0 = svamin_f32_x (p0, z1, z0),
+               z0 = svamin_x (p0, z1, z0))
+
+/*
+** amin_f32_x_untied:
+** (
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z2\.s
+** |
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f32_x_untied, svfloat32_t,
+               z0 = svamin_f32_x (p0, z1, z2),
+               z0 = svamin_x (p0, z1, z2))
+
+/*
+** amin_s4_f32_x_tied1:
+**     mov     (z[0-9]+\.s), s4
+**     famin   z0\.s, p0/m, z0\.s, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_x_tied1, svfloat32_t, float,
+                z0 = svamin_n_f32_x (p0, z0, d4),
+                z0 = svamin_x (p0, z0, d4))
+
+/*
+** amin_s4_f32_x_untied:
+**     mov     z0\.s, s4
+**     famin   z0\.s, p0/m, z0\.s, z1\.s
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_s4_f32_x_untied, svfloat32_t, float,
+                z0 = svamin_n_f32_x (p0, z1, d4),
+                z0 = svamin_x (p0, z1, d4))
+
+/*
+** amin_0_f32_x_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z0, 0),
+               z0 = svamin_x (p0, z0, 0))
+
+/*
+** amin_0_f32_x_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z1, 0),
+               z0 = svamin_x (p0, z1, 0))
+
+/*
+** amin_1_f32_x_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z0, 1),
+               z0 = svamin_x (p0, z0, 1))
+
+/*
+** amin_1_f32_x_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z1, 1),
+               z0 = svamin_x (p0, z1, 1))
+
+/*
+** amin_2_f32_x_tied1:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z0, 2),
+               z0 = svamin_x (p0, z0, 2))
+
+/*
+** amin_2_f32_x_untied:
+**     ...
+**     famin   z0\.s, p0/m, z0\.s, z[0-9]+\.s
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (p0, z1, 2),
+               z0 = svamin_x (p0, z1, 2))
+
+/*
+** ptrue_amin_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f32_x_tied1, svfloat32_t,
+               z0 = svamin_f32_x (svptrue_b32 (), z0, z1),
+               z0 = svamin_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_amin_f32_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f32_x_tied2, svfloat32_t,
+               z0 = svamin_f32_x (svptrue_b32 (), z1, z0),
+               z0 = svamin_x (svptrue_b32 (), z1, z0))
+
+/*
+** ptrue_amin_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f32_x_untied, svfloat32_t,
+               z0 = svamin_f32_x (svptrue_b32 (), z1, z2),
+               z0 = svamin_x (svptrue_b32 (), z1, z2))
+
+/*
+** ptrue_amin_0_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z0, 0),
+               z0 = svamin_x (svptrue_b32 (), z0, 0))
+
+/*
+** ptrue_amin_0_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z1, 0),
+               z0 = svamin_x (svptrue_b32 (), z1, 0))
+
+/*
+** ptrue_amin_1_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z0, 1),
+               z0 = svamin_x (svptrue_b32 (), z0, 1))
+
+/*
+** ptrue_amin_1_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z1, 1),
+               z0 = svamin_x (svptrue_b32 (), z1, 1))
+
+/*
+** ptrue_amin_2_f32_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f32_x_tied1, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z0, 2),
+               z0 = svamin_x (svptrue_b32 (), z0, 2))
+
+/*
+** ptrue_amin_2_f32_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f32_x_untied, svfloat32_t,
+               z0 = svamin_n_f32_x (svptrue_b32 (), z1, 2),
+               z0 = svamin_x (svptrue_b32 (), z1, 2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f64.c
new file mode 100644
index 000000000000..b5133f12950a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/amin_f64.c
@@ -0,0 +1,431 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve+faminmax"
+
+/*
+** amin_f64_m_tied1:
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_m_tied1, svfloat64_t,
+               z0 = svamin_f64_m (p0, z0, z1),
+               z0 = svamin_m (p0, z0, z1))
+
+/*
+** amin_f64_m_tied2:
+**     mov     (z[0-9]+\.d), z0\.d
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_m_tied2, svfloat64_t,
+               z0 = svamin_f64_m (p0, z1, z0),
+               z0 = svamin_m (p0, z1, z0))
+
+/*
+** amin_f64_m_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z2\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_m_untied, svfloat64_t,
+               z0 = svamin_f64_m (p0, z1, z2),
+               z0 = svamin_m (p0, z1, z2))
+
+/*
+** amin_d4_f64_m_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     famin   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_m_tied1, svfloat64_t, double,
+                z0 = svamin_n_f64_m (p0, z0, d4),
+                z0 = svamin_m (p0, z0, d4))
+
+/*
+** amin_d4_f64_m_untied:
+**     mov     (z[0-9]+\.d), d4
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_m_untied, svfloat64_t, double,
+                z0 = svamin_n_f64_m (p0, z1, d4),
+                z0 = svamin_m (p0, z1, d4))
+
+/*
+** amin_0_f64_m_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_m_tied1, svfloat64_t,
+               z0 = svamin_n_f64_m (p0, z0, 0),
+               z0 = svamin_m (p0, z0, 0))
+
+/*
+** amin_0_f64_m_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_m_untied, svfloat64_t,
+               z0 = svamin_n_f64_m (p0, z1, 0),
+               z0 = svamin_m (p0, z1, 0))
+
+/*
+** amin_1_f64_m_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_m_tied1, svfloat64_t,
+               z0 = svamin_n_f64_m (p0, z0, 1),
+               z0 = svamin_m (p0, z0, 1))
+
+/*
+** amin_1_f64_m_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_m_untied, svfloat64_t,
+               z0 = svamin_n_f64_m (p0, z1, 1),
+               z0 = svamin_m (p0, z1, 1))
+
+/*
+** amin_2_f64_m:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_m, svfloat64_t,
+               z0 = svamin_n_f64_m (p0, z0, 2),
+               z0 = svamin_m (p0, z0, 2))
+
+/*
+** amin_f64_z_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_z_tied1, svfloat64_t,
+               z0 = svamin_f64_z (p0, z0, z1),
+               z0 = svamin_z (p0, z0, z1))
+
+/*
+** amin_f64_z_tied2:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_z_tied2, svfloat64_t,
+               z0 = svamin_f64_z (p0, z1, z0),
+               z0 = svamin_z (p0, z1, z0))
+
+/*
+** amin_f64_z_untied:
+** (
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z2\.d
+** |
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_z_untied, svfloat64_t,
+               z0 = svamin_f64_z (p0, z1, z2),
+               z0 = svamin_z (p0, z1, z2))
+
+/*
+** amin_d4_f64_z_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_z_tied1, svfloat64_t, double,
+                z0 = svamin_n_f64_z (p0, z0, d4),
+                z0 = svamin_z (p0, z0, d4))
+
+/*
+** amin_d4_f64_z_untied:
+**     mov     (z[0-9]+\.d), d4
+** (
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, \1
+** |
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_z_untied, svfloat64_t, double,
+                z0 = svamin_n_f64_z (p0, z1, d4),
+                z0 = svamin_z (p0, z1, d4))
+
+/*
+** amin_0_f64_z_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_z_tied1, svfloat64_t,
+               z0 = svamin_n_f64_z (p0, z0, 0),
+               z0 = svamin_z (p0, z0, 0))
+
+/*
+** amin_0_f64_z_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_z_untied, svfloat64_t,
+               z0 = svamin_n_f64_z (p0, z1, 0),
+               z0 = svamin_z (p0, z1, 0))
+
+/*
+** amin_1_f64_z_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_z_tied1, svfloat64_t,
+               z0 = svamin_n_f64_z (p0, z0, 1),
+               z0 = svamin_z (p0, z0, 1))
+
+/*
+** amin_1_f64_z_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_z_untied, svfloat64_t,
+               z0 = svamin_n_f64_z (p0, z1, 1),
+               z0 = svamin_z (p0, z1, 1))
+
+/*
+** amin_2_f64_z:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_z, svfloat64_t,
+               z0 = svamin_n_f64_z (p0, z0, 2),
+               z0 = svamin_z (p0, z0, 2))
+
+/*
+** amin_f64_x_tied1:
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_x_tied1, svfloat64_t,
+               z0 = svamin_f64_x (p0, z0, z1),
+               z0 = svamin_x (p0, z0, z1))
+
+/*
+** amin_f64_x_tied2:
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_x_tied2, svfloat64_t,
+               z0 = svamin_f64_x (p0, z1, z0),
+               z0 = svamin_x (p0, z1, z0))
+
+/*
+** amin_f64_x_untied:
+** (
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z2\.d
+** |
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+** )
+**     ret
+*/
+TEST_UNIFORM_Z (amin_f64_x_untied, svfloat64_t,
+               z0 = svamin_f64_x (p0, z1, z2),
+               z0 = svamin_x (p0, z1, z2))
+
+/*
+** amin_d4_f64_x_tied1:
+**     mov     (z[0-9]+\.d), d4
+**     famin   z0\.d, p0/m, z0\.d, \1
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_x_tied1, svfloat64_t, double,
+                z0 = svamin_n_f64_x (p0, z0, d4),
+                z0 = svamin_x (p0, z0, d4))
+
+/*
+** amin_d4_f64_x_untied:
+**     mov     z0\.d, d4
+**     famin   z0\.d, p0/m, z0\.d, z1\.d
+**     ret
+*/
+TEST_UNIFORM_ZD (amin_d4_f64_x_untied, svfloat64_t, double,
+                z0 = svamin_n_f64_x (p0, z1, d4),
+                z0 = svamin_x (p0, z1, d4))
+
+/*
+** amin_0_f64_x_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z0, 0),
+               z0 = svamin_x (p0, z0, 0))
+
+/*
+** amin_0_f64_x_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_0_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z1, 0),
+               z0 = svamin_x (p0, z1, 0))
+
+/*
+** amin_1_f64_x_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z0, 1),
+               z0 = svamin_x (p0, z0, 1))
+
+/*
+** amin_1_f64_x_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_1_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z1, 1),
+               z0 = svamin_x (p0, z1, 1))
+
+/*
+** amin_2_f64_x_tied1:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z0, 2),
+               z0 = svamin_x (p0, z0, 2))
+
+/*
+** amin_2_f64_x_untied:
+**     ...
+**     famin   z0\.d, p0/m, z0\.d, z[0-9]+\.d
+**     ret
+*/
+TEST_UNIFORM_Z (amin_2_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (p0, z1, 2),
+               z0 = svamin_x (p0, z1, 2))
+
+/*
+** ptrue_amin_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f64_x_tied1, svfloat64_t,
+               z0 = svamin_f64_x (svptrue_b64 (), z0, z1),
+               z0 = svamin_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_amin_f64_x_tied2:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f64_x_tied2, svfloat64_t,
+               z0 = svamin_f64_x (svptrue_b64 (), z1, z0),
+               z0 = svamin_x (svptrue_b64 (), z1, z0))
+
+/*
+** ptrue_amin_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_f64_x_untied, svfloat64_t,
+               z0 = svamin_f64_x (svptrue_b64 (), z1, z2),
+               z0 = svamin_x (svptrue_b64 (), z1, z2))
+
+/*
+** ptrue_amin_0_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z0, 0),
+               z0 = svamin_x (svptrue_b64 (), z0, 0))
+
+/*
+** ptrue_amin_0_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_0_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z1, 0),
+               z0 = svamin_x (svptrue_b64 (), z1, 0))
+
+/*
+** ptrue_amin_1_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z0, 1),
+               z0 = svamin_x (svptrue_b64 (), z0, 1))
+
+/*
+** ptrue_amin_1_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_1_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z1, 1),
+               z0 = svamin_x (svptrue_b64 (), z1, 1))
+
+/*
+** ptrue_amin_2_f64_x_tied1:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f64_x_tied1, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z0, 2),
+               z0 = svamin_x (svptrue_b64 (), z0, 2))
+
+/*
+** ptrue_amin_2_f64_x_untied:
+**     ...
+**     ptrue   p[0-9]+\.b[^\n]*
+**     ...
+**     ret
+*/
+TEST_UNIFORM_Z (ptrue_amin_2_f64_x_untied, svfloat64_t,
+               z0 = svamin_n_f64_x (svptrue_b64 (), z1, 2),
+               z0 = svamin_x (svptrue_b64 (), z1, 2))
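
For readers who want to see the new intrinsics outside the test harness, here is a
rough usage sketch. It is not part of the patch: svamin_x and the "+sve+faminmax"
target string are taken from the tests above, the loop scaffolding (svcntw,
svwhilelt_b32, svld1, svst1) is ordinary SVE ACLE boilerplate, and the function
name abs_min_f32 is made up for illustration.

/* Rough usage sketch (not part of the patch): element-wise min (|a|, |b|)
   over two float buffers using the new svamin intrinsic.  The target
   string mirrors the tests above; the loop is standard SVE ACLE code.  */
#include <arm_sve.h>

#pragma GCC target "+sve+faminmax"

void
abs_min_f32 (float *restrict dst, const float *a, const float *b, int n)
{
  for (int i = 0; i < n; i += (int) svcntw ())
    {
      /* Predicate covering the remaining elements of this iteration.  */
      svbool_t pg = svwhilelt_b32 (i, n);
      svfloat32_t va = svld1 (pg, a + i);
      svfloat32_t vb = svld1 (pg, b + i);
      /* famin: per-element minimum of absolute values; the _x form
	 leaves inactive lanes unspecified.  */
      svst1 (pg, dst + i, svamin_x (pg, va, vb));
    }
}

With the patch applied, the loop body should compile down to a predicated
famin instruction much like the ones matched in the tests above.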
