https://gcc.gnu.org/g:8682fcbf3ced5a415d3ff9a27d6c1fa0392bb187

commit r15-6949-g8682fcbf3ced5a415d3ff9a27d6c1fa0392bb187
Author: Liao Shihua <shi...@iscas.ac.cn>
Date:   Fri Dec 13 20:38:29 2024 +0800

    RISC-V: Update Xsfvqmacc and Xsfvfnrclip's testcases
    
    Update Sifive Xsfvqmacc and Xsfvfnrclip extension's testcases.
    
    version log:
            Synchronize the LMUL settings with the return type.
    
    gcc/ChangeLog:
    
            * config/riscv/vector.md: New attr set.
    
    gcc/testsuite/ChangeLog:
    
            * gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c: Add vsetivli checking.
            * gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c: Ditto.
            * gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c: Ditto.
            * gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c: Ditto.
            * gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c: Ditto.
            * gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c: Ditto.
            * gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c: Ditto.
            * gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c: Ditto.
            * gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c: Ditto.
            * gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c: Ditto.

Diff:
---
 gcc/config/riscv/vector.md                         |  7 +--
 .../riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c       | 60 +++++++++++++++++++++
 .../riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c      | 63 +++++++++++++++++++++-
 .../riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c          | 16 ++++++
 .../riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c          | 16 ++++++
 .../riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c        | 17 ++++++
 .../riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c        | 17 ++++++
 .../riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c         | 16 ++++++
 .../riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c         | 17 ++++++
 .../riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c        | 17 ++++++
 .../riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c        | 17 ++++++
 11 files changed, 259 insertions(+), 4 deletions(-)

diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 4658db2653fe..243f3f76d28a 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -56,7 +56,8 @@
                          
vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vcpop,vclz,vctz,vrol,\
                          
vror,vwsll,vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\
                          
vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,\
-                         vfncvtbf16,vfwcvtbf16,vfwmaccbf16")
+                         vfncvtbf16,vfwcvtbf16,vfwmaccbf16,\
+                         sf_vqmacc,sf_vfnrclip")
         (const_string "true")]
        (const_string "false")))
 
@@ -893,7 +894,7 @@
                          
vfredo,vfwredu,vfwredo,vslideup,vslidedown,vislide1up,\
                          
vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
                          
vlsegds,vlsegdux,vlsegdox,vandn,vrol,vror,vwsll,vclmul,vclmulh,\
-                         vfwmaccbf16")
+                         vfwmaccbf16,sf_vqmacc,sf_vfnrclip")
           (symbol_ref "riscv_vector::get_ta(operands[6])")
 
         (eq_attr "type" "vimuladd,vfmuladd")
@@ -924,7 +925,7 @@
                          vfwalu,vfwmul,vfsgnj,vfcmp,vslideup,vslidedown,\
                          
vislide1up,vislide1down,vfslide1up,vfslide1down,vgather,\
                          
viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox,vandn,vrol,\
-                          vror,vwsll,vclmul,vclmulh,vfwmaccbf16")
+                         
vror,vwsll,vclmul,vclmulh,vfwmaccbf16,sf_vqmacc,sf_vfnrclip")
           (symbol_ref "riscv_vector::get_ma(operands[7])")
 
         (eq_attr "type" "vimuladd,vfmuladd")
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c
index 813f7860f645..a4193b5aea97 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c
@@ -7,6 +7,7 @@
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf8_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -17,6 +18,7 @@ vint8mf8_t 
test_sf_vfnrclip_x_f_qf_i8mf8_vint8mf8_t(vfloat32mf2_t vs2, float rs1
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf4_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -27,6 +29,7 @@ vint8mf4_t 
test_sf_vfnrclip_x_f_qf_i8mf4_vint8mf4_t(vfloat32m1_t vs2, float rs1,
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf2_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -37,6 +40,7 @@ vint8mf2_t 
test_sf_vfnrclip_x_f_qf_i8mf2_vint8mf2_t(vfloat32m2_t vs2, float rs1,
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m1_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -47,6 +51,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_vint8m1_t(vfloat32m4_t 
vs2, float rs1, si
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m2_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -57,6 +62,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_vint8m2_t(vfloat32m8_t 
vs2, float rs1, si
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf8_m_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -67,6 +73,7 @@ vint8mf8_t 
test_sf_vfnrclip_x_f_qf_i8mf8_m_vint8mf8_t(vbool64_t mask, vfloat32mf
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf4_m_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -77,6 +84,7 @@ vint8mf4_t 
test_sf_vfnrclip_x_f_qf_i8mf4_m_vint8mf4_t(vbool32_t mask, vfloat32m1
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf2_m_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -87,6 +95,7 @@ vint8mf2_t 
test_sf_vfnrclip_x_f_qf_i8mf2_m_vint8mf2_t(vbool16_t mask, vfloat32m2
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m1_m_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -97,6 +106,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m_vint8m1_t(vbool8_t 
mask, vfloat32m4_t v
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m2_m_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -107,6 +117,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m_vint8m2_t(vbool4_t 
mask, vfloat32m8_t v
 /*
 ** test_sf_vfnrclip_x_f_qf_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -117,6 +128,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_vint8mf8_t(vfloat32mf2_t 
vs2, float rs1, size
 /*
 ** test_sf_vfnrclip_x_f_qf_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -127,6 +139,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_vint8mf4_t(vfloat32m1_t 
vs2, float rs1, size_
 /*
 ** test_sf_vfnrclip_x_f_qf_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -137,6 +150,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_vint8mf2_t(vfloat32m2_t 
vs2, float rs1, size_
 /*
 ** test_sf_vfnrclip_x_f_qf_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -147,6 +161,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_vint8m1_t(vfloat32m4_t 
vs2, float rs1, size_t
 /*
 ** test_sf_vfnrclip_x_f_qf_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -157,6 +172,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_vint8m2_t(vfloat32m8_t 
vs2, float rs1, size_t
 /*
 ** test_sf_vfnrclip_x_f_qf_mask_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -167,6 +183,7 @@ vint8mf8_t 
test_sf_vfnrclip_x_f_qf_mask_vint8mf8_t(vbool64_t mask, vfloat32mf2_t
 /*
 ** test_sf_vfnrclip_x_f_qf_mask_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -177,6 +194,7 @@ vint8mf4_t 
test_sf_vfnrclip_x_f_qf_mask_vint8mf4_t(vbool32_t mask, vfloat32m1_t
 /*
 ** test_sf_vfnrclip_x_f_qf_mask_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -187,6 +205,7 @@ vint8mf2_t 
test_sf_vfnrclip_x_f_qf_mask_vint8mf2_t(vbool16_t mask, vfloat32m2_t
 /*
 ** test_sf_vfnrclip_x_f_qf_mask_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -197,6 +216,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_mask_vint8m1_t(vbool8_t 
mask, vfloat32m4_t vs2
 /*
 ** test_sf_vfnrclip_x_f_qf_mask_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -207,6 +227,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_mask_vint8m2_t(vbool4_t 
mask,vfloat32m8_t vs2,
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf8_tu_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -217,6 +238,7 @@ vint8mf8_t 
test_sf_vfnrclip_x_f_qf_i8mf8_tu_vint8mf8_t(vint8mf8_t maskedoff, vfl
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf4_tu_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -227,6 +249,7 @@ vint8mf4_t 
test_sf_vfnrclip_x_f_qf_i8mf4_tu_vint8mf4_t(vint8mf4_t maskedoff, vfl
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf2_tu_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -237,6 +260,7 @@ vint8mf2_t 
test_sf_vfnrclip_x_f_qf_i8mf2_tu_vint8mf2_t(vint8mf2_t maskedoff, vfl
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m1_tu_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -247,6 +271,7 @@ vint8m1_t 
test_sf_vfnrclip_x_f_qf_i8m1_tu_vint8m1_t(vint8m1_t maskedoff, vfloat3
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m2_tu_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -257,6 +282,7 @@ vint8m2_t 
test_sf_vfnrclip_x_f_qf_i8m2_tu_vint8m2_t(vint8m2_t maskedoff, vfloat3
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf8_tum_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -267,6 +293,7 @@ vint8mf8_t 
test_sf_vfnrclip_x_f_qf_i8mf8_tum_vint8mf8_t(vbool64_t mask, vint8mf8
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf4_tum_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -277,6 +304,7 @@ vint8mf4_t 
test_sf_vfnrclip_x_f_qf_i8mf4_tum_vint8mf4_t(vbool32_t mask, vint8mf4
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf2_tum_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -287,6 +315,7 @@ vint8mf2_t 
test_sf_vfnrclip_x_f_qf_i8mf2_tum_vint8mf2_t(vbool16_t mask, vint8mf2
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m1_tum_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -297,6 +326,7 @@ vint8m1_t 
test_sf_vfnrclip_x_f_qf_i8m1_tum_vint8m1_t(vbool8_t mask, vint8m1_t ma
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m2_tum_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -307,6 +337,7 @@ vint8m2_t 
test_sf_vfnrclip_x_f_qf_i8m2_tum_vint8m2_t(vbool4_t mask, vint8m2_t ma
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf8_tumu_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -317,6 +348,7 @@ vint8mf8_t 
test_sf_vfnrclip_x_f_qf_i8mf8_tumu_vint8mf8_t(vbool64_t mask, vint8mf
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf4_tumu_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -327,6 +359,7 @@ vint8mf4_t 
test_sf_vfnrclip_x_f_qf_i8mf4_tumu_vint8mf4_t(vbool32_t mask, vint8mf
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf2_tumu_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -337,6 +370,7 @@ vint8mf2_t 
test_sf_vfnrclip_x_f_qf_i8mf2_tumu_vint8mf2_t(vbool16_t mask, vint8mf
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m1_tumu_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -347,6 +381,7 @@ vint8m1_t 
test_sf_vfnrclip_x_f_qf_i8m1_tumu_vint8m1_t(vbool8_t mask, vint8m1_t m
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m2_tumu_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -357,6 +392,7 @@ vint8m2_t 
test_sf_vfnrclip_x_f_qf_i8m2_tumu_vint8m2_t(vbool4_t mask, vint8m2_t m
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf8_mu_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -367,6 +403,7 @@ vint8mf8_t 
test_sf_vfnrclip_x_f_qf_i8mf8_mu_vint8mf8_t(vbool64_t mask, vint8mf8_
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf4_mu_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -377,6 +414,7 @@ vint8mf4_t 
test_sf_vfnrclip_x_f_qf_i8mf4_mu_vint8mf4_t(vbool32_t mask, vint8mf4_
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf2_mu_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -387,6 +425,7 @@ vint8mf2_t 
test_sf_vfnrclip_x_f_qf_i8mf2_mu_vint8mf2_t(vbool16_t mask, vint8mf2_
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m1_mu_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -397,6 +436,7 @@ vint8m1_t 
test_sf_vfnrclip_x_f_qf_i8m1_mu_vint8m1_t(vbool8_t mask, vint8m1_t mas
 /*
 ** test_sf_vfnrclip_x_f_qf_i8m2_mu_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -407,6 +447,7 @@ vint8m2_t 
test_sf_vfnrclip_x_f_qf_i8m2_mu_vint8m2_t(vbool4_t mask, vint8m2_t mas
 /*
 ** test_sf_vfnrclip_x_f_qf_tu_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -417,6 +458,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_tu_vint8mf8_t(vint8mf8_t 
maskedoff, vfloat32m
 /*
 ** test_sf_vfnrclip_x_f_qf_tu_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -427,6 +469,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_tu_vint8mf4_t(vint8mf4_t 
maskedoff, vfloat32m
 /*
 ** test_sf_vfnrclip_x_f_qf_tu_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -437,6 +480,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_tu_vint8mf2_t(vint8mf2_t 
maskedoff, vfloat32m
 /*
 ** test_sf_vfnrclip_x_f_qf_tu_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -447,6 +491,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_tu_vint8m1_t(vint8m1_t 
maskedoff, vfloat32m4_t
 /*
 ** test_sf_vfnrclip_x_f_qf_tu_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -457,6 +502,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_tu_vint8m2_t(vint8m2_t 
maskedoff, vfloat32m8_t
 /*
 ** test_sf_vfnrclip_x_f_qf_tum_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -467,6 +513,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_tum_vint8mf8_t(vbool64_t 
mask, vint8mf8_t mas
 /*
 ** test_sf_vfnrclip_x_f_qf_tum_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -477,6 +524,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_tum_vint8mf4_t(vbool32_t 
mask, vint8mf4_t mas
 /*
 ** test_sf_vfnrclip_x_f_qf_tum_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -487,6 +535,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_tum_vint8mf2_t(vbool16_t 
mask, vint8mf2_t mas
 /*
 ** test_sf_vfnrclip_x_f_qf_tum_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -497,6 +546,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_tum_vint8m1_t(vbool8_t 
mask, vint8m1_t maskedo
 /*
 ** test_sf_vfnrclip_x_f_qf_tum_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -508,6 +558,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_tum_vint8m2_t(vbool4_t 
mask, vint8m2_t maskedo
 /*
 ** test_sf_vfnrclip_x_f_qf_tumu_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -518,6 +569,7 @@ vint8mf8_t 
test_sf_vfnrclip_x_f_qf_tumu_vint8mf8_t(vbool64_t mask, vint8mf8_t ma
 /*
 ** test_sf_vfnrclip_x_f_qf_i8mf4_tumu_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -528,6 +580,7 @@ vint8mf4_t 
test_sf_vfnrclip_x_f_qf_tumu_vint8mf4_t(vbool32_t mask, vint8mf4_t ma
 /*
 ** test_sf_vfnrclip_x_f_qf_tumu_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -538,6 +591,7 @@ vint8mf2_t 
test_sf_vfnrclip_x_f_qf_tumu_vint8mf2_t(vbool16_t mask, vint8mf2_t ma
 /*
 ** test_sf_vfnrclip_x_f_qf_tumu_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -548,6 +602,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_tumu_vint8m1_t(vbool8_t 
mask, vint8m1_t masked
 /*
 ** test_sf_vfnrclip_x_f_qf_tumu_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -558,6 +613,7 @@ vint8m2_t test_sf_vfnrclip_x_f_qf_tumu_vint8m2_t(vbool4_t 
mask, vint8m2_t masked
 /*
 ** test_sf_vfnrclip_x_f_qf_mu_vint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -568,6 +624,7 @@ vint8mf8_t test_sf_vfnrclip_x_f_qf_mu_vint8mf8_t(vbool64_t 
mask, vint8mf8_t mask
 /*
 ** test_sf_vfnrclip_x_f_qf_mu_vint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -578,6 +635,7 @@ vint8mf4_t test_sf_vfnrclip_x_f_qf_mu_vint8mf4_t(vbool32_t 
mask, vint8mf4_t mask
 /*
 ** test_sf_vfnrclip_x_f_qf_mu_vint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -588,6 +646,7 @@ vint8mf2_t test_sf_vfnrclip_x_f_qf_mu_vint8mf2_t(vbool16_t 
mask, vint8mf2_t mask
 /*
 ** test_sf_vfnrclip_x_f_qf_mu_vint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -598,6 +657,7 @@ vint8m1_t test_sf_vfnrclip_x_f_qf_mu_vint8m1_t(vbool8_t 
mask, vint8m1_t maskedof
 /*
 ** test_sf_vfnrclip_x_f_qf_mu_vint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,mu+
 ** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c
index f5a22966a995..c126746d5812 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c
@@ -7,6 +7,7 @@
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf8_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -17,6 +18,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_u8mf8_vuint8mf8_t(vfloat32mf2_t vs2, float
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf4_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -27,6 +29,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_u8mf4_vuint8mf4_t(vfloat32m1_t vs2, float r
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf2_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -37,6 +40,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_u8mf2_vuint8mf2_t(vfloat32m2_t vs2, float r
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m1_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -47,6 +51,7 @@ vuint8m1_t 
test_sf_vfnrclip_xu_f_qf_u8m1_vuint8m1_t(vfloat32m4_t vs2, float rs1,
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m2_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -57,6 +62,7 @@ vuint8m2_t 
test_sf_vfnrclip_xu_f_qf_u8m2_vuint8m2_t(vfloat32m8_t vs2, float rs1,
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf8_m_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -67,6 +73,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_u8mf8_m_vuint8mf8_t(vbool64_t mask, vfloat3
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf4_m_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -77,6 +84,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_u8mf4_m_vuint8mf4_t(vbool32_t mask, vfloat3
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf2_m_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -87,6 +95,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_u8mf2_m_vuint8mf2_t(vbool16_t mask, vfloat3
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m1_m_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -97,6 +106,7 @@ vuint8m1_t 
test_sf_vfnrclip_xu_f_qf_u8m1_m_vuint8m1_t(vbool8_t mask, vfloat32m4_
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m2_m_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -107,6 +117,7 @@ vuint8m2_t 
test_sf_vfnrclip_xu_f_qf_u8m2_m_vuint8m2_t(vbool4_t mask, vfloat32m8_
 /*
 ** test_sf_vfnrclip_xu_f_qf_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -117,6 +128,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_vuint8mf8_t(vfloat32mf2_t vs2, float rs1, s
 /*
 ** test_sf_vfnrclip_xu_f_qf_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -127,6 +139,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_vuint8mf4_t(vfloat32m1_t vs2, float rs1, si
 /*
 ** test_sf_vfnrclip_xu_f_qf_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -137,6 +150,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_vuint8mf2_t(vfloat32m2_t vs2, float rs1, si
 /*
 ** test_sf_vfnrclip_xu_f_qf_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -147,6 +161,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_vuint8m1_t(vfloat32m4_t 
vs2, float rs1, size
 /*
 ** test_sf_vfnrclip_xu_f_qf_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -157,6 +172,7 @@ vuint8m2_t test_sf_vfnrclip_xu_f_qf_vuint8m2_t(vfloat32m8_t 
vs2, float rs1, size
 /*
 ** test_sf_vfnrclip_xu_f_qf_mask_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -167,6 +183,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_mask_vuint8mf8_t(vbool64_t mask, vfloat32mf
 /*
 ** test_sf_vfnrclip_xu_f_qf_mask_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -177,6 +194,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_mask_vuint8mf4_t(vbool32_t mask, vfloat32m1
 /*
 ** test_sf_vfnrclip_xu_f_qf_mask_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -187,6 +205,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_mask_vuint8mf2_t(vbool16_t mask, vfloat32m2
 /*
 ** test_sf_vfnrclip_xu_f_qf_mask_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -197,16 +216,18 @@ vuint8m1_t 
test_sf_vfnrclip_xu_f_qf_mask_vuint8m1_t(vbool8_t mask, vfloat32m4_t
 /*
 ** test_sf_vfnrclip_xu_f_qf_mask_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
-vuint8m2_t test_sf_vfnrclip_xu_f_qf_mask_vuint8m2_t(vbool4_t mask, 
vfloat32m8_t vs2, float rs1, size_t vl) {
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_mask_vuint8m2_t(vbool4_t mask,vfloat32m8_t 
vs2, float rs1, size_t vl) {
     return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
 }
 
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf8_tu_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -217,6 +238,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_u8mf8_tu_vuint8mf8_t(vuint8mf8_t maskedoff,
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf4_tu_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -227,6 +249,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_u8mf4_tu_vuint8mf4_t(vuint8mf4_t maskedoff,
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf2_tu_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -237,6 +260,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_u8mf2_tu_vuint8mf2_t(vuint8mf2_t maskedoff,
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m1_tu_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -247,6 +271,7 @@ vuint8m1_t 
test_sf_vfnrclip_xu_f_qf_u8m1_tu_vuint8m1_t(vuint8m1_t maskedoff, vfl
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m2_tu_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -257,6 +282,7 @@ vuint8m2_t 
test_sf_vfnrclip_xu_f_qf_u8m2_tu_vuint8m2_t(vuint8m2_t maskedoff, vfl
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf8_tum_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -267,6 +293,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_u8mf8_tum_vuint8mf8_t(vbool64_t mask, vuint
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf4_tum_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -277,6 +304,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_u8mf4_tum_vuint8mf4_t(vbool32_t mask, vuint
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf2_tum_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -287,6 +315,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_u8mf2_tum_vuint8mf2_t(vbool16_t mask, vuint
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m1_tum_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -297,6 +326,7 @@ vuint8m1_t 
test_sf_vfnrclip_xu_f_qf_u8m1_tum_vuint8m1_t(vbool8_t mask, vuint8m1_
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m2_tum_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -307,6 +337,7 @@ vuint8m2_t 
test_sf_vfnrclip_xu_f_qf_u8m2_tum_vuint8m2_t(vbool4_t mask, vuint8m2_
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf8_tumu_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -317,6 +348,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_u8mf8_tumu_vuint8mf8_t(vbool64_t mask, vuin
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf4_tumu_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -327,6 +359,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_u8mf4_tumu_vuint8mf4_t(vbool32_t mask, vuin
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf2_tumu_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -337,6 +370,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_u8mf2_tumu_vuint8mf2_t(vbool16_t mask, vuin
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m1_tumu_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -347,6 +381,7 @@ vuint8m1_t 
test_sf_vfnrclip_xu_f_qf_u8m1_tumu_vuint8m1_t(vbool8_t mask, vuint8m1
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m2_tumu_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -357,6 +392,7 @@ vuint8m2_t 
test_sf_vfnrclip_xu_f_qf_u8m2_tumu_vuint8m2_t(vbool4_t mask, vuint8m2
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf8_mu_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -367,6 +403,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_u8mf8_mu_vuint8mf8_t(vbool64_t mask, vuint8
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf4_mu_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -377,6 +414,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_u8mf4_mu_vuint8mf4_t(vbool32_t mask, vuint8
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf2_mu_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -387,6 +425,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_u8mf2_mu_vuint8mf2_t(vbool16_t mask, vuint8
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m1_mu_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -397,6 +436,7 @@ vuint8m1_t 
test_sf_vfnrclip_xu_f_qf_u8m1_mu_vuint8m1_t(vbool8_t mask, vuint8m1_t
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8m2_mu_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -407,6 +447,7 @@ vuint8m2_t 
test_sf_vfnrclip_xu_f_qf_u8m2_mu_vuint8m2_t(vbool4_t mask, vuint8m2_t
 /*
 ** test_sf_vfnrclip_xu_f_qf_tu_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -417,6 +458,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_tu_vuint8mf8_t(vuint8mf8_t maskedoff, vfloa
 /*
 ** test_sf_vfnrclip_xu_f_qf_tu_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -427,6 +469,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_tu_vuint8mf4_t(vuint8mf4_t maskedoff, vfloa
 /*
 ** test_sf_vfnrclip_xu_f_qf_tu_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -437,6 +480,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_tu_vuint8mf2_t(vuint8mf2_t maskedoff, vfloa
 /*
 ** test_sf_vfnrclip_xu_f_qf_tu_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -447,6 +491,7 @@ vuint8m1_t 
test_sf_vfnrclip_xu_f_qf_tu_vuint8m1_t(vuint8m1_t maskedoff, vfloat32
 /*
 ** test_sf_vfnrclip_xu_f_qf_tu_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
 ** ...
 */
@@ -457,6 +502,7 @@ vuint8m2_t 
test_sf_vfnrclip_xu_f_qf_tu_vuint8m2_t(vuint8m2_t maskedoff, vfloat32
 /*
 ** test_sf_vfnrclip_xu_f_qf_tum_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -467,6 +513,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_tum_vuint8mf8_t(vbool64_t mask, vuint8mf8_t
 /*
 ** test_sf_vfnrclip_xu_f_qf_tum_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -477,6 +524,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_tum_vuint8mf4_t(vbool32_t mask, vuint8mf4_t
 /*
 ** test_sf_vfnrclip_xu_f_qf_tum_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -487,6 +535,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_tum_vuint8mf2_t(vbool16_t mask, vuint8mf2_t
 /*
 ** test_sf_vfnrclip_xu_f_qf_tum_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -497,6 +546,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_tum_vuint8m1_t(vbool8_t 
mask, vuint8m1_t mas
 /*
 ** test_sf_vfnrclip_xu_f_qf_tum_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,ma+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -504,9 +554,11 @@ vuint8m2_t 
test_sf_vfnrclip_xu_f_qf_tum_vuint8m2_t(vbool4_t mask, vuint8m2_t mas
     return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
 }
 
+
 /*
 ** test_sf_vfnrclip_xu_f_qf_tumu_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,tu+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -517,6 +569,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_tumu_vuint8mf8_t(vbool64_t mask, vuint8mf8_
 /*
 ** test_sf_vfnrclip_xu_f_qf_u8mf4_tumu_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,tu+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -527,6 +580,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_tumu_vuint8mf4_t(vbool32_t mask, vuint8mf4_
 /*
 ** test_sf_vfnrclip_xu_f_qf_tumu_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,tu+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -537,6 +591,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_tumu_vuint8mf2_t(vbool16_t mask, vuint8mf2_
 /*
 ** test_sf_vfnrclip_xu_f_qf_tumu_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,tu+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -547,6 +602,7 @@ vuint8m1_t 
test_sf_vfnrclip_xu_f_qf_tumu_vuint8m1_t(vbool8_t mask, vuint8m1_t ma
 /*
 ** test_sf_vfnrclip_xu_f_qf_tumu_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,tu+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -557,6 +613,7 @@ vuint8m2_t 
test_sf_vfnrclip_xu_f_qf_tumu_vuint8m2_t(vbool4_t mask, vuint8m2_t ma
 /*
 ** test_sf_vfnrclip_xu_f_qf_mu_vuint8mf8_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf8+,ta+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -567,6 +624,7 @@ vuint8mf8_t 
test_sf_vfnrclip_xu_f_qf_mu_vuint8mf8_t(vbool64_t mask, vuint8mf8_t
 /*
 ** test_sf_vfnrclip_xu_f_qf_mu_vuint8mf4_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf4+,ta+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -577,6 +635,7 @@ vuint8mf4_t 
test_sf_vfnrclip_xu_f_qf_mu_vuint8mf4_t(vbool32_t mask, vuint8mf4_t
 /*
 ** test_sf_vfnrclip_xu_f_qf_mu_vuint8mf2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,mf2+,ta+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -587,6 +646,7 @@ vuint8mf2_t 
test_sf_vfnrclip_xu_f_qf_mu_vuint8mf2_t(vbool16_t mask, vuint8mf2_t
 /*
 ** test_sf_vfnrclip_xu_f_qf_mu_vuint8m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m1+,ta+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
@@ -597,6 +657,7 @@ vuint8m1_t test_sf_vfnrclip_xu_f_qf_mu_vuint8m1_t(vbool8_t 
mask, vuint8m1_t mask
 /*
 ** test_sf_vfnrclip_xu_f_qf_mu_vuint8m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e8+,m2+,ta+,mu+
 ** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
 ** ...
 */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c
index f2058a14779b..6bb659b5d233 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c
@@ -7,6 +7,7 @@
 /*
 ** test_sf_vqmacc_2x8x2_i32m1_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -20,6 +21,7 @@ test_sf_vqmacc_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_2x8x2_i32m2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -33,6 +35,7 @@ test_sf_vqmacc_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_2x8x2_i32m4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -46,6 +49,7 @@ test_sf_vqmacc_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_2x8x2_i32m8_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -59,6 +63,7 @@ test_sf_vqmacc_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_2x8x2_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -72,6 +77,7 @@ test_sf_vqmacc_2x8x2_vint32m1_t (vint32m1_t vd, vint8m1_t 
vs1, vint8m1_t vs2,
 /*
 ** test_sf_vqmacc_2x8x2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -85,6 +91,7 @@ test_sf_vqmacc_2x8x2_vint32m2_t (vint32m2_t vd, vint8m1_t 
vs1, vint8m2_t vs2,
 /*
 ** test_sf_vqmacc_2x8x2_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -98,6 +105,7 @@ test_sf_vqmacc_2x8x2_vint32m4_t (vint32m4_t vd, vint8m1_t 
vs1, vint8m4_t vs2,
 /*
 ** test_sf_vqmacc_2x8x2_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -111,6 +119,7 @@ test_sf_vqmacc_2x8x2_vint32m8_t (vint32m8_t vd, vint8m1_t 
vs1, vint8m8_t vs2,
 /*
 ** test_sf_vqmacc_2x8x2_i32m1_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -124,6 +133,7 @@ test_sf_vqmacc_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_2x8x2_i32m2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -137,6 +147,7 @@ test_sf_vqmacc_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_2x8x2_i32m4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -150,6 +161,7 @@ test_sf_vqmacc_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_2x8x2_i32m8_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -163,6 +175,7 @@ test_sf_vqmacc_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_2x8x2_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -176,6 +189,7 @@ test_sf_vqmacc_2x8x2_tu_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1, vint8m1_t vs2,
 /*
 ** test_sf_vqmacc_2x8x2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -189,6 +203,7 @@ test_sf_vqmacc_2x8x2_tu_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1, vint8m2_t vs2,
 /*
 ** test_sf_vqmacc_2x8x2_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -202,6 +217,7 @@ test_sf_vqmacc_2x8x2_tu_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1, vint8m4_t vs2,
 /*
 ** test_sf_vqmacc_2x8x2_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c
index 3bd6f1c273cd..8106d0dbbaba 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c
@@ -7,6 +7,7 @@
 /*
 ** test_sf_vqmacc_4x8x4_i32m1_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -20,6 +21,7 @@ test_sf_vqmacc_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_4x8x4_i32m2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -33,6 +35,7 @@ test_sf_vqmacc_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_4x8x4_i32m4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -46,6 +49,7 @@ test_sf_vqmacc_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_4x8x4_i32m8_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -59,6 +63,7 @@ test_sf_vqmacc_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_4x8x4_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -72,6 +77,7 @@ test_sf_vqmacc_4x8x4_vint32m1_t (vint32m1_t vd, vint8m1_t 
vs1, vint8mf2_t vs2,
 /*
 ** test_sf_vqmacc_4x8x4_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -85,6 +91,7 @@ test_sf_vqmacc_4x8x4_vint32m2_t (vint32m2_t vd, vint8m1_t 
vs1, vint8m1_t vs2,
 /*
 ** test_sf_vqmacc_4x8x4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -98,6 +105,7 @@ test_sf_vqmacc_4x8x4_vint32m4_t (vint32m4_t vd, vint8m1_t 
vs1, vint8m2_t vs2,
 /*
 ** test_sf_vqmacc_4x8x4_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -111,6 +119,7 @@ test_sf_vqmacc_4x8x4_vint32m8_t (vint32m8_t vd, vint8m1_t 
vs1, vint8m4_t vs2,
 /*
 ** test_sf_vqmacc_4x8x4_i32m1_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -124,6 +133,7 @@ test_sf_vqmacc_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_4x8x4_i32m2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -137,6 +147,7 @@ test_sf_vqmacc_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_4x8x4_i32m4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -150,6 +161,7 @@ test_sf_vqmacc_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_4x8x4_i32m8_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -163,6 +175,7 @@ test_sf_vqmacc_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_4x8x4_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -176,6 +189,7 @@ test_sf_vqmacc_4x8x4_tu_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmacc_4x8x4_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -189,6 +203,7 @@ test_sf_vqmacc_4x8x4_tu_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1, vint8m1_t vs2,
 /*
 ** test_sf_vqmacc_4x8x4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -202,6 +217,7 @@ test_sf_vqmacc_4x8x4_tu_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1, vint8m2_t vs2,
 /*
 ** test_sf_vqmacc_4x8x4_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c
index 663c7634ebfa..c51b53f7b176 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c
@@ -7,6 +7,7 @@
 /*
 ** test_sf_vqmaccsu_2x8x2_i32m1_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -20,6 +21,7 @@ test_sf_vqmaccsu_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_i32m2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -33,6 +35,7 @@ test_sf_vqmaccsu_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_i32m4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -46,6 +49,7 @@ test_sf_vqmaccsu_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_i32m8_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -59,6 +63,7 @@ test_sf_vqmaccsu_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -72,6 +77,7 @@ test_sf_vqmaccsu_2x8x2_vint32m1_t (vint32m1_t vd, vint8m1_t 
vs1, vuint8m1_t vs2,
 /*
 ** test_sf_vqmaccsu_2x8x2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -85,6 +91,7 @@ test_sf_vqmaccsu_2x8x2_vint32m2_t (vint32m2_t vd, vint8m1_t 
vs1, vuint8m2_t vs2,
 /*
 ** test_sf_vqmaccsu_2x8x2_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -98,6 +105,7 @@ test_sf_vqmaccsu_2x8x2_vint32m4_t (vint32m4_t vd, vint8m1_t 
vs1, vuint8m4_t vs2,
 /*
 ** test_sf_vqmaccsu_2x8x2_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -111,6 +119,7 @@ test_sf_vqmaccsu_2x8x2_vint32m8_t (vint32m8_t vd, vint8m1_t 
vs1, vuint8m8_t vs2,
 /*
 ** test_sf_vqmaccsu_2x8x2_i32m1_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -124,6 +133,7 @@ test_sf_vqmaccsu_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_i32m2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -137,6 +147,7 @@ test_sf_vqmaccsu_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_i32m4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -150,6 +161,7 @@ test_sf_vqmaccsu_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_i32m8_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -163,6 +175,7 @@ test_sf_vqmaccsu_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -176,6 +189,7 @@ test_sf_vqmaccsu_2x8x2_tu_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -189,6 +203,7 @@ test_sf_vqmaccsu_2x8x2_tu_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -202,6 +217,7 @@ test_sf_vqmaccsu_2x8x2_tu_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_2x8x2_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -211,3 +227,4 @@ test_sf_vqmaccsu_2x8x2_tu_vint32m8_t (vint32m8_t vd, 
vint8m1_t vs1,
 {
   return __riscv_sf_vqmaccsu_2x8x2_tu (vd, vs1, vs2, vl);
 }
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c
index 0554e5642533..6625af7886b3 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c
@@ -7,6 +7,7 @@
 /*
 ** test_sf_vqmaccsu_4x8x4_i32m1_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -20,6 +21,7 @@ test_sf_vqmaccsu_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_i32m2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -33,6 +35,7 @@ test_sf_vqmaccsu_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_i32m4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -46,6 +49,7 @@ test_sf_vqmaccsu_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_i32m8_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -59,6 +63,7 @@ test_sf_vqmaccsu_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -72,6 +77,7 @@ test_sf_vqmaccsu_4x8x4_vint32m1_t (vint32m1_t vd, vint8m1_t 
vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -85,6 +91,7 @@ test_sf_vqmaccsu_4x8x4_vint32m2_t (vint32m2_t vd, vint8m1_t 
vs1, vuint8m1_t vs2,
 /*
 ** test_sf_vqmaccsu_4x8x4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -98,6 +105,7 @@ test_sf_vqmaccsu_4x8x4_vint32m4_t (vint32m4_t vd, vint8m1_t 
vs1, vuint8m2_t vs2,
 /*
 ** test_sf_vqmaccsu_4x8x4_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -111,6 +119,7 @@ test_sf_vqmaccsu_4x8x4_vint32m8_t (vint32m8_t vd, vint8m1_t 
vs1, vuint8m4_t vs2,
 /*
 ** test_sf_vqmaccsu_4x8x4_i32m1_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -124,6 +133,7 @@ test_sf_vqmaccsu_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_i32m2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -137,6 +147,7 @@ test_sf_vqmaccsu_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_i32m4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -150,6 +161,7 @@ test_sf_vqmaccsu_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_i32m8_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -163,6 +175,7 @@ test_sf_vqmaccsu_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -176,6 +189,7 @@ test_sf_vqmaccsu_4x8x4_tu_vint32m1_t (vint32m1_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -189,6 +203,7 @@ test_sf_vqmaccsu_4x8x4_tu_vint32m2_t (vint32m2_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -202,6 +217,7 @@ test_sf_vqmaccsu_4x8x4_tu_vint32m4_t (vint32m4_t vd, 
vint8m1_t vs1,
 /*
 ** test_sf_vqmaccsu_4x8x4_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -211,3 +227,4 @@ test_sf_vqmaccsu_4x8x4_tu_vint32m8_t (vint32m8_t vd, 
vint8m1_t vs1,
 {
   return __riscv_sf_vqmaccsu_4x8x4_tu (vd, vs1, vs2, vl);
 }
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c
index dd15cc2d5449..46cbc0c0b09a 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c
@@ -7,6 +7,7 @@
 /*
 ** test_sf_vqmaccu_2x8x2_i32m1_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -20,6 +21,7 @@ test_sf_vqmaccu_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_i32m2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -33,6 +35,7 @@ test_sf_vqmaccu_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_i32m4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -46,6 +49,7 @@ test_sf_vqmaccu_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_i32m8_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -59,6 +63,7 @@ test_sf_vqmaccu_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -72,6 +77,7 @@ test_sf_vqmaccu_2x8x2_vint32m1_t (vint32m1_t vd, vuint8m1_t 
vs1, vuint8m1_t vs2,
 /*
 ** test_sf_vqmaccu_2x8x2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -85,6 +91,7 @@ test_sf_vqmaccu_2x8x2_vint32m2_t (vint32m2_t vd, vuint8m1_t 
vs1, vuint8m2_t vs2,
 /*
 ** test_sf_vqmaccu_2x8x2_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -98,6 +105,7 @@ test_sf_vqmaccu_2x8x2_vint32m4_t (vint32m4_t vd, vuint8m1_t 
vs1, vuint8m4_t vs2,
 /*
 ** test_sf_vqmaccu_2x8x2_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -111,6 +119,7 @@ test_sf_vqmaccu_2x8x2_vint32m8_t (vint32m8_t vd, vuint8m1_t 
vs1, vuint8m8_t vs2,
 /*
 ** test_sf_vqmaccu_2x8x2_i32m1_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -124,6 +133,7 @@ test_sf_vqmaccu_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_i32m2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -137,6 +147,7 @@ test_sf_vqmaccu_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_i32m4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -150,6 +161,7 @@ test_sf_vqmaccu_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_i32m8_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -163,6 +175,7 @@ test_sf_vqmaccu_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -176,6 +189,7 @@ test_sf_vqmaccu_2x8x2_tu_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -189,6 +203,7 @@ test_sf_vqmaccu_2x8x2_tu_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -202,6 +217,7 @@ test_sf_vqmaccu_2x8x2_tu_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_2x8x2_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c
index c386b4ee79e4..fb20e460da8d 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c
@@ -7,6 +7,7 @@
 /*
 ** test_sf_vqmaccu_4x8x4_i32m1_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -20,6 +21,7 @@ test_sf_vqmaccu_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_i32m2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -33,6 +35,7 @@ test_sf_vqmaccu_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_i32m4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -46,6 +49,7 @@ test_sf_vqmaccu_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_i32m8_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -59,6 +63,7 @@ test_sf_vqmaccu_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -72,6 +77,7 @@ test_sf_vqmaccu_4x8x4_vint32m1_t (vint32m1_t vd, vuint8m1_t 
vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -85,6 +91,7 @@ test_sf_vqmaccu_4x8x4_vint32m2_t (vint32m2_t vd, vuint8m1_t 
vs1, vuint8m1_t vs2,
 /*
 ** test_sf_vqmaccu_4x8x4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -98,6 +105,7 @@ test_sf_vqmaccu_4x8x4_vint32m4_t (vint32m4_t vd, vuint8m1_t 
vs1, vuint8m2_t vs2,
 /*
 ** test_sf_vqmaccu_4x8x4_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -111,6 +119,7 @@ test_sf_vqmaccu_4x8x4_vint32m8_t (vint32m8_t vd, vuint8m1_t 
vs1, vuint8m4_t vs2,
 /*
 ** test_sf_vqmaccu_4x8x4_i32m1_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -124,6 +133,7 @@ test_sf_vqmaccu_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_i32m2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -137,6 +147,7 @@ test_sf_vqmaccu_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_i32m4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -150,6 +161,7 @@ test_sf_vqmaccu_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_i32m8_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -163,6 +175,7 @@ test_sf_vqmaccu_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -176,6 +189,7 @@ test_sf_vqmaccu_4x8x4_tu_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -189,6 +203,7 @@ test_sf_vqmaccu_4x8x4_tu_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -202,6 +217,7 @@ test_sf_vqmaccu_4x8x4_tu_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccu_4x8x4_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -211,3 +227,4 @@ test_sf_vqmaccu_4x8x4_tu_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 {
   return __riscv_sf_vqmaccu_4x8x4_tu (vd, vs1, vs2, vl);
 }
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c
index db1650eb6add..4a25b1a598a2 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c
@@ -7,6 +7,7 @@
 /*
 ** test_sf_vqmaccus_2x8x2_i32m1_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -20,6 +21,7 @@ test_sf_vqmaccus_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_i32m2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -33,6 +35,7 @@ test_sf_vqmaccus_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_i32m4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -46,6 +49,7 @@ test_sf_vqmaccus_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_i32m8_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -59,6 +63,7 @@ test_sf_vqmaccus_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -72,6 +77,7 @@ test_sf_vqmaccus_2x8x2_vint32m1_t (vint32m1_t vd, vuint8m1_t 
vs1, vint8m1_t vs2,
 /*
 ** test_sf_vqmaccus_2x8x2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -85,6 +91,7 @@ test_sf_vqmaccus_2x8x2_vint32m2_t (vint32m2_t vd, vuint8m1_t 
vs1, vint8m2_t vs2,
 /*
 ** test_sf_vqmaccus_2x8x2_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -98,6 +105,7 @@ test_sf_vqmaccus_2x8x2_vint32m4_t (vint32m4_t vd, vuint8m1_t 
vs1, vint8m4_t vs2,
 /*
 ** test_sf_vqmaccus_2x8x2_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -111,6 +119,7 @@ test_sf_vqmaccus_2x8x2_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1, vint8m8_t vs2,
 /*
 ** test_sf_vqmaccus_2x8x2_i32m1_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -124,6 +133,7 @@ test_sf_vqmaccus_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_i32m2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -137,6 +147,7 @@ test_sf_vqmaccus_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_i32m4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -150,6 +161,7 @@ test_sf_vqmaccus_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_i32m8_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -163,6 +175,7 @@ test_sf_vqmaccus_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -176,6 +189,7 @@ test_sf_vqmaccus_2x8x2_tu_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -189,6 +203,7 @@ test_sf_vqmaccus_2x8x2_tu_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -202,6 +217,7 @@ test_sf_vqmaccus_2x8x2_tu_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_2x8x2_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -211,3 +227,4 @@ test_sf_vqmaccus_2x8x2_tu_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 {
   return __riscv_sf_vqmaccus_2x8x2_tu (vd, vs1, vs2, vl);
 }
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c
index 5c5e1a043bcf..c82621cbe6ed 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c
@@ -7,6 +7,7 @@
 /*
 ** test_sf_vqmaccus_4x8x4_i32m1_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -20,6 +21,7 @@ test_sf_vqmaccus_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_i32m2_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -33,6 +35,7 @@ test_sf_vqmaccus_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_i32m4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -46,6 +49,7 @@ test_sf_vqmaccus_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_i32m8_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -59,6 +63,7 @@ test_sf_vqmaccus_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -72,6 +77,7 @@ test_sf_vqmaccus_4x8x4_vint32m1_t (vint32m1_t vd, vuint8m1_t 
vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -85,6 +91,7 @@ test_sf_vqmaccus_4x8x4_vint32m2_t (vint32m2_t vd, vuint8m1_t 
vs1, vint8m1_t vs2,
 /*
 ** test_sf_vqmaccus_4x8x4_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,ta,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -98,6 +105,7 @@ test_sf_vqmaccus_4x8x4_vint32m4_t (vint32m4_t vd, vuint8m1_t 
vs1, vint8m2_t vs2,
 /*
 ** test_sf_vqmaccus_4x8x4_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -111,6 +119,7 @@ test_sf_vqmaccus_4x8x4_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1, vint8m4_t vs2,
 /*
 ** test_sf_vqmaccus_4x8x4_i32m1_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -124,6 +133,7 @@ test_sf_vqmaccus_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_i32m2_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -137,6 +147,7 @@ test_sf_vqmaccus_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_i32m4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -150,6 +161,7 @@ test_sf_vqmaccus_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_i32m8_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -163,6 +175,7 @@ test_sf_vqmaccus_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_tu_vint32m1_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m1,tu,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -176,6 +189,7 @@ test_sf_vqmaccus_4x8x4_tu_vint32m1_t (vint32m1_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_tu_vint32m2_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m2,tu,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -189,6 +203,7 @@ test_sf_vqmaccus_4x8x4_tu_vint32m2_t (vint32m2_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_tu_vint32m4_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m4,tu,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -202,6 +217,7 @@ test_sf_vqmaccus_4x8x4_tu_vint32m4_t (vint32m4_t vd, 
vuint8m1_t vs1,
 /*
 ** test_sf_vqmaccus_4x8x4_tu_vint32m8_t:
 ** ...
+** vsetivli\s+zero+,0+,e32+,m8,tu,ma+
 ** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+
 ** ...
 */
@@ -211,3 +227,4 @@ test_sf_vqmaccus_4x8x4_tu_vint32m8_t (vint32m8_t vd, 
vuint8m1_t vs1,
 {
   return __riscv_sf_vqmaccus_4x8x4_tu (vd, vs1, vs2, vl);
 }
+

Reply via email to