We had a function called aarch64_vq_mode, where "vq" stood for "vector
quadword".  It was used by aarch64_simd_container_mode (from which it
originated) and by the implementation of various SVE ...Q instructions.

It's useful for follow-on patches if we also split out the handling
of 64-bit modes from aarch64_simd_container_mode.  Keeping to the
same naming scheme would mean replacing "q" with "d", but that has
unfortunate connotations, and AFAIK it doesn't correspond to any
actual SVE mnemonic.

This patch therefore splits the handling out into a function called
aarch64_v64_mode and renames aarch64_vq_mode to aarch64_v128_mode for
consistency.  I didn't rename the "vq" local variables, since I think
those names make sense in context.

Bootstrapped & regression-tested on aarch64-linux-gnu.

This is a prerequisite for the comments I have about the LUTI support
(which was originally posted in stage 1).  I'll commit tomorrow if there
are no comments before then.

Richard


gcc/
        * config/aarch64/aarch64-protos.h (aarch64_v64_mode): Declare.
        (aarch64_vq_mode): Rename to...
        (aarch64_v128_mode): ...this.
        * config/aarch64/aarch64.cc (aarch64_v64_mode): New function,
        split out from...
        (aarch64_simd_container_mode): ...here.
        (aarch64_vq_mode): Rename to...
        (aarch64_v128_mode): ...this and update callers.
        * config/aarch64/aarch64-sve-builtins-base.cc: Update calls
        accordingly.
---
 gcc/config/aarch64/aarch64-protos.h           |  3 +-
 .../aarch64/aarch64-sve-builtins-base.cc      |  8 +--
 gcc/config/aarch64/aarch64.cc                 | 52 +++++++++++--------
 3 files changed, 36 insertions(+), 27 deletions(-)

diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index cad6e0b0a6f..8644d29a0a6 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -853,7 +853,8 @@ bool aarch64_and_bitmask_imm (unsigned HOST_WIDE_INT val_in, machine_mode mode);
 int aarch64_branch_cost (bool, bool);
 enum aarch64_symbol_type aarch64_classify_symbolic_expression (rtx);
 bool aarch64_advsimd_struct_mode_p (machine_mode mode);
-opt_machine_mode aarch64_vq_mode (scalar_mode);
+opt_machine_mode aarch64_v64_mode (scalar_mode);
+opt_machine_mode aarch64_v128_mode (scalar_mode);
 opt_machine_mode aarch64_full_sve_mode (scalar_mode);
 bool aarch64_can_const_movi_rtx_p (rtx x, machine_mode mode);
 bool aarch64_const_vec_all_same_int_p (rtx, HOST_WIDE_INT);
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index b97941932ab..13e020b5345 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -996,7 +996,7 @@ private:
     tree lhs_type = TREE_TYPE (lhs);
     tree elt_type = TREE_TYPE (lhs_type);
     scalar_mode elt_mode = SCALAR_TYPE_MODE (elt_type);
-    machine_mode vq_mode = aarch64_vq_mode (elt_mode).require ();
+    machine_mode vq_mode = aarch64_v128_mode (elt_mode).require ();
     tree vq_type = build_vector_type_for_mode (elt_type, vq_mode);
 
     unsigned nargs = gimple_call_num_args (f.call);
@@ -1067,7 +1067,7 @@ public:
 
     /* Get the 128-bit Advanced SIMD vector for this data size.  */
     scalar_mode element_mode = GET_MODE_INNER (mode);
-    machine_mode vq_mode = aarch64_vq_mode (element_mode).require ();
+    machine_mode vq_mode = aarch64_v128_mode (element_mode).require ();
     gcc_assert (known_eq (elements_per_vq, GET_MODE_NUNITS (vq_mode)));
 
     /* Put the arguments into a 128-bit Advanced SIMD vector.  We want
@@ -1651,7 +1651,7 @@ public:
   machine_mode
   memory_vector_mode (const function_instance &fi) const override
   {
-    return aarch64_vq_mode (GET_MODE_INNER (fi.vector_mode (0))).require ();
+    return aarch64_v128_mode (GET_MODE_INNER (fi.vector_mode (0))).require ();
   }
 
   rtx
@@ -1685,7 +1685,7 @@ public:
        tree eltype = TREE_TYPE (lhs_type);
 
        scalar_mode elmode = GET_MODE_INNER (TYPE_MODE (lhs_type));
-       machine_mode vq_mode = aarch64_vq_mode (elmode).require ();
+       machine_mode vq_mode = aarch64_v128_mode (elmode).require ();
        tree vectype = build_vector_type_for_mode (eltype, vq_mode);
 
        tree elt_ptr_type
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index cc401befde4..43238aefef2 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -5737,7 +5737,7 @@ aarch64_expand_sve_const_vector (rtx target, rtx src)
         targets, the layout of the 128-bit vector in an Advanced SIMD
         register would be different from its layout in an SVE register,
         but this 128-bit vector is a memory value only.  */
-      machine_mode vq_mode = aarch64_vq_mode (elt_mode).require ();
+      machine_mode vq_mode = aarch64_v128_mode (elt_mode).require ();
       rtx vq_value = simplify_gen_subreg (vq_mode, src, mode, 0);
       if (vq_value && aarch64_expand_sve_ld1rq (target, vq_value))
        return target;
@@ -5749,7 +5749,7 @@ aarch64_expand_sve_const_vector (rtx target, rtx src)
         See if we can load them using an Advanced SIMD move and then
         duplicate it to fill a vector.  This is better than using a GPR
         move because it keeps everything in the same register file.  */
-      machine_mode vq_mode = aarch64_vq_mode (elt_mode).require ();
+      machine_mode vq_mode = aarch64_v128_mode (elt_mode).require ();
       rtx_vector_builder builder (vq_mode, npatterns, 1);
       for (unsigned int i = 0; i < npatterns; ++i)
        {
@@ -22509,10 +22509,34 @@ aarch64_full_sve_mode (scalar_mode mode)
     }
 }
 
+/* Return the 64-bit Advanced SIMD vector mode for element mode MODE,
+   if it exists.  */
+opt_machine_mode
+aarch64_v64_mode (scalar_mode mode)
+{
+  switch (mode)
+    {
+    case E_SFmode:
+      return V2SFmode;
+    case E_HFmode:
+      return V4HFmode;
+    case E_BFmode:
+      return V4BFmode;
+    case E_SImode:
+      return V2SImode;
+    case E_HImode:
+      return V4HImode;
+    case E_QImode:
+      return V8QImode;
+    default:
+      return {};
+    }
+}
+
 /* Return the 128-bit Advanced SIMD vector mode for element mode MODE,
    if it exists.  */
 opt_machine_mode
-aarch64_vq_mode (scalar_mode mode)
+aarch64_v128_mode (scalar_mode mode)
 {
   switch (mode)
     {
@@ -22551,25 +22575,9 @@ aarch64_simd_container_mode (scalar_mode mode, poly_int64 width)
   if (TARGET_BASE_SIMD)
     {
       if (known_eq (width, 128))
-       return aarch64_vq_mode (mode).else_mode (word_mode);
+       return aarch64_v128_mode (mode).else_mode (word_mode);
       else
-       switch (mode)
-         {
-         case E_SFmode:
-           return V2SFmode;
-         case E_HFmode:
-           return V4HFmode;
-         case E_BFmode:
-           return V4BFmode;
-         case E_SImode:
-           return V2SImode;
-         case E_HImode:
-           return V4HImode;
-         case E_QImode:
-           return V8QImode;
-         default:
-           break;
-         }
+       return aarch64_v64_mode (mode).else_mode (word_mode);
     }
   return word_mode;
 }
@@ -22629,7 +22637,7 @@ aarch64_preferred_simd_mode (scalar_mode mode)
   if (TARGET_SVE && aarch64_cmp_autovec_modes (VNx16QImode, V16QImode))
     return aarch64_full_sve_mode (mode).else_mode (word_mode);
   if (TARGET_SIMD)
-    return aarch64_vq_mode (mode).else_mode (word_mode);
+    return aarch64_v128_mode (mode).else_mode (word_mode);
   return word_mode;
 }
 
-- 
2.25.1
