This is the 4th patch to enhance vec_extract on 64-bit power8/power9 machines.

This patch builds on the load-from-memory support and the variable element
number support that were part of the previous patches for vector long/vector
double, and adds the same support for vector float, vector int, vector short,
and vector char.
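
For reference, the source-level idioms this patch targets look roughly like
the snippets below (illustrative only, written in the style of the new tests;
the function names are made up):

    #include <altivec.h>

    /* Constant element number with the vector in memory: with this patch the
       compiler can emit a single scalar load of the element instead of a full
       vector load followed by a permute/convert.  */
    float
    get_float_2 (vector float *p)
    {
      return vec_extract (*p, 2);
    }

    /* Variable element number: on 64-bit ISA 2.07 systems this is now split
       after reload into a shift of the vector followed by a direct move to a
       GPR (for the integer types) or a convert (for float).  */
    int
    get_int_n (vector int v, long n)
    {
      return vec_extract (v, n);
    }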

I have tested these patches with bootstrap builds and by running make check on:

    1) Big endian power7 (both -m32 and -m64 tests done)
    2) Big endian power8 (only -m64 tests were done)
    3) Little endian power8

There were no regressions.  Can I check these patches into the trunk?

One further optimization would be to add support for constant element extracts
when the vector is currently in GPRs rather than vector registers on 64-bit
systems.  I'm not sure whether it would be a win in general, or whether it
would just cause the register allocator to generate more moves between the GPR
and vector register banks.
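
As a purely hypothetical illustration of that case (whether the vector really
stays in GPRs depends on what the register allocator decides), with
<altivec.h> included as above:

    vector int
    build_and_extract (int a, int b, int c, int d, int *p)
    {
      vector int v = (vector int) { a, b, c, d };
      *p = vec_extract (v, 2);   /* v's elements may still live in GPRs here.  */
      return v;
    }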

[gcc]
2016-08-01  Michael Meissner  <meiss...@linux.vnet.ibm.com>

        * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin):
        Add support for vec_extract on vector float, vector int, vector
        short, and vector char vector types.
        * config/rs6000/rs6000.c (rs6000_expand_vector_extract): Add
        vector float, vector int, vector short, and vector char
        optimizations on 64-bit ISA 2.07 systems for both constant and
        variable element numbers.
        (rs6000_split_vec_extract_var): Likewise.
        * config/rs6000/vsx.md (vsx_xscvspdp_scalar2): Allow SFmode to be
        allocated to Altivec registers on ISA 2.07 and above.
        (vsx_extract_v4sf): Delete the alternative that hard coded element 0,
        which was never matched because the split occurs before register
        allocation (and the code would not have worked on little endian
        systems if it had matched).  Allow extracts to go to the Altivec
        registers on ISA 2.07 (power8) and later.  Change from using "" around
        the C++ code to using {}'s.
        (vsx_extract_v4sf_<mode>_load): New insn to optimize vector float
        vec_extracts when the vector is in memory.
        (vsx_extract_v4sf_var): New insn to optimize vector float
        vec_extracts when the element number is variable on 64-bit ISA
        2.07 systems.
        (vsx_extract_<mode>, VSX_EXTRACT_I iterator): Add optimizations
        for 64-bit ISA 2.07 as well as ISA 3.0.
        (vsx_extract_<mode>_p9, VSX_EXTRACT_I iterator): Likewise.
        (vsx_extract_<mode>_p8, VSX_EXTRACT_I iterator): Likewise.
        (vsx_extract_<mode>_load, VSX_EXTRACT_I iterator): New insn to
        optimize vector int, vector short, and vector char vec_extracts
        when the vector is in memory.
        (vsx_extract_<mode>_var, VSX_EXTRACT_I iterator): New insn to
        optimize vector int, vector short, and vector char vec_extracts
        when the element number is variable.

[gcc/testsuite]
2016-08-01  Michael Meissner  <meiss...@linux.vnet.ibm.com>

        * gcc.target/powerpc/vec-extract-5.c: New tests for vec_extract on
        vector float, vector int, vector short, and vector char.
        * gcc.target/powerpc/vec-extract-6.c: Likewise.
        * gcc.target/powerpc/vec-extract-7.c: Likewise.
        * gcc.target/powerpc/vec-extract-8.c: Likewise.
        * gcc.target/powerpc/vec-extract-9.c: Likewise.

-- 
Michael Meissner, IBM
IBM, M/S 2506R, 550 King Street, Littleton, MA 01460-6245, USA
email: meiss...@linux.vnet.ibm.com, phone: +1 (978) 899-4797
Index: gcc/config/rs6000/rs6000-c.c
===================================================================
--- gcc/config/rs6000/rs6000-c.c        (revision 238892)
+++ gcc/config/rs6000/rs6000-c.c        (working copy)
@@ -5135,6 +5135,25 @@ altivec_resolve_overloaded_builtin (loca
                case V2DImode:
                  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
                  break;
+
+               case V4SFmode:
+                 call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
+                 break;
+
+               case V4SImode:
+                 if (TARGET_DIRECT_MOVE_64BIT)
+                   call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
+                 break;
+
+               case V8HImode:
+                 if (TARGET_DIRECT_MOVE_64BIT)
+                   call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
+                 break;
+
+               case V16QImode:
+                 if (TARGET_DIRECT_MOVE_64BIT)
+                   call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
+                 break;
                }
            }
 
@@ -5154,6 +5173,22 @@ altivec_resolve_overloaded_builtin (loca
                case V2DImode:
                  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
                  break;
+
+               case V4SFmode:
+                 call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
+                 break;
+
+               case V4SImode:
+                 call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
+                 break;
+
+               case V8HImode:
+                 call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
+                 break;
+
+               case V16QImode:
+                 call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
+                 break;
                }
            }
 
Index: gcc/config/rs6000/rs6000.c
===================================================================
--- gcc/config/rs6000/rs6000.c  (revision 238899)
+++ gcc/config/rs6000/rs6000.c  (working copy)
@@ -6938,7 +6938,7 @@ rs6000_expand_vector_extract (rtx target
          emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
          return;
        case V16QImode:
-         if (TARGET_VEXTRACTUB)
+         if (TARGET_DIRECT_MOVE_64BIT)
            {
              emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
              return;
@@ -6946,7 +6946,7 @@ rs6000_expand_vector_extract (rtx target
          else
            break;
        case V8HImode:
-         if (TARGET_VEXTRACTUB)
+         if (TARGET_DIRECT_MOVE_64BIT)
            {
              emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
              return;
@@ -6954,7 +6954,7 @@ rs6000_expand_vector_extract (rtx target
          else
            break;
        case V4SImode:
-         if (TARGET_VEXTRACTUB)
+         if (TARGET_DIRECT_MOVE_64BIT)
            {
              emit_insn (gen_vsx_extract_v4si (target, vec, elt));
              return;
@@ -6982,6 +6982,26 @@ rs6000_expand_vector_extract (rtx target
          emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
          return;
 
+       case V4SFmode:
+         if (TARGET_UPPER_REGS_SF)
+           {
+             emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
+             return;
+           }
+         break;
+
+       case V4SImode:
+         emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
+         return;
+
+       case V8HImode:
+         emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
+         return;
+
+       case V16QImode:
+         emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
+         return;
+
        default:
          gcc_unreachable ();
        }
@@ -7253,6 +7273,33 @@ rs6000_split_vec_extract_var (rtx dest, 
          emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
          return;
 
+       case V4SFmode:
+         {
+           rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
+           rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
+           rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
+           emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
+                                         tmp_altivec));
+
+           emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
+           return;
+         }
+
+       case V4SImode:
+       case V8HImode:
+       case V16QImode:
+         {
+           rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
+           rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
+           rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
+           emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
+                                         tmp_altivec));
+           emit_move_insn (tmp_gpr_di, tmp_altivec_di);
+           emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
+                                   GEN_INT (64 - (8 * scalar_size))));
+           return;
+         }
+
        default:
          gcc_unreachable ();
        }
Index: gcc/config/rs6000/vsx.md
===================================================================
--- gcc/config/rs6000/vsx.md    (revision 238899)
+++ gcc/config/rs6000/vsx.md    (working copy)
@@ -1663,7 +1663,7 @@ (define_insn "vsx_xscvdpsp_scalar"
 
 ;; Same as vsx_xscvspdp, but use SF as the type
 (define_insn "vsx_xscvspdp_scalar2"
-  [(set (match_operand:SF 0 "vsx_register_operand" "=f")
+  [(set (match_operand:SF 0 "vsx_register_operand" "=ww")
        (unspec:SF [(match_operand:V4SF 1 "vsx_register_operand" "wa")]
                   UNSPEC_VSX_CVSPDP))]
   "VECTOR_UNIT_VSX_P (V4SFmode)"
@@ -2237,18 +2237,15 @@ (define_insn_and_split "vsx_extract_<mod
 
 ;; Extract a SF element from V4SF
 (define_insn_and_split "vsx_extract_v4sf"
-  [(set (match_operand:SF 0 "vsx_register_operand" "=f,f")
+  [(set (match_operand:SF 0 "vsx_register_operand" "=ww")
        (vec_select:SF
-        (match_operand:V4SF 1 "vsx_register_operand" "wa,wa")
-        (parallel [(match_operand:QI 2 "u5bit_cint_operand" "O,i")])))
-   (clobber (match_scratch:V4SF 3 "=X,0"))]
+        (match_operand:V4SF 1 "vsx_register_operand" "wa")
+        (parallel [(match_operand:QI 2 "u5bit_cint_operand" "n")])))
+   (clobber (match_scratch:V4SF 3 "=0"))]
   "VECTOR_UNIT_VSX_P (V4SFmode)"
-  "@
-   xscvspdp %x0,%x1
-   #"
-  ""
+  "#"
+  "&& 1"
   [(const_int 0)]
-  "
 {
   rtx op0 = operands[0];
   rtx op1 = operands[1];
@@ -2268,10 +2265,46 @@ (define_insn_and_split "vsx_extract_v4sf
     }
   emit_insn (gen_vsx_xscvspdp_scalar2 (op0, tmp));
   DONE;
-}"
-  [(set_attr "length" "4,8")
+}
+  [(set_attr "length" "8")
    (set_attr "type" "fp")])
 
+(define_insn_and_split "*vsx_extract_v4sf_<mode>_load"
+  [(set (match_operand:SF 0 "register_operand" "=f,wv,wb,?r")
+       (vec_select:SF
+        (match_operand:V4SF 1 "memory_operand" "m,Z,m,m")
+        (parallel [(match_operand:QI 2 "const_0_to_3_operand" "n,n,n,n")])))
+   (clobber (match_scratch:P 3 "=&b,&b,&b,&b"))]
+  "VECTOR_MEM_VSX_P (V4SFmode)"
+  "#"
+  "&& reload_completed"
+  [(set (match_dup 0) (match_dup 4))]
+{
+  operands[4] = rs6000_adjust_vec_address (operands[0], operands[1], operands[2],
+                                          operands[3], SFmode);
+}
+  [(set_attr "type" "fpload,fpload,fpload,load")
+   (set_attr "length" "8")])
+
+;; Variable V4SF extract
+(define_insn_and_split "vsx_extract_v4sf_var"
+  [(set (match_operand:SF 0 "gpc_reg_operand" "=ww,ww,?r")
+       (unspec:SF [(match_operand:V4SF 1 "input_operand" "v,m,m")
+                   (match_operand:DI 2 "gpc_reg_operand" "r,r,r")]
+                  UNSPEC_VSX_EXTRACT))
+   (clobber (match_scratch:DI 3 "=r,&b,&b"))
+   (clobber (match_scratch:V2DI 4 "=&v,X,X"))]
+  "VECTOR_MEM_VSX_P (V4SFmode) && TARGET_DIRECT_MOVE_64BIT
+   && TARGET_UPPER_REGS_SF"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  rs6000_split_vec_extract_var (operands[0], operands[1], operands[2],
+                               operands[3], operands[4]);
+  DONE;
+})
+
 ;; Expand the builtin form of xxpermdi to canonical rtl.
 (define_expand "vsx_xxpermdi_<mode>"
   [(match_operand:VSX_L 0 "vsx_register_operand" "")
@@ -2370,7 +2403,21 @@ (define_expand "vec_perm_const<mode>"
 ;; Extraction of a single element in a small integer vector.  None of the small
 ;; types are currently allowed in a vector register, so we extract to a DImode
 ;; and either do a direct move or store.
-(define_insn_and_split  "vsx_extract_<mode>"
+(define_expand  "vsx_extract_<mode>"
+  [(parallel [(set (match_operand:<VS_scalar> 0 "nonimmediate_operand" "")
+                  (vec_select:<VS_scalar>
+                   (match_operand:VSX_EXTRACT_I 1 "gpc_reg_operand" "")
+                   (parallel [(match_operand:QI 2 "const_int_operand" "")])))
+             (clobber (match_dup 3))])]
+  "VECTOR_MEM_VSX_P (<MODE>mode) && TARGET_DIRECT_MOVE_64BIT"
+{
+  operands[3] = gen_rtx_SCRATCH ((TARGET_VEXTRACTUB) ? DImode : <MODE>mode);
+})
+
+;; Under ISA 3.0, we can use the byte/half-word/word integer stores if we are
+;; extracting a vector element and storing it to memory, rather than using
+;; direct move to a GPR and a GPR store.
+(define_insn_and_split  "*vsx_extract_<mode>_p9"
   [(set (match_operand:<VS_scalar> 0 "nonimmediate_operand" "=r,Z")
        (vec_select:<VS_scalar>
         (match_operand:VSX_EXTRACT_I 1 "gpc_reg_operand" "<VSX_EX>,<VSX_EX>")
@@ -2438,6 +2485,95 @@ (define_insn  "vsx_extract_<mode>_di"
 }
   [(set_attr "type" "vecsimple")])
 
+(define_insn_and_split  "*vsx_extract_<mode>_p8"
+  [(set (match_operand:<VS_scalar> 0 "nonimmediate_operand" "=r")
+       (vec_select:<VS_scalar>
+        (match_operand:VSX_EXTRACT_I 1 "gpc_reg_operand" "v")
+        (parallel [(match_operand:QI 2 "<VSX_EXTRACT_PREDICATE>" "n")])))
+   (clobber (match_scratch:VSX_EXTRACT_I 3 "=v"))]
+  "VECTOR_MEM_VSX_P (<MODE>mode) && TARGET_DIRECT_MOVE_64BIT"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  rtx dest = operands[0];
+  rtx src = operands[1];
+  rtx element = operands[2];
+  rtx vec_tmp = operands[3];
+  int value;
+
+  if (!VECTOR_ELT_ORDER_BIG)
+    element = GEN_INT (GET_MODE_NUNITS (<MODE>mode) - 1 - INTVAL (element));
+
+  /* If the value is in the correct position, we can avoid doing the VSPLT<x>
+     instruction.  */
+  value = INTVAL (element);
+  if (<MODE>mode == V16QImode)
+    {
+      if (value != 7)
+       emit_insn (gen_altivec_vspltb_direct (vec_tmp, src, element));
+      else
+       vec_tmp = src;
+    }
+  else if (<MODE>mode == V8HImode)
+    {
+      if (value != 3)
+       emit_insn (gen_altivec_vsplth_direct (vec_tmp, src, element));
+      else
+       vec_tmp = src;
+    }
+  else if (<MODE>mode == V4SImode)
+    {
+      if (value != 1)
+       emit_insn (gen_altivec_vspltw_direct (vec_tmp, src, element));
+      else
+       vec_tmp = src;
+    }
+  else
+    gcc_unreachable ();
+
+  emit_move_insn (gen_rtx_REG (DImode, REGNO (dest)),
+                 gen_rtx_REG (DImode, REGNO (vec_tmp)));
+  DONE;
+}
+  [(set_attr "type" "mftgpr")])
+
+;; Optimize extracting a single scalar element from memory.
+(define_insn_and_split "*vsx_extract_<mode>_load"
+  [(set (match_operand:<VS_scalar> 0 "register_operand" "=r")
+       (vec_select:<VS_scalar>
+        (match_operand:VSX_EXTRACT_I 1 "memory_operand" "m")
+        (parallel [(match_operand:QI 2 "<VSX_EXTRACT_PREDICATE>" "n")])))
+   (clobber (match_scratch:DI 3 "=&b"))]
+  "VECTOR_MEM_VSX_P (<MODE>mode) && TARGET_DIRECT_MOVE_64BIT"
+  "#"
+  "&& reload_completed"
+  [(set (match_dup 0) (match_dup 4))]
+{
+  operands[4] = rs6000_adjust_vec_address (operands[0], operands[1], operands[2],
+                                          operands[3], <VS_scalar>mode);
+}
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+;; Variable V16QI/V8HI/V4SI extract
+(define_insn_and_split "vsx_extract_<mode>_var"
+  [(set (match_operand:<VS_scalar> 0 "gpc_reg_operand" "=r,r")
+       (unspec:<VS_scalar>
+        [(match_operand:VSX_EXTRACT_I 1 "input_operand" "v,m")
+         (match_operand:DI 2 "gpc_reg_operand" "r,r")]
+        UNSPEC_VSX_EXTRACT))
+   (clobber (match_scratch:DI 3 "=r,&b"))
+   (clobber (match_scratch:V2DI 4 "=&v,X"))]
+  "VECTOR_MEM_VSX_P (<MODE>mode) && TARGET_DIRECT_MOVE_64BIT"
+  "#"
+  "&& reload_completed"
+  [(const_int 0)]
+{
+  rs6000_split_vec_extract_var (operands[0], operands[1], operands[2],
+                               operands[3], operands[4]);
+  DONE;
+})
 
 ;; Expanders for builtins
 (define_expand "vsx_mergel_<mode>"
Index: gcc/testsuite/gcc.target/powerpc/vec-extract-6.c
===================================================================
--- gcc/testsuite/gcc.target/powerpc/vec-extract-6.c    (revision 0)
+++ gcc/testsuite/gcc.target/powerpc/vec-extract-6.c    (revision 0)
@@ -0,0 +1,68 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-O2 -mcpu=power8" } */
+
+#include <altivec.h>
+
+unsigned char
+add_unsigned_char_0 (vector unsigned char *p)
+{
+  return vec_extract (*p, 0) + 1;
+}
+
+unsigned char
+add_unsigned_char_1 (vector unsigned char *p)
+{
+  return vec_extract (*p, 1) + 1;
+}
+
+unsigned char
+add_unsigned_char_2 (vector unsigned char *p)
+{
+  return vec_extract (*p, 2) + 1;
+}
+
+unsigned char
+add_unsigned_char_3 (vector unsigned char *p)
+{
+  return vec_extract (*p, 3) + 1;
+}
+
+unsigned char
+add_unsigned_char_4 (vector unsigned char *p)
+{
+  return vec_extract (*p, 4) + 1;
+}
+
+unsigned char
+add_unsigned_char_5 (vector unsigned char *p)
+{
+  return vec_extract (*p, 5) + 1;
+}
+
+unsigned char
+add_unsigned_char_6 (vector unsigned char *p)
+{
+  return vec_extract (*p, 6) + 1;
+}
+
+unsigned char
+add_unsigned_char_7 (vector unsigned char *p)
+{
+  return vec_extract (*p, 7) + 1;
+}
+
+unsigned char
+add_unsigned_char_n (vector unsigned char *p, int n)
+{
+  return vec_extract (*p, n) + 1;
+}
+
+/* { dg-final { scan-assembler-not "lxvd2x"   } } */
+/* { dg-final { scan-assembler-not "lxvw4x"   } } */
+/* { dg-final { scan-assembler-not "lxvx"     } } */
+/* { dg-final { scan-assembler-not "lxv"      } } */
+/* { dg-final { scan-assembler-not "lvx"      } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */
Index: gcc/testsuite/gcc.target/powerpc/vec-extract-7.c
===================================================================
--- gcc/testsuite/gcc.target/powerpc/vec-extract-7.c    (revision 0)
+++ gcc/testsuite/gcc.target/powerpc/vec-extract-7.c    (revision 0)
@@ -0,0 +1,44 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-O2 -mcpu=power8" } */
+
+#include <altivec.h>
+
+float
+add_float_0 (vector float *p)
+{
+  return vec_extract (*p, 0) + 1.0f;
+}
+
+float
+add_float_1 (vector float *p)
+{
+  return vec_extract (*p, 1) + 1.0f;
+}
+
+float
+add_float_2 (vector float *p)
+{
+  return vec_extract (*p, 2) + 1.0f;
+}
+
+float
+add_float_3 (vector float *p)
+{
+  return vec_extract (*p, 3) + 1.0f;
+}
+
+float
+add_float_n (vector float *p, long n)
+{
+  return vec_extract (*p, n) + 1.0f;
+}
+
+/* { dg-final { scan-assembler-not "lxvd2x"   } } */
+/* { dg-final { scan-assembler-not "lxvw4x"   } } */
+/* { dg-final { scan-assembler-not "lxvx"     } } */
+/* { dg-final { scan-assembler-not "lxv"      } } */
+/* { dg-final { scan-assembler-not "lvx"      } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */
Index: gcc/testsuite/gcc.target/powerpc/vec-extract-8.c
===================================================================
--- gcc/testsuite/gcc.target/powerpc/vec-extract-8.c    (revision 0)
+++ gcc/testsuite/gcc.target/powerpc/vec-extract-8.c    (revision 0)
@@ -0,0 +1,44 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-O2 -mcpu=power8" } */
+
+#include <altivec.h>
+
+int
+add_int_0 (vector int *p)
+{
+  return vec_extract (*p, 0) + 1;
+}
+
+int
+add_int_1 (vector int *p)
+{
+  return vec_extract (*p, 1) + 1;
+}
+
+int
+add_int_2 (vector int *p)
+{
+  return vec_extract (*p, 2) + 1;
+}
+
+int
+add_int_3 (vector int *p)
+{
+  return vec_extract (*p, 3) + 1;
+}
+
+int
+add_int_n (vector int *p, int n)
+{
+  return vec_extract (*p, n) + 1;
+}
+
+/* { dg-final { scan-assembler-not "lxvd2x"   } } */
+/* { dg-final { scan-assembler-not "lxvw4x"   } } */
+/* { dg-final { scan-assembler-not "lxvx"     } } */
+/* { dg-final { scan-assembler-not "lxv"      } } */
+/* { dg-final { scan-assembler-not "lvx"      } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */
Index: gcc/testsuite/gcc.target/powerpc/vec-extract-9.c
===================================================================
--- gcc/testsuite/gcc.target/powerpc/vec-extract-9.c    (revision 0)
+++ gcc/testsuite/gcc.target/powerpc/vec-extract-9.c    (revision 0)
@@ -0,0 +1,68 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-O2 -mcpu=power8" } */
+
+#include <altivec.h>
+
+short
+add_short_0 (vector short *p)
+{
+  return vec_extract (*p, 0) + 1;
+}
+
+short
+add_short_1 (vector short *p)
+{
+  return vec_extract (*p, 1) + 1;
+}
+
+short
+add_short_2 (vector short *p)
+{
+  return vec_extract (*p, 2) + 1;
+}
+
+short
+add_short_3 (vector short *p)
+{
+  return vec_extract (*p, 3) + 1;
+}
+
+short
+add_short_4 (vector short *p)
+{
+  return vec_extract (*p, 4) + 1;
+}
+
+short
+add_short_5 (vector short *p)
+{
+  return vec_extract (*p, 5) + 1;
+}
+
+short
+add_short_6 (vector short *p)
+{
+  return vec_extract (*p, 6) + 1;
+}
+
+short
+add_short_7 (vector short *p)
+{
+  return vec_extract (*p, 7) + 1;
+}
+
+short
+add_short_n (vector short *p, int n)
+{
+  return vec_extract (*p, n) + 1;
+}
+
+/* { dg-final { scan-assembler-not "lxvd2x"   } } */
+/* { dg-final { scan-assembler-not "lxvw4x"   } } */
+/* { dg-final { scan-assembler-not "lxvx"     } } */
+/* { dg-final { scan-assembler-not "lxv"      } } */
+/* { dg-final { scan-assembler-not "lvx"      } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */
Index: gcc/testsuite/gcc.target/powerpc/vec-extract-5.c
===================================================================
--- gcc/testsuite/gcc.target/powerpc/vec-extract-5.c    (revision 0)
+++ gcc/testsuite/gcc.target/powerpc/vec-extract-5.c    (revision 0)
@@ -0,0 +1,68 @@
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-O2 -mcpu=power8" } */
+
+#include <altivec.h>
+
+signed char
+add_signed_char_0 (vector signed char *p)
+{
+  return vec_extract (*p, 0) + 1;
+}
+
+signed char
+add_signed_char_1 (vector signed char *p)
+{
+  return vec_extract (*p, 1) + 1;
+}
+
+signed char
+add_signed_char_2 (vector signed char *p)
+{
+  return vec_extract (*p, 2) + 1;
+}
+
+signed char
+add_signed_char_3 (vector signed char *p)
+{
+  return vec_extract (*p, 3) + 1;
+}
+
+signed char
+add_signed_char_4 (vector signed char *p)
+{
+  return vec_extract (*p, 4) + 1;
+}
+
+signed char
+add_signed_char_5 (vector signed char *p)
+{
+  return vec_extract (*p, 5) + 1;
+}
+
+signed char
+add_signed_char_6 (vector signed char *p)
+{
+  return vec_extract (*p, 6) + 1;
+}
+
+signed char
+add_signed_char_7 (vector signed char *p)
+{
+  return vec_extract (*p, 7) + 1;
+}
+
+signed char
+add_signed_char_n (vector signed char *p, int n)
+{
+  return vec_extract (*p, n) + 1;
+}
+
+/* { dg-final { scan-assembler-not "lxvd2x"   } } */
+/* { dg-final { scan-assembler-not "lxvw4x"   } } */
+/* { dg-final { scan-assembler-not "lxvx"     } } */
+/* { dg-final { scan-assembler-not "lxv"      } } */
+/* { dg-final { scan-assembler-not "lvx"      } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */
