On Thu, May 05, 2016 at 01:34:07PM +0300, Kirill Yukhin wrote:
> > So, is the patch ok for trunk with the two testcases turned into
> > dg-do assemble tests, or do you want me to repost with that, or add the
> > Yv constraint right away, something else?
> Nope. Patch is pre-OK. Thanks!

Actually, it isn't all that hard to add the new constraint and use it.
So here is a so far untested patch, though because of the scan-assembler
it depends on the PR target/70927 fix.

2016-05-03  Jakub Jelinek  <ja...@redhat.com>

	* config/i386/constraints.md (Yv): New constraint.
	* config/i386/i386.h (VALID_AVX512VL_128_REG_MODE): Allow TFmode
	and V1TImode in xmm16+ registers for TARGET_AVX512VL.
	* config/i386/i386.md (avx512fvecmode): New mode attr.
	(*pushtf): Use v constraint instead of x.
	(*movtf_internal): Likewise.  For TARGET_AVX512VL and xmm16+
	registers, use vmovdqu64 or vmovdqa64 instructions.
	(*absneg<mode>2): Use Yv constraint instead of x constraint.
	(*absnegtf2_sse): Likewise.
	(copysign<mode>3_const, copysign<mode>3_var): Likewise.
	* config/i386/sse.md (*andnot<mode>3): Add avx512vl and avx512f
	alternatives.
	(*andnottf3, *<code><mode>3, *<code>tf3): Likewise.

	* gcc.target/i386/avx512dq-abs-copysign-1.c: New test.
	* gcc.target/i386/avx512vl-abs-copysign-1.c: New test.
	* gcc.target/i386/avx512vl-abs-copysign-2.c: New test.

--- gcc/config/i386/constraints.md.jj	2016-05-03 13:44:31.000000000 +0200
+++ gcc/config/i386/constraints.md	2016-05-05 12:03:50.197071618 +0200
@@ -145,6 +145,10 @@ (define_register_constraint "Yr"
  "TARGET_SSE ? (X86_TUNE_AVOID_4BYTE_PREFIXES ? NO_REX_SSE_REGS : ALL_SSE_REGS) : NO_REGS"
  "@internal Lower SSE register when avoiding REX prefix and all SSE registers otherwise.")
 
+(define_register_constraint "Yv"
+ "TARGET_AVX512VL ? ALL_SSE_REGS : TARGET_SSE ? SSE_REGS : NO_REGS"
+ "@internal For AVX512VL, any EVEX encodable SSE register (@code{%xmm0-%xmm31}), otherwise any SSE register.")
+
 ;; We use the B prefix to denote any number of internal operands:
 ;;  f  FLAGS_REG
 ;;  g  GOT memory operand.
--- gcc/config/i386/i386.h.jj	2016-05-03 21:27:41.253864955 +0200
+++ gcc/config/i386/i386.h	2016-05-05 12:04:06.627852607 +0200
@@ -1126,7 +1126,8 @@ extern const char *host_detect_local_cpu
 
 #define VALID_AVX512VL_128_REG_MODE(MODE)				\
   ((MODE) == V2DImode || (MODE) == V2DFmode || (MODE) == V16QImode	\
-   || (MODE) == V4SImode || (MODE) == V4SFmode || (MODE) == V8HImode)
+   || (MODE) == V4SImode || (MODE) == V4SFmode || (MODE) == V8HImode	\
+   || (MODE) == TFmode || (MODE) == V1TImode)
 
 #define VALID_SSE2_REG_MODE(MODE)					\
   ((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode	\
--- gcc/config/i386/i386.md.jj	2016-05-03 21:27:45.560807504 +0200
+++ gcc/config/i386/i386.md	2016-05-05 12:13:21.355458467 +0200
@@ -1165,6 +1165,10 @@ (define_mode_attr ssevecmode
 (define_mode_attr ssevecmodelower
   [(QI "v16qi") (HI "v8hi") (SI "v4si") (DI "v2di") (SF "v4sf") (DF "v2df")])
 
+;; AVX512F vector mode corresponding to a scalar mode
+(define_mode_attr avx512fvecmode
+  [(QI "V64QI") (HI "V32HI") (SI "V16SI") (DI "V8DI") (SF "V16SF") (DF "V8DF")])
+
 ;; Instruction suffix for REX 64bit operators.
 (define_mode_attr rex64suffix [(SI "") (DI "{q}")])
 
@@ -2928,7 +2932,7 @@ (define_insn "*insvqi"
 
 (define_insn "*pushtf"
   [(set (match_operand:TF 0 "push_operand" "=<,<")
-	(match_operand:TF 1 "general_no_elim_operand" "x,*roF"))]
+	(match_operand:TF 1 "general_no_elim_operand" "v,*roF"))]
   "TARGET_64BIT || TARGET_SSE"
 {
   /* This insn should be already split before reg-stack.  */
@@ -3107,8 +3111,8 @@ (define_expand "mov<mode>"
   "ix86_expand_move (<MODE>mode, operands); DONE;")
 
 (define_insn "*movtf_internal"
-  [(set (match_operand:TF 0 "nonimmediate_operand" "=x,x ,m,?*r ,!o")
-	(match_operand:TF 1 "general_operand" "C ,xm,x,*roF,*rC"))]
+  [(set (match_operand:TF 0 "nonimmediate_operand" "=v,v ,m,?*r ,!o")
+	(match_operand:TF 1 "general_operand" "C ,vm,v,*roF,*rC"))]
   "(TARGET_64BIT || TARGET_SSE)
    && !(MEM_P (operands[0]) && MEM_P (operands[1]))
    && (!can_create_pseudo_p ()
@@ -3133,6 +3137,10 @@ (define_insn "*movtf_internal"
 	{
 	  if (get_attr_mode (insn) == MODE_V4SF)
 	    return "%vmovups\t{%1, %0|%0, %1}";
+	  else if (TARGET_AVX512VL
+		   && (EXT_REX_SSE_REG_P (operands[0])
+		       || EXT_REX_SSE_REG_P (operands[1])))
+	    return "vmovdqu64\t{%1, %0|%0, %1}";
 	  else
 	    return "%vmovdqu\t{%1, %0|%0, %1}";
 	}
@@ -3140,6 +3148,10 @@ (define_insn "*movtf_internal"
 	{
 	  if (get_attr_mode (insn) == MODE_V4SF)
 	    return "%vmovaps\t{%1, %0|%0, %1}";
+	  else if (TARGET_AVX512VL
+		   && (EXT_REX_SSE_REG_P (operands[0])
+		       || EXT_REX_SSE_REG_P (operands[1])))
+	    return "vmovdqa64\t{%1, %0|%0, %1}";
 	  else
 	    return "%vmovdqa\t{%1, %0|%0, %1}";
 	}
@@ -9251,10 +9263,10 @@ (define_expand "<code><mode>2"
   "ix86_expand_fp_absneg_operator (<CODE>, <MODE>mode, operands); DONE;")
 
 (define_insn "*absneg<mode>2"
-  [(set (match_operand:MODEF 0 "register_operand" "=x,x,f,!r")
+  [(set (match_operand:MODEF 0 "register_operand" "=Yv,Yv,f,!r")
 	(match_operator:MODEF 3 "absneg_operator"
-	  [(match_operand:MODEF 1 "register_operand" "0,x,0,0")]))
-   (use (match_operand:<ssevecmode> 2 "nonimmediate_operand" "xm,0,X,X"))
+	  [(match_operand:MODEF 1 "register_operand" "0,Yv,0,0")]))
+   (use (match_operand:<ssevecmode> 2 "nonimmediate_operand" "Yvm,0,X,X"))
    (clobber (reg:CC FLAGS_REG))]
   "TARGET_80387 || (SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH)"
   "#"
@@ -9286,10 +9298,10 @@ (define_expand "<code>tf2"
   "ix86_expand_fp_absneg_operator (<CODE>, TFmode, operands); DONE;")
 
 (define_insn "*absnegtf2_sse"
-  [(set (match_operand:TF 0 "register_operand" "=x,x")
+  [(set (match_operand:TF 0 "register_operand" "=Yv,Yv")
 	(match_operator:TF 3 "absneg_operator"
-	  [(match_operand:TF 1 "register_operand" "0,x")]))
-   (use (match_operand:TF 2 "nonimmediate_operand" "xm,0"))
+	  [(match_operand:TF 1 "register_operand" "0,Yv")]))
+   (use (match_operand:TF 2 "nonimmediate_operand" "Yvm,0"))
    (clobber (reg:CC FLAGS_REG))]
   "TARGET_SSE"
   "#")
@@ -9470,11 +9482,11 @@ (define_expand "copysign<mode>3"
   "ix86_expand_copysign (operands); DONE;")
 
 (define_insn_and_split "copysign<mode>3_const"
-  [(set (match_operand:CSGNMODE 0 "register_operand" "=x")
+  [(set (match_operand:CSGNMODE 0 "register_operand" "=Yv")
 	(unspec:CSGNMODE
-	  [(match_operand:<CSGNVMODE> 1 "vector_move_operand" "xmC")
+	  [(match_operand:<CSGNVMODE> 1 "vector_move_operand" "YvmC")
 	   (match_operand:CSGNMODE 2 "register_operand" "0")
-	   (match_operand:<CSGNVMODE> 3 "nonimmediate_operand" "xm")]
+	   (match_operand:<CSGNVMODE> 3 "nonimmediate_operand" "Yvm")]
 	  UNSPEC_COPYSIGN))]
   "(SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH)
    || (TARGET_SSE && (<MODE>mode == TFmode))"
@@ -9484,14 +9496,16 @@ (define_insn_and_split "copysign<mode>3_
   "ix86_split_copysign_const (operands); DONE;")
 
 (define_insn "copysign<mode>3_var"
-  [(set (match_operand:CSGNMODE 0 "register_operand" "=x,x,x,x,x")
+  [(set (match_operand:CSGNMODE 0 "register_operand" "=Yv,Yv,Yv,Yv,Yv")
 	(unspec:CSGNMODE
-	  [(match_operand:CSGNMODE 2 "register_operand" "x,0,0,x,x")
-	   (match_operand:CSGNMODE 3 "register_operand" "1,1,x,1,x")
-	   (match_operand:<CSGNVMODE> 4 "nonimmediate_operand" "X,xm,xm,0,0")
-	   (match_operand:<CSGNVMODE> 5 "nonimmediate_operand" "0,xm,1,xm,1")]
+	  [(match_operand:CSGNMODE 2 "register_operand" "Yv,0,0,Yv,Yv")
+	   (match_operand:CSGNMODE 3 "register_operand" "1,1,Yv,1,Yv")
+	   (match_operand:<CSGNVMODE> 4
+	      "nonimmediate_operand" "X,Yvm,Yvm,0,0")
+	   (match_operand:<CSGNVMODE> 5
+	      "nonimmediate_operand" "0,Yvm,1,Yvm,1")]
 	  UNSPEC_COPYSIGN))
-   (clobber (match_scratch:<CSGNVMODE> 1 "=x,x,x,x,x"))]
+   (clobber (match_scratch:<CSGNVMODE> 1 "=Yv,Yv,Yv,Yv,Yv"))]
   "(SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH)
    || (TARGET_SSE && (<MODE>mode == TFmode))"
   "#")
--- gcc/config/i386/sse.md.jj	2016-05-04 21:56:02.542559210 +0200
+++ gcc/config/i386/sse.md	2016-05-05 12:04:06.639852447 +0200
@@ -3000,11 +3000,11 @@ (define_expand "copysign<mode>3"
 ;; because the native instructions read the full 128-bits.
 
 (define_insn "*andnot<mode>3"
-  [(set (match_operand:MODEF 0 "register_operand" "=x,x")
+  [(set (match_operand:MODEF 0 "register_operand" "=x,x,v,v")
 	(and:MODEF
 	  (not:MODEF
-	    (match_operand:MODEF 1 "register_operand" "0,x"))
-	    (match_operand:MODEF 2 "register_operand" "x,x")))]
+	    (match_operand:MODEF 1 "register_operand" "0,x,v,v"))
+	    (match_operand:MODEF 2 "register_operand" "x,x,v,v")))]
   "SSE_FLOAT_MODE_P (<MODE>mode)"
 {
   static char buf[32];
@@ -3020,6 +3020,24 @@ (define_insn "*andnot<mode>3"
     case 1:
       ops = "vandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
       break;
+    case 2:
+      if (TARGET_AVX512DQ)
+	ops = "vandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
+      else
+	{
+	  suffix = <MODE>mode == DFmode ? "q" : "d";
+	  ops = "vpandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
+	}
+      break;
+    case 3:
+      if (TARGET_AVX512DQ)
+	ops = "vandn%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+      else
+	{
+	  suffix = <MODE>mode == DFmode ? "q" : "d";
+	  ops = "vpandn%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+	}
+      break;
     default:
       gcc_unreachable ();
     }
@@ -3027,11 +3045,19 @@ (define_insn "*andnot<mode>3"
   snprintf (buf, sizeof (buf), ops, suffix);
   return buf;
 }
-  [(set_attr "isa" "noavx,avx")
+  [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
   (set_attr "type" "sselog")
-   (set_attr "prefix" "orig,vex")
+   (set_attr "prefix" "orig,vex,evex,evex")
   (set (attr "mode")
-	(cond [(and (match_test "<MODE_SIZE> == 16")
+	(cond [(eq_attr "alternative" "2")
+		 (if_then_else (match_test "TARGET_AVX512DQ")
+			       (const_string "<ssevecmode>")
+			       (const_string "TI"))
+	       (eq_attr "alternative" "3")
+		 (if_then_else (match_test "TARGET_AVX512DQ")
+			       (const_string "<avx512fvecmode>")
+			       (const_string "XI"))
+	       (and (match_test "<MODE_SIZE> == 16")
 		    (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL"))
 		 (const_string "V4SF")
 	       (match_test "TARGET_AVX")
@@ -3042,16 +3068,17 @@ (define_insn "*andnot<mode>3"
 	      (const_string "<ssevecmode>")))])
 
 (define_insn "*andnottf3"
-  [(set (match_operand:TF 0 "register_operand" "=x,x")
+  [(set (match_operand:TF 0 "register_operand" "=x,x,v,v")
 	(and:TF
-	  (not:TF (match_operand:TF 1 "register_operand" "0,x"))
-	  (match_operand:TF 2 "vector_operand" "xBm,xm")))]
+	  (not:TF (match_operand:TF 1 "register_operand" "0,x,v,v"))
+	  (match_operand:TF 2 "vector_operand" "xBm,xm,vm,v")))]
   "TARGET_SSE"
 {
   static char buf[32];
   const char *ops;
   const char *tmp
-    = (get_attr_mode (insn) == MODE_V4SF) ? "andnps" : "pandn";
+    = (which_alternative >= 2 ? "pandnq"
+       : get_attr_mode (insn) == MODE_V4SF ? "andnps" : "pandn");
 
   switch (which_alternative)
     {
@@ -3059,8 +3086,12 @@ (define_insn "*andnottf3"
       ops = "%s\t{%%2, %%0|%%0, %%2}";
       break;
     case 1:
+    case 2:
       ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
       break;
+    case 3:
+      ops = "v%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+      break;
     default:
       gcc_unreachable ();
     }
@@ -3068,7 +3099,7 @@ (define_insn "*andnottf3"
   snprintf (buf, sizeof (buf), ops, tmp);
   return buf;
 }
-  [(set_attr "isa" "noavx,avx")
+  [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
   (set_attr "type" "sselog")
   (set (attr "prefix_data16")
     (if_then_else
@@ -3076,9 +3107,13 @@ (define_insn "*andnottf3"
 	(eq_attr "mode" "TI"))
       (const_string "1")
       (const_string "*")))
-   (set_attr "prefix" "orig,vex")
+   (set_attr "prefix" "orig,vex,evex,evex")
   (set (attr "mode")
-	(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+	(cond [(eq_attr "alternative" "2")
+		 (const_string "TI")
+	       (eq_attr "alternative" "3")
+		 (const_string "XI")
+	       (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
 		 (const_string "V4SF")
 	       (match_test "TARGET_AVX")
 		 (const_string "TI")
@@ -3089,10 +3124,10 @@ (define_insn "*andnottf3"
 	      (const_string "TI")))])
 
 (define_insn "*<code><mode>3"
-  [(set (match_operand:MODEF 0 "register_operand" "=x,x")
+  [(set (match_operand:MODEF 0 "register_operand" "=x,x,v,v")
 	(any_logic:MODEF
-	  (match_operand:MODEF 1 "register_operand" "%0,x")
-	  (match_operand:MODEF 2 "register_operand" "x,x")))]
+	  (match_operand:MODEF 1 "register_operand" "%0,x,v,v")
+	  (match_operand:MODEF 2 "register_operand" "x,x,v,v")))]
   "SSE_FLOAT_MODE_P (<MODE>mode)"
 {
   static char buf[32];
@@ -3105,9 +3140,26 @@ (define_insn "*<code><mode>3"
     case 0:
       ops = "<logic>%s\t{%%2, %%0|%%0, %%2}";
      break;
+    case 2:
+      if (!TARGET_AVX512DQ)
+	{
+	  suffix = <MODE>mode == DFmode ? "q" : "d";
+	  ops = "vp<logic>%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
+	  break;
+	}
+      /* FALLTHRU */
     case 1:
       ops = "v<logic>%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
      break;
+    case 3:
+      if (TARGET_AVX512DQ)
+	ops = "v<logic>%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+      else
+	{
+	  suffix = <MODE>mode == DFmode ? "q" : "d";
+	  ops = "vp<logic>%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+	}
+      break;
     default:
       gcc_unreachable ();
     }
@@ -3115,11 +3167,19 @@ (define_insn "*<code><mode>3"
   snprintf (buf, sizeof (buf), ops, suffix);
   return buf;
 }
-  [(set_attr "isa" "noavx,avx")
+  [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
   (set_attr "type" "sselog")
-   (set_attr "prefix" "orig,vex")
+   (set_attr "prefix" "orig,vex,evex,evex")
  (set (attr "mode")
-	(cond [(and (match_test "<MODE_SIZE> == 16")
+	(cond [(eq_attr "alternative" "2")
+		 (if_then_else (match_test "TARGET_AVX512DQ")
+			       (const_string "<ssevecmode>")
+			       (const_string "TI"))
+	       (eq_attr "alternative" "3")
+		 (if_then_else (match_test "TARGET_AVX512DQ")
+			       (const_string "<avx512fvecmode>")
+			       (const_string "XI"))
+	       (and (match_test "<MODE_SIZE> == 16")
 		    (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL"))
 		 (const_string "V4SF")
 	       (match_test "TARGET_AVX")
@@ -3138,17 +3198,18 @@ (define_expand "<code>tf3"
   "ix86_fixup_binary_operands_no_copy (<CODE>, TFmode, operands);")
 
 (define_insn "*<code>tf3"
-  [(set (match_operand:TF 0 "register_operand" "=x,x")
+  [(set (match_operand:TF 0 "register_operand" "=x,x,v,v")
 	(any_logic:TF
-	  (match_operand:TF 1 "vector_operand" "%0,x")
-	  (match_operand:TF 2 "vector_operand" "xBm,xm")))]
+	  (match_operand:TF 1 "vector_operand" "%0,x,v,v")
+	  (match_operand:TF 2 "vector_operand" "xBm,xm,vm,v")))]
   "TARGET_SSE
    && ix86_binary_operator_ok (<CODE>, TFmode, operands)"
 {
   static char buf[32];
   const char *ops;
   const char *tmp
-    = (get_attr_mode (insn) == MODE_V4SF) ? "<logic>ps" : "p<logic>";
+    = (which_alternative >= 2 ? "p<logic>q"
+       : get_attr_mode (insn) == MODE_V4SF ? "<logic>ps" : "p<logic>");
 
   switch (which_alternative)
     {
@@ -3156,8 +3217,12 @@ (define_insn "*<code>tf3"
      ops = "%s\t{%%2, %%0|%%0, %%2}";
      break;
    case 1:
+    case 2:
      ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
      break;
+    case 3:
+      ops = "v%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}";
+      break;
    default:
      gcc_unreachable ();
    }
@@ -3165,7 +3230,7 @@ (define_insn "*<code>tf3"
   snprintf (buf, sizeof (buf), ops, tmp);
   return buf;
 }
-  [(set_attr "isa" "noavx,avx")
+  [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
   (set_attr "type" "sselog")
   (set (attr "prefix_data16")
     (if_then_else
@@ -3173,9 +3238,13 @@ (define_insn "*<code>tf3"
 	(eq_attr "mode" "TI"))
       (const_string "1")
      (const_string "*")))
-   (set_attr "prefix" "orig,vex")
+   (set_attr "prefix" "orig,vex,evex,evex")
  (set (attr "mode")
-	(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+	(cond [(eq_attr "alternative" "2")
+		 (const_string "TI")
+	       (eq_attr "alternative" "3")
+		 (const_string "QI")
+	       (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
 		 (const_string "V4SF")
 	       (match_test "TARGET_AVX")
 		 (const_string "TI")
--- gcc/testsuite/gcc.target/i386/avx512dq-abs-copysign-1.c.jj	2016-05-05 12:45:16.798943439 +0200
+++ gcc/testsuite/gcc.target/i386/avx512dq-abs-copysign-1.c	2016-05-05 12:46:06.758277777 +0200
@@ -0,0 +1,71 @@
+/* { dg-do compile { target { avx512vl && { avx512dq && { ! ia32 } } } } } */
+/* { dg-options "-Ofast -mavx512vl -mavx512dq" } */
+
+void
+f1 (float x)
+{
+  register float a __asm ("xmm16");
+  a = x;
+  asm volatile ("" : "+v" (a));
+  a = __builtin_fabsf (a);
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f2 (float x, float y)
+{
+  register float a __asm ("xmm16"), b __asm ("xmm17");
+  a = x;
+  b = y;
+  asm volatile ("" : "+v" (a), "+v" (b));
+  a = __builtin_copysignf (a, b);
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f3 (float x)
+{
+  register float a __asm ("xmm16");
+  a = x;
+  asm volatile ("" : "+v" (a));
+  a = -a;
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f4 (double x)
+{
+  register double a __asm ("xmm18");
+  a = x;
+  asm volatile ("" : "+v" (a));
+  a = __builtin_fabs (a);
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f5 (double x, double y)
+{
+  register double a __asm ("xmm18"), b __asm ("xmm19");
+  a = x;
+  b = y;
+  asm volatile ("" : "+v" (a), "+v" (b));
+  a = __builtin_copysign (a, b);
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f6 (double x)
+{
+  register double a __asm ("xmm18");
+  a = x;
+  asm volatile ("" : "+v" (a));
+  a = -a;
+  asm volatile ("" : "+v" (a));
+}
+
+/* { dg-final { scan-assembler "vandps\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vorps\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vxorps\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vandpd\[^\n\r\]*xmm18" } } */
+/* { dg-final { scan-assembler "vorpd\[^\n\r\]*xmm18" } } */
+/* { dg-final { scan-assembler "vxorpd\[^\n\r\]*xmm18" } } */
--- gcc/testsuite/gcc.target/i386/avx512vl-abs-copysign-1.c.jj	2016-05-05 12:33:18.271517159 +0200
+++ gcc/testsuite/gcc.target/i386/avx512vl-abs-copysign-1.c	2016-05-05 12:44:53.190258003 +0200
@@ -0,0 +1,71 @@
+/* { dg-do compile { target { avx512vl && { ! ia32 } } } } */
+/* { dg-options "-Ofast -mavx512vl -mno-avx512dq" } */
+
+void
+f1 (float x)
+{
+  register float a __asm ("xmm16");
+  a = x;
+  asm volatile ("" : "+v" (a));
+  a = __builtin_fabsf (a);
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f2 (float x, float y)
+{
+  register float a __asm ("xmm16"), b __asm ("xmm17");
+  a = x;
+  b = y;
+  asm volatile ("" : "+v" (a), "+v" (b));
+  a = __builtin_copysignf (a, b);
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f3 (float x)
+{
+  register float a __asm ("xmm16");
+  a = x;
+  asm volatile ("" : "+v" (a));
+  a = -a;
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f4 (double x)
+{
+  register double a __asm ("xmm18");
+  a = x;
+  asm volatile ("" : "+v" (a));
+  a = __builtin_fabs (a);
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f5 (double x, double y)
+{
+  register double a __asm ("xmm18"), b __asm ("xmm19");
+  a = x;
+  b = y;
+  asm volatile ("" : "+v" (a), "+v" (b));
+  a = __builtin_copysign (a, b);
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f6 (double x)
+{
+  register double a __asm ("xmm18");
+  a = x;
+  asm volatile ("" : "+v" (a));
+  a = -a;
+  asm volatile ("" : "+v" (a));
+}
+
+/* { dg-final { scan-assembler "vpandd\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vpord\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vpxord\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vpandq\[^\n\r\]*xmm18" } } */
+/* { dg-final { scan-assembler "vporq\[^\n\r\]*xmm18" } } */
+/* { dg-final { scan-assembler "vpxorq\[^\n\r\]*xmm18" } } */
--- gcc/testsuite/gcc.target/i386/avx512vl-abs-copysign-2.c.jj	2016-05-05 12:48:47.065142505 +0200
+++ gcc/testsuite/gcc.target/i386/avx512vl-abs-copysign-2.c	2016-05-05 12:49:44.119383688 +0200
@@ -0,0 +1,49 @@
+/* { dg-do compile { target { avx512vl && { ! ia32 } } } } */
+/* { dg-options "-Ofast -mavx512vl" } */
+
+void
+f1 (__float128 x)
+{
+  register __float128 a __asm ("xmm16");
+  a = x;
+  asm volatile ("" : "+v" (a));
+  a = __builtin_fabsq (a);
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f2 (__float128 x, __float128 y)
+{
+  register __float128 a __asm ("xmm16"), b __asm ("xmm17");
+  a = x;
+  b = y;
+  asm volatile ("" : "+v" (a), "+v" (b));
+  a = __builtin_copysignq (a, b);
+  asm volatile ("" : "+v" (a));
+}
+
+void
+f3 (__float128 x)
+{
+  register __float128 a __asm ("xmm16");
+  a = x;
+  asm volatile ("" : "+v" (a));
+  a = -a;
+  asm volatile ("" : "+v" (a));
+}
+
+__int128_t
+f4 (void)
+{
+  register __int128_t a __asm ("xmm16");
+  register __int128_t __attribute__((vector_size (16))) b __asm ("xmm17");
+  a = 1;
+  asm volatile ("" : "+v" (a));
+  b[0] = a;
+  asm volatile ("" : "+v" (b));
+  return b[0];
+}
+
+/* { dg-final { scan-assembler "vpandq\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vporq\[^\n\r\]*xmm16" } } */
+/* { dg-final { scan-assembler "vpxorq\[^\n\r\]*xmm16" } } */

	Jakub
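P.S. As a tiny illustration (a hypothetical sketch in the style of the new
tests, not part of the patch or its testsuite), the *movtf_internal change is
about TFmode moves like the following; with -Ofast -mavx512vl and the value
pinned to %xmm16 one would expect the EVEX-encoded vmovdqa64/vmovdqu64 forms
rather than plain %vmovdqa:

/* Hypothetical example, not one of the new tests.  */
__float128 g;

void
f7 (__float128 x)
{
  register __float128 a __asm ("xmm16");  /* force an EXT_REX SSE register */
  a = x;                                  /* TFmode reg-reg move into %xmm16 */
  asm volatile ("" : "+v" (a));
  g = a;                                  /* TFmode store from %xmm16 */
}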