Author: sjoerdmeijer
Date: Tue Mar 13 12:38:56 2018
New Revision: 327437

URL: http://llvm.org/viewvc/llvm-project?rev=327437&view=rev
Log:
This reverts "r327189 - [ARM] Add ARMv8.2-A FP16 vector intrinsic"
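For context, r327189 made the ARMv8.2-A FP16 vector intrinsics available on
A32 as well as A64. A minimal sketch of the user-level code affected
(illustrative only: the function name is invented, and the build flags are
those of the removed test's RUN line). Passing float16x4_t values in and out
of such functions is exactly the "how to pass f16 vectors" question referred
to below:

    #include <arm_neon.h>

    /* Build as in the removed test:
       %clang_cc1 -triple armv8.2a-linux-gnu -target-feature +neon
                  -target-feature +fullfp16 ... */
    float16x4_t scale_accumulate(float16x4_t acc, float16x4_t x, float16_t s) {
      /* vmul_n_f16 and vadd_f16 are among the A32/A64 intrinsics
         added in r327189 and reverted here. */
      return vadd_f16(acc, vmul_n_f16(x, s));
    }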
This is causing problems in testing, and PR36683 was raised. Reverting it
until we have sorted out how to pass f16 vectors.

Removed:
    cfe/trunk/test/CodeGen/arm-v8.2a-neon-intrinsics.c
Modified:
    cfe/trunk/include/clang/Basic/arm_neon.td
    cfe/trunk/lib/Basic/Targets/ARM.cpp
    cfe/trunk/lib/Basic/Targets/ARM.h
    cfe/trunk/lib/CodeGen/CGBuiltin.cpp
    cfe/trunk/lib/CodeGen/CodeGenFunction.h
    cfe/trunk/test/CodeGen/arm_neon_intrinsics.c

Modified: cfe/trunk/include/clang/Basic/arm_neon.td
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/arm_neon.td?rev=327437&r1=327436&r2=327437&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/arm_neon.td (original)
+++ cfe/trunk/include/clang/Basic/arm_neon.td Tue Mar 13 12:38:56 2018
@@ -1363,8 +1363,8 @@ def SCALAR_VDUP_LANE : IInst<"vdup_lane"
 def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "sji", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
 }

-// ARMv8.2-A FP16 vector intrinsics for A32/A64.
-let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
+// ARMv8.2-A FP16 intrinsics.
+let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)" in {

   // ARMv8.2-A FP16 one-operand vector intrinsics.

@@ -1395,12 +1395,14 @@ let ArchGuard = "defined(__ARM_FEATURE_F
   def FRINTPH : SInst<"vrndp", "dd", "hQh">;
   def FRINTMH : SInst<"vrndm", "dd", "hQh">;
   def FRINTXH : SInst<"vrndx", "dd", "hQh">;
+  def FRINTIH : SInst<"vrndi", "dd", "hQh">;

   // Misc.
   def VABSH : SInst<"vabs", "dd", "hQh">;
   def VNEGH : SOpInst<"vneg", "dd", "hQh", OP_NEG>;
   def VRECPEH : SInst<"vrecpe", "dd", "hQh">;
   def FRSQRTEH : SInst<"vrsqrte", "dd", "hQh">;
+  def FSQRTH : SInst<"vsqrt", "dd", "hQh">;

   // ARMv8.2-A FP16 two-operands vector intrinsics.

@@ -1441,13 +1443,18 @@ let ArchGuard = "defined(__ARM_FEATURE_F

   // Multiplication/Division
   def VMULH : SOpInst<"vmul", "ddd", "hQh", OP_MUL>;
+  def MULXH : SInst<"vmulx", "ddd", "hQh">;
+  def FDIVH : IOpInst<"vdiv", "ddd", "hQh", OP_DIV>;

   // Pairwise addition
-  def VPADDH : SInst<"vpadd", "ddd", "h">;
+  def VPADDH : SInst<"vpadd", "ddd", "hQh">;

   // Pairwise Max/Min
-  def VPMAXH : SInst<"vpmax", "ddd", "h">;
-  def VPMINH : SInst<"vpmin", "ddd", "h">;
+  def VPMAXH : SInst<"vpmax", "ddd", "hQh">;
+  def VPMINH : SInst<"vpmin", "ddd", "hQh">;
+  // Pairwise MaxNum/MinNum
+  def FMAXNMPH : SInst<"vpmaxnm", "ddd", "hQh">;
+  def FMINNMPH : SInst<"vpminnm", "ddd", "hQh">;

   // Reciprocal/Sqrt
   def VRECPSH : SInst<"vrecps", "ddd", "hQh">;
@@ -1461,63 +1468,6 @@ let ArchGuard = "defined(__ARM_FEATURE_F

   // ARMv8.2-A FP16 lane vector intrinsics.

-  // Mul lane
-  def VMUL_LANEH : IOpInst<"vmul_lane", "ddgi", "hQh", OP_MUL_LN>;
-  def VMUL_NH : IOpInst<"vmul_n", "dds", "hQh", OP_MUL_N>;
-
-  // Data processing intrinsics - section 5
-
-  // Logical operations
-  let isHiddenLInst = 1 in
-  def VBSLH : SInst<"vbsl", "dudd", "hQh">;
-
-  // Transposition operations
-  def VZIPH : WInst<"vzip", "2dd", "hQh">;
-  def VUZPH : WInst<"vuzp", "2dd", "hQh">;
-  def VTRNH : WInst<"vtrn", "2dd", "hQh">;
-
-
-  let ArchGuard = "!defined(__aarch64__)" in {
-    // Set all lanes to same value.
-    // Already implemented prior to ARMv8.2-A.
-    def VMOV_NH : WOpInst<"vmov_n", "ds", "hQh", OP_DUP>;
-    def VDUP_NH : WOpInst<"vdup_n", "ds", "hQh", OP_DUP>;
-    def VDUP_LANE1H : WOpInst<"vdup_lane", "dgi", "hQh", OP_DUP_LN>;
-  }
-
-  // Vector Extract
-  def VEXTH : WInst<"vext", "dddi", "hQh">;
-
-  // Reverse vector elements
-  def VREV64H : WOpInst<"vrev64", "dd", "hQh", OP_REV64>;
-}
-
-// ARMv8.2-A FP16 vector intrinsics for A64 only.
-let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)" in {
-
-  // Vector rounding
-  def FRINTIH : SInst<"vrndi", "dd", "hQh">;
-
-  // Misc.
-  def FSQRTH : SInst<"vsqrt", "dd", "hQh">;
-
-  // Multiplication/Division
-  def MULXH : SInst<"vmulx", "ddd", "hQh">;
-  def FDIVH : IOpInst<"vdiv", "ddd", "hQh", OP_DIV>;
-
-  // Pairwise addition
-  def VPADDH1 : SInst<"vpadd", "ddd", "Qh">;
-
-  // Pairwise Max/Min
-  def VPMAXH1 : SInst<"vpmax", "ddd", "Qh">;
-  def VPMINH1 : SInst<"vpmin", "ddd", "Qh">;
-
-  // Pairwise MaxNum/MinNum
-  def FMAXNMPH : SInst<"vpmaxnm", "ddd", "hQh">;
-  def FMINNMPH : SInst<"vpminnm", "ddd", "hQh">;
-
-  // ARMv8.2-A FP16 lane vector intrinsics.
-
   // FMA lane
   def VFMA_LANEH : IInst<"vfma_lane", "dddgi", "hQh">;
   def VFMA_LANEQH : IInst<"vfma_laneq", "dddji", "hQh">;
@@ -1538,7 +1488,9 @@ let ArchGuard = "defined(__ARM_FEATURE_F
   def SCALAR_FMLS_LANEQH : IOpInst<"vfms_laneq", "sssji", "Sh", OP_FMS_LNQ>;

   // Mul lane
+  def VMUL_LANEH : IOpInst<"vmul_lane", "ddgi", "hQh", OP_MUL_LN>;
   def VMUL_LANEQH : IOpInst<"vmul_laneq", "ddji", "hQh", OP_MUL_LN>;
+  def VMUL_NH : IOpInst<"vmul_n", "dds", "hQh", OP_MUL_N>;

   // Scalar floating point multiply (scalar, by element)
   def SCALAR_FMUL_LANEH : IOpInst<"vmul_lane", "ssdi", "Sh", OP_SCALAR_MUL_LN>;
   def SCALAR_FMUL_LANEQH : IOpInst<"vmul_laneq", "ssji", "Sh", OP_SCALAR_MUL_LN>;
@@ -1559,6 +1511,29 @@ let ArchGuard = "defined(__ARM_FEATURE_F
   def FMAXNMVH : SInst<"vmaxnmv", "sd", "hQh">;
   def FMINNMVH : SInst<"vminnmv", "sd", "hQh">;

+  // Data processing intrinsics - section 5
+
+  // Logical operations
+  let isHiddenLInst = 1 in
+  def VBSLH : SInst<"vbsl", "dudd", "hQh">;
+
+  // Transposition operations
+  def VZIPH : WInst<"vzip", "2dd", "hQh">;
+  def VUZPH : WInst<"vuzp", "2dd", "hQh">;
+  def VTRNH : WInst<"vtrn", "2dd", "hQh">;
+
+  // Set all lanes to same value.
+  /* Already implemented prior to ARMv8.2-A.
+  def VMOV_NH : WOpInst<"vmov_n", "ds", "hQh", OP_DUP>;
+  def VDUP_NH : WOpInst<"vdup_n", "ds", "hQh", OP_DUP>;
+  def VDUP_LANE1H : WOpInst<"vdup_lane", "dgi", "hQh", OP_DUP_LN>;*/
+
+  // Vector Extract
+  def VEXTH : WInst<"vext", "dddi", "hQh">;
+
+  // Reverse vector elements
+  def VREV64H : WOpInst<"vrev64", "dd", "hQh", OP_REV64>;
+
   // Permutation
   def VTRN1H : SOpInst<"vtrn1", "ddd", "hQh", OP_TRN1>;
   def VZIP1H : SOpInst<"vzip1", "ddd", "hQh", OP_ZIP1>;

Modified: cfe/trunk/lib/Basic/Targets/ARM.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Basic/Targets/ARM.cpp?rev=327437&r1=327436&r2=327437&view=diff
==============================================================================
--- cfe/trunk/lib/Basic/Targets/ARM.cpp (original)
+++ cfe/trunk/lib/Basic/Targets/ARM.cpp Tue Mar 13 12:38:56 2018
@@ -379,7 +379,6 @@ bool ARMTargetInfo::handleTargetFeatures
   Unaligned = 1;
   SoftFloat = SoftFloatABI = false;
   HWDiv = 0;
-  HasFullFP16 = 0;

   // This does not diagnose illegal cases like having both
   // "+vfpv2" and "+vfpv3" or having "+neon" and "+fp-only-sp".
@@ -420,8 +419,6 @@ bool ARMTargetInfo::handleTargetFeatures
       Unaligned = 0;
     } else if (Feature == "+fp16") {
       HW_FP |= HW_FP_HP;
-    } else if (Feature == "+fullfp16") {
-      HasFullFP16 = 1;
     }
   }
   HW_FP &= ~HW_FP_remove;
@@ -713,12 +710,6 @@ void ARMTargetInfo::getTargetDefines(con
   if (Opts.UnsafeFPMath)
     Builder.defineMacro("__ARM_FP_FAST", "1");

-  if ((FPU & NeonFPU) && HasFullFP16)
-    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
-  if (HasFullFP16)
-    // fp16 ARM scalar intrinsics are not implemented yet.
-    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
-
   switch (ArchKind) {
   default:
     break;

Modified: cfe/trunk/lib/Basic/Targets/ARM.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Basic/Targets/ARM.h?rev=327437&r1=327436&r2=327437&view=diff
==============================================================================
--- cfe/trunk/lib/Basic/Targets/ARM.h (original)
+++ cfe/trunk/lib/Basic/Targets/ARM.h Tue Mar 13 12:38:56 2018
@@ -69,7 +69,6 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetI
   unsigned Crypto : 1;
   unsigned DSP : 1;
   unsigned Unaligned : 1;
-  unsigned HasFullFP16 : 1;

   enum {
     LDREX_B = (1 << 0), /// byte (8-bit)

Modified: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGBuiltin.cpp?rev=327437&r1=327436&r2=327437&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp Tue Mar 13 12:38:56 2018
@@ -3397,10 +3397,10 @@ static Value *EmitTargetArchBuiltinExpr(
   case llvm::Triple::armeb:
   case llvm::Triple::thumb:
   case llvm::Triple::thumbeb:
-    return CGF->EmitARMBuiltinExpr(BuiltinID, E);
+    return CGF->EmitARMBuiltinExpr(BuiltinID, E, Arch);
   case llvm::Triple::aarch64:
   case llvm::Triple::aarch64_be:
-    return CGF->EmitAArch64BuiltinExpr(BuiltinID, E);
+    return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
   case llvm::Triple::x86:
   case llvm::Triple::x86_64:
     return CGF->EmitX86BuiltinExpr(BuiltinID, E);
@@ -3441,6 +3441,7 @@ Value *CodeGenFunction::EmitTargetBuilti

 static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
                                      NeonTypeFlags TypeFlags,
+                                     llvm::Triple::ArchType Arch,
                                      bool V1Ty=false) {
   int IsQuad = TypeFlags.isQuad();
   switch (TypeFlags.getEltType()) {
@@ -3451,7 +3452,12 @@ static llvm::VectorType *GetNeonType(Cod
   case NeonTypeFlags::Poly16:
     return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
   case NeonTypeFlags::Float16:
-    return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
+    // FIXME: Only AArch64 backend can so far properly handle half types.
+    // Remove else part once ARM backend support for half is complete.
+    if (Arch == llvm::Triple::aarch64)
+      return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
+    else
+      return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
   case NeonTypeFlags::Int32:
     return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
   case NeonTypeFlags::Int64:
@@ -3614,24 +3620,13 @@ static const NeonIntrinsicInfo ARMSIMDIn
   NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
   NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
   NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
-  NEONMAP0(vceqz_v),
-  NEONMAP0(vceqzq_v),
-  NEONMAP0(vcgez_v),
-  NEONMAP0(vcgezq_v),
-  NEONMAP0(vcgtz_v),
-  NEONMAP0(vcgtzq_v),
-  NEONMAP0(vclez_v),
-  NEONMAP0(vclezq_v),
   NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
   NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
-  NEONMAP0(vcltz_v),
-  NEONMAP0(vcltzq_v),
   NEONMAP1(vclz_v, ctlz, Add1ArgType),
   NEONMAP1(vclzq_v, ctlz, Add1ArgType),
   NEONMAP1(vcnt_v, ctpop, Add1ArgType),
   NEONMAP1(vcntq_v, ctpop, Add1ArgType),
   NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
-  NEONMAP0(vcvt_f16_v),
   NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
   NEONMAP0(vcvt_f32_v),
   NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
@@ -3695,7 +3690,6 @@ static const NeonIntrinsicInfo ARMSIMDIn
   NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
   NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
   NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
-  NEONMAP0(vcvtq_f16_v),
   NEONMAP0(vcvtq_f32_v),
   NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
   NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
@@ -3864,18 +3858,8 @@ static const NeonIntrinsicInfo AArch64SI
   NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
   NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
   NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
-  NEONMAP0(vceqz_v),
-  NEONMAP0(vceqzq_v),
-  NEONMAP0(vcgez_v),
-  NEONMAP0(vcgezq_v),
-  NEONMAP0(vcgtz_v),
-  NEONMAP0(vcgtzq_v),
-  NEONMAP0(vclez_v),
-  NEONMAP0(vclezq_v),
   NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
   NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
-  NEONMAP0(vcltz_v),
-  NEONMAP0(vcltzq_v),
   NEONMAP1(vclz_v, ctlz, Add1ArgType),
   NEONMAP1(vclzq_v, ctlz, Add1ArgType),
   NEONMAP1(vcnt_v, ctpop, Add1ArgType),
@@ -4342,7 +4326,8 @@ static Value *EmitCommonNeonSISDBuiltinE
 Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
     unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
     const char *NameHint, unsigned Modifier, const CallExpr *E,
-    SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1) {
+    SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
+    llvm::Triple::ArchType Arch) {
   // Get the last argument, which specifies the vector type.
  llvm::APSInt NeonTypeConst;
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
@@ -4354,7 +4339,7 @@ Value *CodeGenFunction::EmitCommonNeonBu
   bool Usgn = Type.isUnsigned();
   bool Quad = Type.isQuad();

-  llvm::VectorType *VTy = GetNeonType(this, Type);
+  llvm::VectorType *VTy = GetNeonType(this, Type, Arch);
   llvm::Type *Ty = VTy;
   if (!Ty)
     return nullptr;
@@ -4419,26 +4404,6 @@ Value *CodeGenFunction::EmitCommonNeonBu
     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
     return EmitNeonCall(F, Ops, NameHint);
   }
-  case NEON::BI__builtin_neon_vceqz_v:
-  case NEON::BI__builtin_neon_vceqzq_v:
-    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
-                                         ICmpInst::ICMP_EQ, "vceqz");
-  case NEON::BI__builtin_neon_vcgez_v:
-  case NEON::BI__builtin_neon_vcgezq_v:
-    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
-                                         ICmpInst::ICMP_SGE, "vcgez");
-  case NEON::BI__builtin_neon_vclez_v:
-  case NEON::BI__builtin_neon_vclezq_v:
-    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
-                                         ICmpInst::ICMP_SLE, "vclez");
-  case NEON::BI__builtin_neon_vcgtz_v:
-  case NEON::BI__builtin_neon_vcgtzq_v:
-    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
-                                         ICmpInst::ICMP_SGT, "vcgtz");
-  case NEON::BI__builtin_neon_vcltz_v:
-  case NEON::BI__builtin_neon_vcltzq_v:
-    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
-                                         ICmpInst::ICMP_SLT, "vcltz");
   case NEON::BI__builtin_neon_vclz_v:
   case NEON::BI__builtin_neon_vclzq_v:
     // We generate target-independent intrinsic, which needs a second argument
@@ -4448,13 +4413,13 @@ Value *CodeGenFunction::EmitCommonNeonBu
   case NEON::BI__builtin_neon_vcvt_f32_v:
   case NEON::BI__builtin_neon_vcvtq_f32_v:
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad));
+    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad), Arch);
     return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
   case NEON::BI__builtin_neon_vcvt_f16_v:
   case NEON::BI__builtin_neon_vcvtq_f16_v:
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad));
+    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad), Arch);
     return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
   case NEON::BI__builtin_neon_vcvt_n_f16_v:
@@ -5023,7 +4988,8 @@ static bool HasExtraNeonArgument(unsigne
 }

 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
-                                           const CallExpr *E) {
+                                           const CallExpr *E,
+                                           llvm::Triple::ArchType Arch) {
   if (auto Hint = GetValueForARMHint(BuiltinID))
     return Hint;
@@ -5562,7 +5528,7 @@ Value *CodeGenFunction::EmitARMBuiltinEx
   bool usgn = Type.isUnsigned();
   bool rightShift = false;

-  llvm::VectorType *VTy = GetNeonType(this, Type);
+  llvm::VectorType *VTy = GetNeonType(this, Type, Arch);
   llvm::Type *Ty = VTy;
   if (!Ty)
     return nullptr;
@@ -5575,7 +5541,7 @@ Value *CodeGenFunction::EmitARMBuiltinEx
   if (Builtin)
     return EmitCommonNeonBuiltinExpr(
         Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
-        Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1);
+        Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);

   unsigned Int;
   switch (BuiltinID) {
@@ -5762,7 +5728,8 @@ Value *CodeGenFunction::EmitARMBuiltinEx

 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
                                         const CallExpr *E,
-                                        SmallVectorImpl<Value *> &Ops) {
+                                        SmallVectorImpl<Value *> &Ops,
+                                        llvm::Triple::ArchType Arch) {
   unsigned int Int = 0;
   const char *s = nullptr;
@@ -5807,7 +5774,7 @@ static Value *EmitAArch64TblBuiltinExpr(
   // Determine the type of this overloaded NEON intrinsic.
   NeonTypeFlags Type(Result.getZExtValue());

-  llvm::VectorType *Ty = GetNeonType(&CGF, Type);
+  llvm::VectorType *Ty = GetNeonType(&CGF, Type, Arch);
   if (!Ty)
     return nullptr;
@@ -5917,7 +5884,8 @@ Value *CodeGenFunction::vectorWrapScalar
 }

 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
-                                               const CallExpr *E) {
+                                               const CallExpr *E,
+                                               llvm::Triple::ArchType Arch) {
   unsigned HintID = static_cast<unsigned>(-1);
   switch (BuiltinID) {
   default: break;
@@ -6860,7 +6828,7 @@ Value *CodeGenFunction::EmitAArch64Built
     }
   }

-  llvm::VectorType *VTy = GetNeonType(this, Type);
+  llvm::VectorType *VTy = GetNeonType(this, Type, Arch);
   llvm::Type *Ty = VTy;
   if (!Ty)
     return nullptr;
@@ -6874,9 +6842,9 @@ Value *CodeGenFunction::EmitAArch64Built
     return EmitCommonNeonBuiltinExpr(
         Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
         Builtin->NameHint, Builtin->TypeModifier, E, Ops,
-        /*never use addresses*/ Address::invalid(), Address::invalid());
+        /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);

-  if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
+  if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
     return V;

   unsigned Int;
@@ -6925,7 +6893,7 @@ Value *CodeGenFunction::EmitAArch64Built
     Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
     Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
     llvm::Type *VTy = GetNeonType(this,
-        NeonTypeFlags(NeonTypeFlags::Float64, false, true));
+        NeonTypeFlags(NeonTypeFlags::Float64, false, true), Arch);
     Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
     Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
     Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
@@ -7141,17 +7109,37 @@ Value *CodeGenFunction::EmitAArch64Built
     Int = Intrinsic::trunc;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
   }
+  case NEON::BI__builtin_neon_vceqz_v:
+  case NEON::BI__builtin_neon_vceqzq_v:
+    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
+                                         ICmpInst::ICMP_EQ, "vceqz");
+  case NEON::BI__builtin_neon_vcgez_v:
+  case NEON::BI__builtin_neon_vcgezq_v:
+    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
+                                         ICmpInst::ICMP_SGE, "vcgez");
+  case NEON::BI__builtin_neon_vclez_v:
+  case NEON::BI__builtin_neon_vclezq_v:
+    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
+                                         ICmpInst::ICMP_SLE, "vclez");
+  case NEON::BI__builtin_neon_vcgtz_v:
+  case NEON::BI__builtin_neon_vcgtzq_v:
+    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
+                                         ICmpInst::ICMP_SGT, "vcgtz");
+  case NEON::BI__builtin_neon_vcltz_v:
+  case NEON::BI__builtin_neon_vcltzq_v:
+    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
+                                         ICmpInst::ICMP_SLT, "vcltz");
   case NEON::BI__builtin_neon_vcvt_f64_v:
   case NEON::BI__builtin_neon_vcvtq_f64_v:
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
-    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
+    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad), Arch);
     return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
   case NEON::BI__builtin_neon_vcvt_f64_f32: {
     assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
            "unexpected vcvt_f64_f32 builtin");
     NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
-    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
+    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag, Arch));

     return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
   }
@@ -7159,7 +7147,7 @@ Value *CodeGenFunction::EmitAArch64Built
     assert(Type.getEltType() == NeonTypeFlags::Float32 &&
            "unexpected vcvt_f32_f64 builtin");
     NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
-    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
+    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag, Arch));

     return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
   }
@@ -7256,7 +7244,7 @@ Value *CodeGenFunction::EmitAArch64Built
       Quad = true;
     Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
     llvm::Type *VTy = GetNeonType(this,
-        NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
+        NeonTypeFlags(NeonTypeFlags::Float64, false, Quad), Arch);
     Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
     Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
     Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);

Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.h?rev=327437&r1=327436&r2=327437&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CodeGenFunction.h (original)
+++ cfe/trunk/lib/CodeGen/CodeGenFunction.h Tue Mar 13 12:38:56 2018
@@ -3481,7 +3481,8 @@ public:
                                          const llvm::CmpInst::Predicate Fp,
                                          const llvm::CmpInst::Predicate Ip,
                                          const llvm::Twine &Name = "");
-  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+                                  llvm::Triple::ArchType Arch);

   llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
                                          unsigned LLVMIntrinsic,
@@ -3490,7 +3491,8 @@ public:
                                          unsigned Modifier,
                                          const CallExpr *E,
                                          SmallVectorImpl<llvm::Value *> &Ops,
-                                         Address PtrOp0, Address PtrOp1);
+                                         Address PtrOp0, Address PtrOp1,
+                                         llvm::Triple::ArchType Arch);
   llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                           unsigned Modifier, llvm::Type *ArgTy,
                                           const CallExpr *E);
@@ -3504,7 +3506,8 @@ public:
   llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
                                  llvm::Type *Ty, bool usgn, const char *name);
   llvm::Value *vectorWrapScalar16(llvm::Value *Op);
-  llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+                                      llvm::Triple::ArchType Arch);
   llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
   llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);

Removed: cfe/trunk/test/CodeGen/arm-v8.2a-neon-intrinsics.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm-v8.2a-neon-intrinsics.c?rev=327436&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm-v8.2a-neon-intrinsics.c (original)
+++ cfe/trunk/test/CodeGen/arm-v8.2a-neon-intrinsics.c (removed)
@@ -1,982 +0,0 @@
-// RUN: %clang_cc1 -triple armv8.2a-linux-gnu -target-abi apcs-gnu -target-feature +neon -target-feature +fullfp16 \
-// RUN: -fallow-half-arguments-and-returns -S -disable-O0-optnone -emit-llvm -o - %s \
-// RUN: | opt -S -mem2reg \
-// RUN: | FileCheck %s
-
-// REQUIRES: arm-registered-target
-
-#include <arm_neon.h>
-
-// CHECK-LABEL: test_vabs_f16
-// CHECK: [[ABS:%.*]] = call <4 x half> @llvm.fabs.v4f16(<4 x half> %a)
-// CHECK: ret <4 x half> [[ABS]]
-float16x4_t test_vabs_f16(float16x4_t a) {
-  return vabs_f16(a);
-}
-
-// CHECK-LABEL: test_vabsq_f16
-// CHECK: [[ABS:%.*]] = call <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
-// CHECK: ret <8 x half> [[ABS]]
-float16x8_t test_vabsq_f16(float16x8_t a) {
-  return vabsq_f16(a);
-}
-
-// CHECK-LABEL: test_vceqz_f16
-// CHECK: [[TMP1:%.*]] = fcmp oeq <4 x half> %a, zeroinitializer
-// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
-// CHECK: ret <4 x i16> [[TMP2]]
-uint16x4_t test_vceqz_f16(float16x4_t a) {
-  return vceqz_f16(a);
-}
-
-// CHECK-LABEL: test_vceqzq_f16
-// CHECK: [[TMP1:%.*]] = fcmp oeq <8 x half> %a, zeroinitializer
-// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1:%.*]] to <8 x i16>
-// CHECK: ret <8 x i16> [[TMP2]]
-uint16x8_t test_vceqzq_f16(float16x8_t a) {
-  return vceqzq_f16(a);
-}
-
-// CHECK-LABEL: test_vcgez_f16
-// CHECK: [[TMP1:%.*]] = fcmp oge <4 x half> %a, zeroinitializer
-// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
-// CHECK: ret <4 x i16> [[TMP2]]
-uint16x4_t test_vcgez_f16(float16x4_t a) {
-  return vcgez_f16(a);
-}
-
-// CHECK-LABEL: test_vcgezq_f16
-// CHECK: [[TMP1:%.*]] = fcmp oge <8 x half> %a, zeroinitializer
-// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1:%.*]] to <8 x i16>
-// CHECK: ret <8 x i16> [[TMP2]]
-uint16x8_t test_vcgezq_f16(float16x8_t a) {
-  return vcgezq_f16(a);
-}
-
-// CHECK-LABEL: test_vcgtz_f16
-// CHECK: [[TMP1:%.*]] = fcmp ogt <4 x half> %a, zeroinitializer
-// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
-// CHECK: ret <4 x i16> [[TMP2]]
-uint16x4_t test_vcgtz_f16(float16x4_t a) {
-  return vcgtz_f16(a);
-}
-
-// CHECK-LABEL: test_vcgtzq_f16
-// CHECK: [[TMP1:%.*]] = fcmp ogt <8 x half> %a, zeroinitializer
-// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1:%.*]] to <8 x i16>
-// CHECK: ret <8 x i16> [[TMP2]]
-uint16x8_t test_vcgtzq_f16(float16x8_t a) {
-  return vcgtzq_f16(a);
-}
-
-// CHECK-LABEL: test_vclez_f16
-// CHECK: [[TMP1:%.*]] = fcmp ole <4 x half> %a, zeroinitializer
-// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
-// CHECK: ret <4 x i16> [[TMP2]]
-uint16x4_t test_vclez_f16(float16x4_t a) {
-  return vclez_f16(a);
-}
-
-// CHECK-LABEL: test_vclezq_f16
-// CHECK: [[TMP1:%.*]] = fcmp ole <8 x half> %a, zeroinitializer
-// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1:%.*]] to <8 x i16>
-// CHECK: ret <8 x i16> [[TMP2]]
-uint16x8_t test_vclezq_f16(float16x8_t a) {
-  return vclezq_f16(a);
-}
-
-// CHECK-LABEL: test_vcltz_f16
-// CHECK: [[TMP1:%.*]] = fcmp olt <4 x half> %a, zeroinitializer
-// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
-// CHECK: ret <4 x i16> [[TMP2]]
-uint16x4_t test_vcltz_f16(float16x4_t a) {
-  return vcltz_f16(a);
-}
-
-// CHECK-LABEL: test_vcltzq_f16
-// CHECK: [[TMP1:%.*]] = fcmp olt <8 x half> %a, zeroinitializer
-// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1:%.*]] to <8 x i16>
-// CHECK: ret <8 x i16> [[TMP2]]
-uint16x8_t test_vcltzq_f16(float16x8_t a) {
-  return vcltzq_f16(a);
-}
-
-// CHECK-LABEL: test_vcvt_f16_s16
-// CHECK: [[VCVT:%.*]] = sitofp <4 x i16> %a to <4 x half>
-// CHECK: ret <4 x half> [[VCVT]]
-float16x4_t test_vcvt_f16_s16 (int16x4_t a) {
-  return vcvt_f16_s16(a);
-}
-
-// CHECK-LABEL: test_vcvtq_f16_s16
-// CHECK: [[VCVT:%.*]] = sitofp <8 x i16> %a to <8 x half>
-// CHECK: ret <8 x half> [[VCVT]]
-float16x8_t test_vcvtq_f16_s16 (int16x8_t a) {
-  return vcvtq_f16_s16(a);
-}
-
-// CHECK-LABEL: test_vcvt_f16_u16
-// CHECK: [[VCVT:%.*]] = uitofp <4 x i16> %a to <4 x half>
-// CHECK: ret <4 x half> [[VCVT]]
-float16x4_t test_vcvt_f16_u16 (uint16x4_t a) {
-  return vcvt_f16_u16(a);
-}
-
-// CHECK-LABEL: test_vcvtq_f16_u16
-// CHECK: [[VCVT:%.*]] = uitofp <8 x i16> %a to <8 x half>
-// CHECK: ret <8 x half> [[VCVT]]
-float16x8_t test_vcvtq_f16_u16 (uint16x8_t a) {
-  return vcvtq_f16_u16(a);
-}
-
-// CHECK-LABEL: test_vcvt_s16_f16
-// CHECK: [[VCVT:%.*]] = fptosi <4 x half> %a to <4 x i16>
-// CHECK: ret <4 x i16> [[VCVT]]
-int16x4_t test_vcvt_s16_f16 (float16x4_t a) {
-  return vcvt_s16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtq_s16_f16
-// CHECK: [[VCVT:%.*]] = fptosi <8 x half> %a to <8 x i16>
-// CHECK: ret <8 x i16> [[VCVT]]
-int16x8_t test_vcvtq_s16_f16 (float16x8_t a) {
-  return vcvtq_s16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvt_u16_f16
-// CHECK: [[VCVT:%.*]] = fptoui <4 x half> %a to <4 x i16>
-// CHECK: ret <4 x i16> [[VCVT]]
-int16x4_t test_vcvt_u16_f16 (float16x4_t a) {
-  return vcvt_u16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtq_u16_f16
-// CHECK: [[VCVT:%.*]] = fptoui <8 x half> %a to <8 x i16>
-// CHECK: ret <8 x i16> [[VCVT]]
-int16x8_t test_vcvtq_u16_f16 (float16x8_t a) {
-  return vcvtq_u16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvta_s16_f16
-// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtas.v4i16.v4f16(<4 x half> %a)
-// CHECK: ret <4 x i16> [[VCVT]]
-int16x4_t test_vcvta_s16_f16 (float16x4_t a) {
-  return vcvta_s16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtaq_s16_f16
-// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.arm.neon.vcvtas.v8i16.v8f16(<8 x half> %a)
-// CHECK: ret <8 x i16> [[VCVT]]
-int16x8_t test_vcvtaq_s16_f16 (float16x8_t a) {
-  return vcvtaq_s16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtm_s16_f16
-// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtms.v4i16.v4f16(<4 x half> %a)
-// CHECK: ret <4 x i16> [[VCVT]]
-int16x4_t test_vcvtm_s16_f16 (float16x4_t a) {
-  return vcvtm_s16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtmq_s16_f16
-// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.arm.neon.vcvtms.v8i16.v8f16(<8 x half> %a)
-// CHECK: ret <8 x i16> [[VCVT]]
-int16x8_t test_vcvtmq_s16_f16 (float16x8_t a) {
-  return vcvtmq_s16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtm_u16_f16
-// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtmu.v4i16.v4f16(<4 x half> %a)
-// CHECK: ret <4 x i16> [[VCVT]]
-uint16x4_t test_vcvtm_u16_f16 (float16x4_t a) {
-  return vcvtm_u16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtmq_u16_f16
-// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.arm.neon.vcvtmu.v8i16.v8f16(<8 x half> %a)
-// CHECK: ret <8 x i16> [[VCVT]]
-uint16x8_t test_vcvtmq_u16_f16 (float16x8_t a) {
-  return vcvtmq_u16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtn_s16_f16
-// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtns.v4i16.v4f16(<4 x half> %a)
-// CHECK: ret <4 x i16> [[VCVT]]
-int16x4_t test_vcvtn_s16_f16 (float16x4_t a) {
-  return vcvtn_s16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtnq_s16_f16
-// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.arm.neon.vcvtns.v8i16.v8f16(<8 x half> %a)
-// CHECK: ret <8 x i16> [[VCVT]]
-int16x8_t test_vcvtnq_s16_f16 (float16x8_t a) {
-  return vcvtnq_s16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtn_u16_f16
-// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtnu.v4i16.v4f16(<4 x half> %a)
-// CHECK: ret <4 x i16> [[VCVT]]
-uint16x4_t test_vcvtn_u16_f16 (float16x4_t a) {
-  return vcvtn_u16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtnq_u16_f16
-// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.arm.neon.vcvtnu.v8i16.v8f16(<8 x half> %a)
-// CHECK: ret <8 x i16> [[VCVT]]
-uint16x8_t test_vcvtnq_u16_f16 (float16x8_t a) {
-  return vcvtnq_u16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtp_s16_f16
-// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtps.v4i16.v4f16(<4 x half> %a)
-// CHECK: ret <4 x i16> [[VCVT]]
-int16x4_t test_vcvtp_s16_f16 (float16x4_t a) {
-  return vcvtp_s16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtpq_s16_f16
-// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.arm.neon.vcvtps.v8i16.v8f16(<8 x half> %a)
-// CHECK: ret <8 x i16> [[VCVT]]
-int16x8_t test_vcvtpq_s16_f16 (float16x8_t a) {
-  return vcvtpq_s16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtp_u16_f16
-// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtpu.v4i16.v4f16(<4 x half> %a)
-// CHECK: ret <4 x i16> [[VCVT]]
-uint16x4_t test_vcvtp_u16_f16 (float16x4_t a) {
-  return vcvtp_u16_f16(a);
-}
-
-// CHECK-LABEL: test_vcvtpq_u16_f16
-// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.arm.neon.vcvtpu.v8i16.v8f16(<8 x half> %a)
-// CHECK: ret <8 x i16> [[VCVT]]
-uint16x8_t test_vcvtpq_u16_f16 (float16x8_t a) {
-  return vcvtpq_u16_f16(a);
-}
-
-// FIXME: Fix the zero constant when fp16 non-storage-only type becomes available.
-// CHECK-LABEL: test_vneg_f16 -// CHECK: [[NEG:%.*]] = fsub <4 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %a -// CHECK: ret <4 x half> [[NEG]] -float16x4_t test_vneg_f16(float16x4_t a) { - return vneg_f16(a); -} - -// CHECK-LABEL: test_vnegq_f16 -// CHECK: [[NEG:%.*]] = fsub <8 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %a -// CHECK: ret <8 x half> [[NEG]] -float16x8_t test_vnegq_f16(float16x8_t a) { - return vnegq_f16(a); -} - -// CHECK-LABEL: test_vrecpe_f16 -// CHECK: [[RCP:%.*]] = call <4 x half> @llvm.arm.neon.vrecpe.v4f16(<4 x half> %a) -// CHECK: ret <4 x half> [[RCP]] -float16x4_t test_vrecpe_f16(float16x4_t a) { - return vrecpe_f16(a); -} - -// CHECK-LABEL: test_vrecpeq_f16 -// CHECK: [[RCP:%.*]] = call <8 x half> @llvm.arm.neon.vrecpe.v8f16(<8 x half> %a) -// CHECK: ret <8 x half> [[RCP]] -float16x8_t test_vrecpeq_f16(float16x8_t a) { - return vrecpeq_f16(a); -} - -// CHECK-LABEL: test_vrnd_f16 -// CHECK: [[RND:%.*]] = call <4 x half> @llvm.arm.neon.vrintz.v4f16(<4 x half> %a) -// CHECK: ret <4 x half> [[RND]] -float16x4_t test_vrnd_f16(float16x4_t a) { - return vrnd_f16(a); -} - -// CHECK-LABEL: test_vrndq_f16 -// CHECK: [[RND:%.*]] = call <8 x half> @llvm.arm.neon.vrintz.v8f16(<8 x half> %a) -// CHECK: ret <8 x half> [[RND]] -float16x8_t test_vrndq_f16(float16x8_t a) { - return vrndq_f16(a); -} - -// CHECK-LABEL: test_vrnda_f16 -// CHECK: [[RND:%.*]] = call <4 x half> @llvm.arm.neon.vrinta.v4f16(<4 x half> %a) -// CHECK: ret <4 x half> [[RND]] -float16x4_t test_vrnda_f16(float16x4_t a) { - return vrnda_f16(a); -} - -// CHECK-LABEL: test_vrndaq_f16 -// CHECK: [[RND:%.*]] = call <8 x half> @llvm.arm.neon.vrinta.v8f16(<8 x half> %a) -// CHECK: ret <8 x half> [[RND]] -float16x8_t test_vrndaq_f16(float16x8_t a) { - return vrndaq_f16(a); -} - -// CHECK-LABEL: test_vrndm_f16 -// CHECK: [[RND:%.*]] = call <4 x half> @llvm.arm.neon.vrintm.v4f16(<4 x half> %a) -// CHECK: ret <4 x half> [[RND]] -float16x4_t test_vrndm_f16(float16x4_t a) { - return vrndm_f16(a); -} - -// CHECK-LABEL: test_vrndmq_f16 -// CHECK: [[RND:%.*]] = call <8 x half> @llvm.arm.neon.vrintm.v8f16(<8 x half> %a) -// CHECK: ret <8 x half> [[RND]] -float16x8_t test_vrndmq_f16(float16x8_t a) { - return vrndmq_f16(a); -} - -// CHECK-LABEL: test_vrndn_f16 -// CHECK: [[RND:%.*]] = call <4 x half> @llvm.arm.neon.vrintn.v4f16(<4 x half> %a) -// CHECK: ret <4 x half> [[RND]] -float16x4_t test_vrndn_f16(float16x4_t a) { - return vrndn_f16(a); -} - -// CHECK-LABEL: test_vrndnq_f16 -// CHECK: [[RND:%.*]] = call <8 x half> @llvm.arm.neon.vrintn.v8f16(<8 x half> %a) -// CHECK: ret <8 x half> [[RND]] -float16x8_t test_vrndnq_f16(float16x8_t a) { - return vrndnq_f16(a); -} - -// CHECK-LABEL: test_vrndp_f16 -// CHECK: [[RND:%.*]] = call <4 x half> @llvm.arm.neon.vrintp.v4f16(<4 x half> %a) -// CHECK: ret <4 x half> [[RND]] -float16x4_t test_vrndp_f16(float16x4_t a) { - return vrndp_f16(a); -} - -// CHECK-LABEL: test_vrndpq_f16 -// CHECK: [[RND:%.*]] = call <8 x half> @llvm.arm.neon.vrintp.v8f16(<8 x half> %a) -// CHECK: ret <8 x half> [[RND]] -float16x8_t test_vrndpq_f16(float16x8_t a) { - return vrndpq_f16(a); -} - -// CHECK-LABEL: test_vrndx_f16 -// CHECK: [[RND:%.*]] = call <4 x half> @llvm.arm.neon.vrintx.v4f16(<4 x half> %a) -// CHECK: ret <4 x half> [[RND]] -float16x4_t test_vrndx_f16(float16x4_t a) { - return vrndx_f16(a); -} - -// CHECK-LABEL: test_vrndxq_f16 -// CHECK: [[RND:%.*]] = call <8 x half> 
@llvm.arm.neon.vrintx.v8f16(<8 x half> %a) -// CHECK: ret <8 x half> [[RND]] -float16x8_t test_vrndxq_f16(float16x8_t a) { - return vrndxq_f16(a); -} - -// CHECK-LABEL: test_vrsqrte_f16 -// CHECK: [[RND:%.*]] = call <4 x half> @llvm.arm.neon.vrsqrte.v4f16(<4 x half> %a) -// CHECK: ret <4 x half> [[RND]] -float16x4_t test_vrsqrte_f16(float16x4_t a) { - return vrsqrte_f16(a); -} - -// CHECK-LABEL: test_vrsqrteq_f16 -// CHECK: [[RND:%.*]] = call <8 x half> @llvm.arm.neon.vrsqrte.v8f16(<8 x half> %a) -// CHECK: ret <8 x half> [[RND]] -float16x8_t test_vrsqrteq_f16(float16x8_t a) { - return vrsqrteq_f16(a); -} - -// CHECK-LABEL: test_vadd_f16 -// CHECK: [[ADD:%.*]] = fadd <4 x half> %a, %b -// CHECK: ret <4 x half> [[ADD]] -float16x4_t test_vadd_f16(float16x4_t a, float16x4_t b) { - return vadd_f16(a, b); -} - -// CHECK-LABEL: test_vaddq_f16 -// CHECK: [[ADD:%.*]] = fadd <8 x half> %a, %b -// CHECK: ret <8 x half> [[ADD]] -float16x8_t test_vaddq_f16(float16x8_t a, float16x8_t b) { - return vaddq_f16(a, b); -} - -// CHECK-LABEL: test_vabd_f16 -// CHECK: [[ABD:%.*]] = call <4 x half> @llvm.arm.neon.vabds.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x half> [[ABD]] -float16x4_t test_vabd_f16(float16x4_t a, float16x4_t b) { - return vabd_f16(a, b); -} - -// CHECK-LABEL: test_vabdq_f16 -// CHECK: [[ABD:%.*]] = call <8 x half> @llvm.arm.neon.vabds.v8f16(<8 x half> %a, <8 x half> %b) -// CHECK: ret <8 x half> [[ABD]] -float16x8_t test_vabdq_f16(float16x8_t a, float16x8_t b) { - return vabdq_f16(a, b); -} - -// CHECK-LABEL: test_vcage_f16 -// CHECK: [[ABS:%.*]] = call <4 x i16> @llvm.arm.neon.vacge.v4i16.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x i16> [[ABS]] -uint16x4_t test_vcage_f16(float16x4_t a, float16x4_t b) { - return vcage_f16(a, b); -} - -// CHECK-LABEL: test_vcageq_f16 -// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.arm.neon.vacge.v8i16.v8f16(<8 x half> %a, <8 x half> %b) -// CHECK: ret <8 x i16> [[ABS]] -uint16x8_t test_vcageq_f16(float16x8_t a, float16x8_t b) { - return vcageq_f16(a, b); -} - -// CHECK-LABEL: test_vcagt_f16 -// CHECK: [[ABS:%.*]] = call <4 x i16> @llvm.arm.neon.vacgt.v4i16.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x i16> [[ABS]] -uint16x4_t test_vcagt_f16(float16x4_t a, float16x4_t b) { - return vcagt_f16(a, b); -} - -// CHECK-LABEL: test_vcagtq_f16 -// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.arm.neon.vacgt.v8i16.v8f16(<8 x half> %a, <8 x half> %b) -// CHECK: ret <8 x i16> [[ABS]] -uint16x8_t test_vcagtq_f16(float16x8_t a, float16x8_t b) { - return vcagtq_f16(a, b); -} - -// CHECK-LABEL: test_vcale_f16 -// CHECK: [[ABS:%.*]] = call <4 x i16> @llvm.arm.neon.vacge.v4i16.v4f16(<4 x half> %b, <4 x half> %a) -// CHECK: ret <4 x i16> [[ABS]] -uint16x4_t test_vcale_f16(float16x4_t a, float16x4_t b) { - return vcale_f16(a, b); -} - -// CHECK-LABEL: test_vcaleq_f16 -// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.arm.neon.vacge.v8i16.v8f16(<8 x half> %b, <8 x half> %a) -// CHECK: ret <8 x i16> [[ABS]] -uint16x8_t test_vcaleq_f16(float16x8_t a, float16x8_t b) { - return vcaleq_f16(a, b); -} - -// CHECK-LABEL: test_vcalt_f16 -// CHECK: [[ABS:%.*]] = call <4 x i16> @llvm.arm.neon.vacgt.v4i16.v4f16(<4 x half> %b, <4 x half> %a) -// CHECK: ret <4 x i16> [[ABS]] -uint16x4_t test_vcalt_f16(float16x4_t a, float16x4_t b) { - return vcalt_f16(a, b); -} - -// CHECK-LABEL: test_vcaltq_f16 -// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.arm.neon.vacgt.v8i16.v8f16(<8 x half> %b, <8 x half> %a) -// CHECK: ret <8 x i16> [[ABS]] -uint16x8_t test_vcaltq_f16(float16x8_t 
a, float16x8_t b) { - return vcaltq_f16(a, b); -} - -// CHECK-LABEL: test_vceq_f16 -// CHECK: [[TMP1:%.*]] = fcmp oeq <4 x half> %a, %b -// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16> -// CHECK: ret <4 x i16> [[TMP2]] -uint16x4_t test_vceq_f16(float16x4_t a, float16x4_t b) { - return vceq_f16(a, b); -} - -// CHECK-LABEL: test_vceqq_f16 -// CHECK: [[TMP1:%.*]] = fcmp oeq <8 x half> %a, %b -// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1:%.*]] to <8 x i16> -// CHECK: ret <8 x i16> [[TMP2]] -uint16x8_t test_vceqq_f16(float16x8_t a, float16x8_t b) { - return vceqq_f16(a, b); -} - -// CHECK-LABEL: test_vcge_f16 -// CHECK: [[TMP1:%.*]] = fcmp oge <4 x half> %a, %b -// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16> -// CHECK: ret <4 x i16> [[TMP2]] -uint16x4_t test_vcge_f16(float16x4_t a, float16x4_t b) { - return vcge_f16(a, b); -} - -// CHECK-LABEL: test_vcgeq_f16 -// CHECK: [[TMP1:%.*]] = fcmp oge <8 x half> %a, %b -// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1:%.*]] to <8 x i16> -// CHECK: ret <8 x i16> [[TMP2]] -uint16x8_t test_vcgeq_f16(float16x8_t a, float16x8_t b) { - return vcgeq_f16(a, b); -} - -// CHECK-LABEL: test_vcgt_f16 -// CHECK: [[TMP1:%.*]] = fcmp ogt <4 x half> %a, %b -// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16> -// CHECK: ret <4 x i16> [[TMP2]] -uint16x4_t test_vcgt_f16(float16x4_t a, float16x4_t b) { - return vcgt_f16(a, b); -} - -// CHECK-LABEL: test_vcgtq_f16 -// CHECK: [[TMP1:%.*]] = fcmp ogt <8 x half> %a, %b -// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1:%.*]] to <8 x i16> -// CHECK: ret <8 x i16> [[TMP2]] -uint16x8_t test_vcgtq_f16(float16x8_t a, float16x8_t b) { - return vcgtq_f16(a, b); -} - -// CHECK-LABEL: test_vcle_f16 -// CHECK: [[TMP1:%.*]] = fcmp ole <4 x half> %a, %b -// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16> -// CHECK: ret <4 x i16> [[TMP2]] -uint16x4_t test_vcle_f16(float16x4_t a, float16x4_t b) { - return vcle_f16(a, b); -} - -// CHECK-LABEL: test_vcleq_f16 -// CHECK: [[TMP1:%.*]] = fcmp ole <8 x half> %a, %b -// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1:%.*]] to <8 x i16> -// CHECK: ret <8 x i16> [[TMP2]] -uint16x8_t test_vcleq_f16(float16x8_t a, float16x8_t b) { - return vcleq_f16(a, b); -} - -// CHECK-LABEL: test_vclt_f16 -// CHECK: [[TMP1:%.*]] = fcmp olt <4 x half> %a, %b -// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16> -// CHECK: ret <4 x i16> [[TMP2]] -uint16x4_t test_vclt_f16(float16x4_t a, float16x4_t b) { - return vclt_f16(a, b); -} - -// CHECK-LABEL: test_vcltq_f16 -// CHECK: [[TMP1:%.*]] = fcmp olt <8 x half> %a, %b -// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1:%.*]] to <8 x i16> -// CHECK: ret <8 x i16> [[TMP2]] -uint16x8_t test_vcltq_f16(float16x8_t a, float16x8_t b) { - return vcltq_f16(a, b); -} - -// CHECK-LABEL: test_vcvt_n_f16_s16 -// CHECK: [[CVT:%.*]] = call <4 x half> @llvm.arm.neon.vcvtfxs2fp.v4f16.v4i16(<4 x i16> %vcvt_n, i32 2) -// CHECK: ret <4 x half> [[CVT]] -float16x4_t test_vcvt_n_f16_s16(int16x4_t a) { - return vcvt_n_f16_s16(a, 2); -} - -// CHECK-LABEL: test_vcvtq_n_f16_s16 -// CHECK: [[CVT:%.*]] = call <8 x half> @llvm.arm.neon.vcvtfxs2fp.v8f16.v8i16(<8 x i16> %vcvt_n, i32 2) -// CHECK: ret <8 x half> [[CVT]] -float16x8_t test_vcvtq_n_f16_s16(int16x8_t a) { - return vcvtq_n_f16_s16(a, 2); -} - -// CHECK-LABEL: test_vcvt_n_f16_u16 -// CHECK: [[CVT:%.*]] = call <4 x half> @llvm.arm.neon.vcvtfxu2fp.v4f16.v4i16(<4 x i16> %vcvt_n, i32 2) -// CHECK: ret <4 x half> [[CVT]] -float16x4_t test_vcvt_n_f16_u16(uint16x4_t a) { - return vcvt_n_f16_u16(a, 2); -} - -// 
CHECK-LABEL: test_vcvtq_n_f16_u16 -// CHECK: [[CVT:%.*]] = call <8 x half> @llvm.arm.neon.vcvtfxu2fp.v8f16.v8i16(<8 x i16> %vcvt_n, i32 2) -// CHECK: ret <8 x half> [[CVT]] -float16x8_t test_vcvtq_n_f16_u16(uint16x8_t a) { - return vcvtq_n_f16_u16(a, 2); -} - -// CHECK-LABEL: test_vcvt_n_s16_f16 -// CHECK: [[CVT:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtfp2fxs.v4i16.v4f16(<4 x half> %vcvt_n, i32 2) -// CHECK: ret <4 x i16> [[CVT]] -int16x4_t test_vcvt_n_s16_f16(float16x4_t a) { - return vcvt_n_s16_f16(a, 2); -} - -// CHECK-LABEL: test_vcvtq_n_s16_f16 -// CHECK: [[CVT:%.*]] = call <8 x i16> @llvm.arm.neon.vcvtfp2fxs.v8i16.v8f16(<8 x half> %vcvt_n, i32 2) -// CHECK: ret <8 x i16> [[CVT]] -int16x8_t test_vcvtq_n_s16_f16(float16x8_t a) { - return vcvtq_n_s16_f16(a, 2); -} - -// CHECK-LABEL: test_vcvt_n_u16_f16 -// CHECK: [[CVT:%.*]] = call <4 x i16> @llvm.arm.neon.vcvtfp2fxu.v4i16.v4f16(<4 x half> %vcvt_n, i32 2) -// CHECK: ret <4 x i16> [[CVT]] -uint16x4_t test_vcvt_n_u16_f16(float16x4_t a) { - return vcvt_n_u16_f16(a, 2); -} - -// CHECK-LABEL: test_vcvtq_n_u16_f16 -// CHECK: [[CVT:%.*]] = call <8 x i16> @llvm.arm.neon.vcvtfp2fxu.v8i16.v8f16(<8 x half> %vcvt_n, i32 2) -// CHECK: ret <8 x i16> [[CVT]] -uint16x8_t test_vcvtq_n_u16_f16(float16x8_t a) { - return vcvtq_n_u16_f16(a, 2); -} - -// CHECK-LABEL: test_vmax_f16 -// CHECK: [[MAX:%.*]] = call <4 x half> @llvm.arm.neon.vmaxs.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x half> [[MAX]] -float16x4_t test_vmax_f16(float16x4_t a, float16x4_t b) { - return vmax_f16(a, b); -} - -// CHECK-LABEL: test_vmaxq_f16 -// CHECK: [[MAX:%.*]] = call <8 x half> @llvm.arm.neon.vmaxs.v8f16(<8 x half> %a, <8 x half> %b) -// CHECK: ret <8 x half> [[MAX]] -float16x8_t test_vmaxq_f16(float16x8_t a, float16x8_t b) { - return vmaxq_f16(a, b); -} - -// CHECK-LABEL: test_vmaxnm_f16 -// CHECK: [[MAX:%.*]] = call <4 x half> @llvm.arm.neon.vmaxnm.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x half> [[MAX]] -float16x4_t test_vmaxnm_f16(float16x4_t a, float16x4_t b) { - return vmaxnm_f16(a, b); -} - -// CHECK-LABEL: test_vmaxnmq_f16 -// CHECK: [[MAX:%.*]] = call <8 x half> @llvm.arm.neon.vmaxnm.v8f16(<8 x half> %a, <8 x half> %b) -// CHECK: ret <8 x half> [[MAX]] -float16x8_t test_vmaxnmq_f16(float16x8_t a, float16x8_t b) { - return vmaxnmq_f16(a, b); -} - -// CHECK-LABEL: test_vmin_f16 -// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.arm.neon.vmins.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x half> [[MIN]] -float16x4_t test_vmin_f16(float16x4_t a, float16x4_t b) { - return vmin_f16(a, b); -} - -// CHECK-LABEL: test_vminq_f16 -// CHECK: [[MIN:%.*]] = call <8 x half> @llvm.arm.neon.vmins.v8f16(<8 x half> %a, <8 x half> %b) -// CHECK: ret <8 x half> [[MIN]] -float16x8_t test_vminq_f16(float16x8_t a, float16x8_t b) { - return vminq_f16(a, b); -} - -// CHECK-LABEL: test_vminnm_f16 -// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.arm.neon.vminnm.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x half> [[MIN]] -float16x4_t test_vminnm_f16(float16x4_t a, float16x4_t b) { - return vminnm_f16(a, b); -} - -// CHECK-LABEL: test_vminnmq_f16 -// CHECK: [[MIN:%.*]] = call <8 x half> @llvm.arm.neon.vminnm.v8f16(<8 x half> %a, <8 x half> %b) -// CHECK: ret <8 x half> [[MIN]] -float16x8_t test_vminnmq_f16(float16x8_t a, float16x8_t b) { - return vminnmq_f16(a, b); -} - -// CHECK-LABEL: test_vmul_f16 -// CHECK: [[MUL:%.*]] = fmul <4 x half> %a, %b -// CHECK: ret <4 x half> [[MUL]] -float16x4_t test_vmul_f16(float16x4_t a, float16x4_t b) { - return vmul_f16(a, 
b); -} - -// CHECK-LABEL: test_vmulq_f16 -// CHECK: [[MUL:%.*]] = fmul <8 x half> %a, %b -// CHECK: ret <8 x half> [[MUL]] -float16x8_t test_vmulq_f16(float16x8_t a, float16x8_t b) { - return vmulq_f16(a, b); -} - -// CHECK-LABEL: test_vpadd_f16 -// CHECK: [[ADD:%.*]] = call <4 x half> @llvm.arm.neon.vpadd.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x half> [[ADD]] -float16x4_t test_vpadd_f16(float16x4_t a, float16x4_t b) { - return vpadd_f16(a, b); -} - -// CHECK-LABEL: test_vpmax_f16 -// CHECK: [[MAX:%.*]] = call <4 x half> @llvm.arm.neon.vpmaxs.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x half> [[MAX]] -float16x4_t test_vpmax_f16(float16x4_t a, float16x4_t b) { - return vpmax_f16(a, b); -} - -// CHECK-LABEL: test_vpmin_f16 -// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.arm.neon.vpmins.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x half> [[MIN]] -float16x4_t test_vpmin_f16(float16x4_t a, float16x4_t b) { - return vpmin_f16(a, b); -} - -// CHECK-LABEL: test_vrecps_f16 -// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.arm.neon.vrecps.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x half> [[MIN]] -float16x4_t test_vrecps_f16(float16x4_t a, float16x4_t b) { - return vrecps_f16(a, b); -} - -// CHECK-LABEL: test_vrecpsq_f16 -// CHECK: [[MIN:%.*]] = call <8 x half> @llvm.arm.neon.vrecps.v8f16(<8 x half> %a, <8 x half> %b) -// CHECK: ret <8 x half> [[MIN]] -float16x8_t test_vrecpsq_f16(float16x8_t a, float16x8_t b) { - return vrecpsq_f16(a, b); -} - -// CHECK-LABEL: test_vrsqrts_f16 -// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.arm.neon.vrsqrts.v4f16(<4 x half> %a, <4 x half> %b) -// CHECK: ret <4 x half> [[MIN]] -float16x4_t test_vrsqrts_f16(float16x4_t a, float16x4_t b) { - return vrsqrts_f16(a, b); -} - -// CHECK-LABEL: test_vrsqrtsq_f16 -// CHECK: [[MIN:%.*]] = call <8 x half> @llvm.arm.neon.vrsqrts.v8f16(<8 x half> %a, <8 x half> %b) -// CHECK: ret <8 x half> [[MIN]] -float16x8_t test_vrsqrtsq_f16(float16x8_t a, float16x8_t b) { - return vrsqrtsq_f16(a, b); -} - -// CHECK-LABEL: test_vsub_f16 -// CHECK: [[ADD:%.*]] = fsub <4 x half> %a, %b -// CHECK: ret <4 x half> [[ADD]] -float16x4_t test_vsub_f16(float16x4_t a, float16x4_t b) { - return vsub_f16(a, b); -} - -// CHECK-LABEL: test_vsubq_f16 -// CHECK: [[ADD:%.*]] = fsub <8 x half> %a, %b -// CHECK: ret <8 x half> [[ADD]] -float16x8_t test_vsubq_f16(float16x8_t a, float16x8_t b) { - return vsubq_f16(a, b); -} - -// CHECK-LABEL: test_vfma_f16 -// CHECK: [[ADD:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a) -// CHECK: ret <4 x half> [[ADD]] -float16x4_t test_vfma_f16(float16x4_t a, float16x4_t b, float16x4_t c) { - return vfma_f16(a, b, c); -} - -// CHECK-LABEL: test_vfmaq_f16 -// CHECK: [[ADD:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a) -// CHECK: ret <8 x half> [[ADD]] -float16x8_t test_vfmaq_f16(float16x8_t a, float16x8_t b, float16x8_t c) { - return vfmaq_f16(a, b, c); -} - -// CHECK-LABEL: test_vfms_f16 -// CHECK: [[SUB:%.*]] = fsub <4 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b -// CHECK: [[ADD:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a) -// CHECK: ret <4 x half> [[ADD]] -float16x4_t test_vfms_f16(float16x4_t a, float16x4_t b, float16x4_t c) { - return vfms_f16(a, b, c); -} - -// CHECK-LABEL: test_vfmsq_f16 -// CHECK: [[SUB:%.*]] = fsub <8 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 
0xH8000>, %b -// CHECK: [[ADD:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a) -// CHECK: ret <8 x half> [[ADD]] -float16x8_t test_vfmsq_f16(float16x8_t a, float16x8_t b, float16x8_t c) { - return vfmsq_f16(a, b, c); -} - -// CHECK-LABEL: test_vmul_lane_f16 -// CHECK: [[TMP0:%.*]] = shufflevector <4 x half> %b, <4 x half> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 3> -// CHECK: [[MUL:%.*]] = fmul <4 x half> %a, [[TMP0]] -// CHECK: ret <4 x half> [[MUL]] -float16x4_t test_vmul_lane_f16(float16x4_t a, float16x4_t b) { - return vmul_lane_f16(a, b, 3); -} - -// CHECK-LABEL: test_vmulq_lane_f16 -// CHECK: [[TMP0:%.*]] = shufflevector <4 x half> %b, <4 x half> %b, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> -// CHECK: [[MUL:%.*]] = fmul <8 x half> %a, [[TMP0]] -// CHECK: ret <8 x half> [[MUL]] -float16x8_t test_vmulq_lane_f16(float16x8_t a, float16x4_t b) { - return vmulq_lane_f16(a, b, 7); -} - -// CHECK-LABEL: test_vmul_n_f16 -// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half [[b:%.*]], i32 0 -// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half [[b]], i32 1 -// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half [[b]], i32 2 -// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half [[b]], i32 3 -// CHECK: [[MUL:%.*]] = fmul <4 x half> %a, [[TMP3]] -// CHECK: ret <4 x half> [[MUL]] -float16x4_t test_vmul_n_f16(float16x4_t a, float16_t b) { - return vmul_n_f16(a, b); -} - -// CHECK-LABEL: test_vmulq_n_f16 -// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half [[b:%.*]], i32 0 -// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half [[b]], i32 1 -// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[b]], i32 2 -// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[b]], i32 3 -// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[b]], i32 4 -// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[b]], i32 5 -// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[b]], i32 6 -// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[b]], i32 7 -// CHECK: [[MUL:%.*]] = fmul <8 x half> %a, [[TMP7]] -// CHECK: ret <8 x half> [[MUL]] -float16x8_t test_vmulq_n_f16(float16x8_t a, float16_t b) { - return vmulq_n_f16(a, b); -} - -// CHECK-LABEL: test_vbsl_f16 -// CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8> -// CHECK: [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8> -// CHECK: [[VBSL:%.*]] = call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]], <8 x i8> [[TMP2]]) -// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[VBSL]] to <4 x half> -// CHECK: ret <4 x half> [[TMP3]] -float16x4_t test_vbsl_f16(uint16x4_t a, float16x4_t b, float16x4_t c) { - return vbsl_f16(a, b, c); -} - -// CHECK-LABEL: test_vbslq_f16 -// CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8> -// CHECK: [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8> -// CHECK: [[VBSL:%.*]] = call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]]) -// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[VBSL]] to <8 x half> -// CHECK: ret <8 x half> [[TMP3]] -float16x8_t test_vbslq_f16(uint16x8_t a, float16x8_t b, float16x8_t c) { - return vbslq_f16(a, b, c); -} - -// CHECK-LABEL: test_vzip_f16 -// CHECK: [[VZIP0:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 
4, i32 1, i32 5> -// CHECK: store <4 x half> [[VZIP0]], <4 x half>* [[addr1:%.*]] -// CHECK: [[VZIP1:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7> -// CHECK: store <4 x half> [[VZIP1]], <4 x half>* [[addr2:%.*]] -float16x4x2_t test_vzip_f16(float16x4_t a, float16x4_t b) { - return vzip_f16(a, b); -} - -// CHECK-LABEL: test_vzipq_f16 -// CHECK: [[VZIP0:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> -// CHECK: store <8 x half> [[VZIP0]], <8 x half>* [[addr1:%.*]] -// CHECK: [[VZIP1:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> -// CHECK: store <8 x half> [[VZIP1]], <8 x half>* [[addr2:%.*]] -float16x8x2_t test_vzipq_f16(float16x8_t a, float16x8_t b) { - return vzipq_f16(a, b); -} - -// CHECK-LABEL: test_vuzp_f16 -// CHECK: [[VUZP0:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6> -// CHECK: store <4 x half> [[VUZP0]], <4 x half>* [[addr1:%.*]] -// CHECK: [[VUZP1:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7> -// CHECK: store <4 x half> [[VUZP1]], <4 x half>* [[addr1:%.*]] -float16x4x2_t test_vuzp_f16(float16x4_t a, float16x4_t b) { - return vuzp_f16(a, b); -} - -// CHECK-LABEL: test_vuzpq_f16 -// CHECK: [[VUZP0:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> -// CHECK: store <8 x half> [[VUZP0]], <8 x half>* [[addr1:%.*]] -// CHECK: [[VUZP1:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> -// CHECK: store <8 x half> [[VUZP1]], <8 x half>* [[addr2:%.*]] -float16x8x2_t test_vuzpq_f16(float16x8_t a, float16x8_t b) { - return vuzpq_f16(a, b); -} - -// CHECK-LABEL: test_vtrn_f16 -// CHECK: [[VTRN0:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6> -// CHECK: store <4 x half> [[VTRN0]], <4 x half>* [[addr1:%.*]] -// CHECK: [[VTRN1:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7> -// CHECK: store <4 x half> [[VTRN1]], <4 x half>* [[addr2:%.*]] -float16x4x2_t test_vtrn_f16(float16x4_t a, float16x4_t b) { - return vtrn_f16(a, b); -} - -// CHECK-LABEL: test_vtrnq_f16 -// CHECK: [[VTRN0:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14> -// CHECK: store <8 x half> [[VTRN0]], <8 x half>* [[addr1:%.*]] -// CHECK: [[VTRN1:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15> -// CHECK: store <8 x half> [[VTRN1]], <8 x half>* [[addr2:%.*]] -float16x8x2_t test_vtrnq_f16(float16x8_t a, float16x8_t b) { - return vtrnq_f16(a, b); -} - -// CHECK-LABEL: test_vmov_n_f16 -// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half [[ARG:%.*]], i32 0 -// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half [[ARG]], i32 1 -// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half [[ARG]], i32 2 -// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half [[ARG]], i32 3 -// CHECK: ret <4 x half> [[TMP3]] -float16x4_t test_vmov_n_f16(float16_t a) { - return vmov_n_f16(a); -} - -// CHECK-LABEL: test_vmovq_n_f16 -// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half [[ARG:%.*]], i32 0 -// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half 
[[ARG]], i32 1 -// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[ARG]], i32 2 -// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[ARG]], i32 3 -// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[ARG]], i32 4 -// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[ARG]], i32 5 -// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[ARG]], i32 6 -// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[ARG]], i32 7 -// CHECK: ret <8 x half> [[TMP7]] -float16x8_t test_vmovq_n_f16(float16_t a) { - return vmovq_n_f16(a); -} - -// CHECK-LABEL: test_vdup_n_f16 -// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half [[ARG:%.*]], i32 0 -// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half [[ARG]], i32 1 -// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half [[ARG]], i32 2 -// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half [[ARG]], i32 3 -// CHECK: ret <4 x half> [[TMP3]] -float16x4_t test_vdup_n_f16(float16_t a) { - return vdup_n_f16(a); -} - -// CHECK-LABEL: test_vdupq_n_f16 -// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half [[ARG:%.*]], i32 0 -// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half [[ARG]], i32 1 -// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[ARG]], i32 2 -// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[ARG]], i32 3 -// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[ARG]], i32 4 -// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[ARG]], i32 5 -// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[ARG]], i32 6 -// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[ARG]], i32 7 -// CHECK: ret <8 x half> [[TMP7]] -float16x8_t test_vdupq_n_f16(float16_t a) { - return vdupq_n_f16(a); -} - -// CHECK-LABEL: test_vdup_lane_f16 -// CHECK: [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %a, <4 x i32> <i32 3, i32 3, i32 3, i32 3> -// CHECK: ret <4 x half> [[SHFL]] -float16x4_t test_vdup_lane_f16(float16x4_t a) { - return vdup_lane_f16(a, 3); -} - -// CHECK-LABEL: test_vdupq_lane_f16 -// CHECK: [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %a, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> -// CHECK: ret <8 x half> [[SHFL]] -float16x8_t test_vdupq_lane_f16(float16x4_t a) { - return vdupq_lane_f16(a, 7); -} - -// CHECK-LABEL: @test_vext_f16( -// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8> -// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8> -// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half> -// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half> -// CHECK: [[VEXT:%.*]] = shufflevector <4 x half> [[TMP2]], <4 x half> [[TMP3]], <4 x i32> <i32 2, i32 3, i32 4, i32 5> -// CHECK: ret <4 x half> [[VEXT]] -float16x4_t test_vext_f16(float16x4_t a, float16x4_t b) { - return vext_f16(a, b, 2); -} - -// CHECK-LABEL: @test_vextq_f16( -// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8> -// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8> -// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half> -// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half> -// CHECK: [[VEXT:%.*]] = shufflevector <8 x half> [[TMP2]], <8 x half> [[TMP3]], <8 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12> -// CHECK: ret <8 x half> [[VEXT]] -float16x8_t test_vextq_f16(float16x8_t a, float16x8_t b) { - return vextq_f16(a, b, 5); -} - -// CHECK-LABEL: 
-// CHECK-LABEL: @test_vrev64_f16(
-// CHECK: [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %a, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-// CHECK: ret <4 x half> [[SHFL]]
-float16x4_t test_vrev64_f16(float16x4_t a) {
-  return vrev64_f16(a);
-}
-
-// CHECK-LABEL: @test_vrev64q_f16(
-// CHECK: [[SHFL:%.*]] = shufflevector <8 x half> %a, <8 x half> %a, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
-// CHECK: ret <8 x half> [[SHFL]]
-float16x8_t test_vrev64q_f16(float16x8_t a) {
-  return vrev64q_f16(a);
-}

Modified: cfe/trunk/test/CodeGen/arm_neon_intrinsics.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm_neon_intrinsics.c?rev=327437&r1=327436&r2=327437&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm_neon_intrinsics.c (original)
+++ cfe/trunk/test/CodeGen/arm_neon_intrinsics.c Tue Mar 13 12:38:56 2018
@@ -3896,8 +3896,9 @@ int64x2_t test_vld1q_s64(int64_t const *
 // CHECK-LABEL: @test_vld1q_f16(
 // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD1:%.*]] = call <8 x half> @llvm.arm.neon.vld1.v8f16.p0i8(i8* [[TMP0]], i32 2)
-// CHECK: ret <8 x half> [[VLD1]]
+// CHECK: [[VLD1:%.*]] = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* [[TMP0]], i32 2)
+// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> [[VLD1]] to <8 x half>
+// CHECK: ret <8 x half> [[TMP1]]
 float16x8_t test_vld1q_f16(float16_t const * a) {
   return vld1q_f16(a);
 }
@@ -3989,8 +3990,9 @@ int64x1_t test_vld1_s64(int64_t const *
 // CHECK-LABEL: @test_vld1_f16(
 // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD1:%.*]] = call <4 x half> @llvm.arm.neon.vld1.v4f16.p0i8(i8* [[TMP0]], i32 2)
-// CHECK: ret <4 x half> [[VLD1]]
+// CHECK: [[VLD1:%.*]] = call <4 x i16> @llvm.arm.neon.vld1.v4i16.p0i8(i8* [[TMP0]], i32 2)
+// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> [[VLD1]] to <4 x half>
+// CHECK: ret <4 x half> [[TMP1]]
 float16x4_t test_vld1_f16(float16_t const * a) {
   return vld1_f16(a);
 }
@@ -4104,11 +4106,12 @@ int64x2_t test_vld1q_dup_s64(int64_t con
 // CHECK-LABEL: @test_vld1q_dup_f16(
 // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half*
-// CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]], align 2
-// CHECK: [[TMP3:%.*]] = insertelement <8 x half> undef, half [[TMP2]], i32 0
-// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP3]], <8 x half> [[TMP3]], <8 x i32> zeroinitializer
-// CHECK: ret <8 x half> [[LANE]]
+// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
+// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2
+// CHECK: [[TMP3:%.*]] = insertelement <8 x i16> undef, i16 [[TMP2]], i32 0
+// CHECK: [[LANE:%.*]] = shufflevector <8 x i16> [[TMP3]], <8 x i16> [[TMP3]], <8 x i32> zeroinitializer
+// CHECK: [[TMP4:%.*]] = bitcast <8 x i16> [[LANE]] to <8 x half>
+// CHECK: ret <8 x half> [[TMP4]]
 float16x8_t test_vld1q_dup_f16(float16_t const * a) {
   return vld1q_dup_f16(a);
 }
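These arm_neon_intrinsics.c hunks restore the pre-r327189 codegen: f16 loads are emitted as i16 vector loads followed by a bitcast, because f16 vectors cannot yet be passed around directly (PR36683). A source-level sketch of the same bit-pattern round trip, using only the long-standing u16 intrinsics and vreinterpret from <arm_neon.h>:

  #include <arm_neon.h>

  // Load four halves as raw 16-bit lanes, then reinterpret them as f16,
  // mirroring the <4 x i16> to <4 x half> bitcast the revert restores.
  float16x4_t load_f16_via_u16(const uint16_t *p) {
    uint16x4_t bits = vld1_u16(p);
    return vreinterpret_f16_u16(bits);
  }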
@@ -4230,11 +4233,12 @@ int64x1_t test_vld1_dup_s64(int64_t cons
 // CHECK-LABEL: @test_vld1_dup_f16(
 // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half*
-// CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]], align 2
-// CHECK: [[TMP3:%.*]] = insertelement <4 x half> undef, half [[TMP2]], i32 0
-// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <4 x i32> zeroinitializer
-// CHECK: ret <4 x half> [[LANE]]
+// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
+// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2
+// CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[TMP2]], i32 0
+// CHECK: [[LANE:%.*]] = shufflevector <4 x i16> [[TMP3]], <4 x i16> [[TMP3]], <4 x i32> zeroinitializer
+// CHECK: [[TMP4:%.*]] = bitcast <4 x i16> [[LANE]] to <4 x half>
+// CHECK: ret <4 x half> [[TMP4]]
 float16x4_t test_vld1_dup_f16(float16_t const * a) {
   return vld1_dup_f16(a);
 }
@@ -4361,11 +4365,12 @@ int64x2_t test_vld1q_lane_s64(int64_t co
 // CHECK-LABEL: @test_vld1q_lane_f16(
 // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
 // CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
-// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
-// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to half*
-// CHECK: [[TMP4:%.*]] = load half, half* [[TMP3]], align 2
-// CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x half> [[TMP2]], half [[TMP4]], i32 7
-// CHECK: ret <8 x half> [[VLD1_LANE]]
+// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
+// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i16*
+// CHECK: [[TMP4:%.*]] = load i16, i16* [[TMP3]], align 2
+// CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[TMP4]], i32 7
+// CHECK: [[TMP5:%.*]] = bitcast <8 x i16> [[VLD1_LANE]] to <8 x half>
+// CHECK: ret <8 x half> [[TMP5]]
 float16x8_t test_vld1q_lane_f16(float16_t const * a, float16x8_t b) {
   return vld1q_lane_f16(a, b, 7);
 }
@@ -4493,11 +4498,12 @@ int64x1_t test_vld1_lane_s64(int64_t con
 // CHECK-LABEL: @test_vld1_lane_f16(
 // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
 // CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
-// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
-// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to half*
-// CHECK: [[TMP4:%.*]] = load half, half* [[TMP3]], align 2
-// CHECK: [[VLD1_LANE:%.*]] = insertelement <4 x half> [[TMP2]], half [[TMP4]], i32 3
-// CHECK: ret <4 x half> [[VLD1_LANE]]
+// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
+// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to i16*
+// CHECK: [[TMP4:%.*]] = load i16, i16* [[TMP3]], align 2
+// CHECK: [[VLD1_LANE:%.*]] = insertelement <4 x i16> [[TMP2]], i16 [[TMP4]], i32 3
+// CHECK: [[TMP5:%.*]] = bitcast <4 x i16> [[VLD1_LANE]] to <4 x half>
+// CHECK: ret <4 x half> [[TMP5]]
 float16x4_t test_vld1_lane_f16(float16_t const * a, float16x4_t b) {
   return vld1_lane_f16(a, b, 3);
 }
@@ -4590,7 +4596,7 @@ int32x4x2_t test_vld2q_s32(int32_t const
 // CHECK: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 16
 // CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[__RET]] to i8*
 // CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD2Q_V:%.*]] = call { <8 x half>, <8 x half>
+// CHECK: [[VLD2Q_V:%.*]] = call { <8 x i16>, <8 x i16>
 float16x8x2_t test_vld2q_f16(float16_t const * a) {
   return vld2q_f16(a);
 }
@@ -4695,7 +4701,7 @@ int64x1x2_t test_vld2_s64(int64_t const
 // CHECK: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8
 // CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
 // CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD2_V:%.*]] = call { <4 x half>, <4 x half>
+// CHECK: [[VLD2_V:%.*]] = call { <4 x i16>, <4 x i16>
 float16x4x2_t test_vld2_f16(float16_t const * a) {
   return vld2_f16(a);
 }
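The single-lane loads follow the same scheme: the half is loaded as an i16 and inserted into the integer view of the vector. A hypothetical source-level equivalent (the function name is illustrative only):

  #include <arm_neon.h>

  // Sketch: replace lane 3 of v with a half loaded as raw bits.
  float16x4_t load_lane3_via_u16(const uint16_t *p, float16x4_t v) {
    uint16x4_t bits = vreinterpret_u16_f16(v);  // view f16 lanes as u16
    bits = vld1_lane_u16(p, bits, 3);           // refill lane 3 from memory
    return vreinterpret_f16_u16(bits);          // back to f16
  }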
@@ -4800,7 +4806,7 @@ int64x1x2_t test_vld2_dup_s64(int64_t co
 // CHECK: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8
 // CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
 // CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x half>, <4 x half>
+// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>
 float16x4x2_t test_vld2_dup_f16(float16_t const * a) {
   return vld2_dup_f16(a);
 }
@@ -4959,9 +4965,9 @@ int32x4x2_t test_vld2q_lane_s32(int32_t
 // CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i32 0, i32 1
 // CHECK: [[TMP7:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
 // CHECK: [[TMP8:%.*]] = bitcast <8 x half> [[TMP7]] to <16 x i8>
-// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
-// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
-// CHECK: [[VLD2Q_LANE_V:%.*]] = call { <8 x half>, <8 x half>
+// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
+// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
+// CHECK: [[VLD2Q_LANE_V:%.*]] = call { <8 x i16>, <8 x i16>
 float16x8x2_t test_vld2q_lane_f16(float16_t const * a, float16x8x2_t b) {
   return vld2q_lane_f16(a, b, 7);
 }
@@ -5192,9 +5198,9 @@ int32x2x2_t test_vld2_lane_s32(int32_t c
 // CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], [2 x <4 x half>]* [[VAL1]], i32 0, i32 1
 // CHECK: [[TMP7:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX2]], align 8
 // CHECK: [[TMP8:%.*]] = bitcast <4 x half> [[TMP7]] to <8 x i8>
-// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
-// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
-// CHECK: [[VLD2_LANE_V:%.*]] = call { <4 x half>, <4 x half>
+// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
+// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
+// CHECK: [[VLD2_LANE_V:%.*]] = call { <4 x i16>, <4 x i16>
 float16x4x2_t test_vld2_lane_f16(float16_t const * a, float16x4x2_t b) {
   return vld2_lane_f16(a, b, 3);
 }
@@ -5331,7 +5337,7 @@ int32x4x3_t test_vld3q_s32(int32_t const
 // CHECK: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 16
 // CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x3_t* [[__RET]] to i8*
 // CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD3Q_V:%.*]] = call { <8 x half>, <8 x half>, <8 x half>
+// CHECK: [[VLD3Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>
 float16x8x3_t test_vld3q_f16(float16_t const * a) {
   return vld3q_f16(a);
 }
@@ -5436,7 +5442,7 @@ int64x1x3_t test_vld3_s64(int64_t const
 // CHECK: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8
 // CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
 // CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD3_V:%.*]] = call { <4 x half>, <4 x half>, <4 x half>
+// CHECK: [[VLD3_V:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
 float16x4x3_t test_vld3_f16(float16_t const * a) {
   return vld3_f16(a);
 }
@@ -5541,7 +5547,7 @@ int64x1x3_t test_vld3_dup_s64(int64_t co
 // CHECK: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8
 // CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
 // CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x half>, <4 x half>, <4 x half>
+// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
 float16x4x3_t test_vld3_dup_f16(float16_t const * a) {
   return vld3_dup_f16(a);
 }
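For the structured loads (vld2/vld3/vld4 and their _dup/_lane forms), only the IR element type changes; the de-interleaving semantics are untouched. As a reminder of those semantics, a small sketch:

  #include <arm_neon.h>

  // vld2_f16 splits 8 interleaved halves at p into two 4-lane vectors:
  // val[0] = {p[0], p[2], p[4], p[6]}, val[1] = {p[1], p[3], p[5], p[7]}.
  float16x4x2_t split_pairs(float16_t const *p) {
    return vld2_f16(p);
  }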
@@ -5724,10 +5730,10 @@ int32x4x3_t test_vld3q_lane_s32(int32_t
 // CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i32 0, i32 2
 // CHECK: [[TMP9:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
 // CHECK: [[TMP10:%.*]] = bitcast <8 x half> [[TMP9]] to <16 x i8>
-// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
-// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
-// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x half>
-// CHECK: [[VLD3Q_LANE_V:%.*]] = call { <8 x half>, <8 x half>, <8 x half>
+// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
+// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
+// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x i16>
+// CHECK: [[VLD3Q_LANE_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>
 float16x8x3_t test_vld3q_lane_f16(float16_t const * a, float16x8x3_t b) {
   return vld3q_lane_f16(a, b, 7);
 }
@@ -5998,10 +6004,10 @@ int32x2x3_t test_vld3_lane_s32(int32_t c
 // CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], [3 x <4 x half>]* [[VAL3]], i32 0, i32 2
 // CHECK: [[TMP9:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX4]], align 8
 // CHECK: [[TMP10:%.*]] = bitcast <4 x half> [[TMP9]] to <8 x i8>
-// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
-// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
-// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x half>
-// CHECK: [[VLD3_LANE_V:%.*]] = call { <4 x half>, <4 x half>, <4 x half>
+// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
+// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
+// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16>
+// CHECK: [[VLD3_LANE_V:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
 float16x4x3_t test_vld3_lane_f16(float16_t const * a, float16x4x3_t b) {
   return vld3_lane_f16(a, b, 3);
 }
@@ -6151,7 +6157,7 @@ int32x4x4_t test_vld4q_s32(int32_t const
 // CHECK: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 16
 // CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x4_t* [[__RET]] to i8*
 // CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD4Q_V:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half>
+// CHECK: [[VLD4Q_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>
 float16x8x4_t test_vld4q_f16(float16_t const * a) {
   return vld4q_f16(a);
 }
@@ -6256,7 +6262,7 @@ int64x1x4_t test_vld4_s64(int64_t const
 // CHECK: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8
 // CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
 // CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD4_V:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half>
+// CHECK: [[VLD4_V:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
 float16x4x4_t test_vld4_f16(float16_t const * a) {
   return vld4_f16(a);
 }
@@ -6361,7 +6367,7 @@ int64x1x4_t test_vld4_dup_s64(int64_t co
 // CHECK: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8
 // CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
 // CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half>
+// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
 float16x4x4_t test_vld4_dup_f16(float16_t const * a) {
   return vld4_dup_f16(a);
 }
@@ -6568,11 +6574,11 @@ int32x4x4_t test_vld4q_lane_s32(int32_t
 // CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i32 0, i32 3
 // CHECK: [[TMP11:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
 // CHECK: [[TMP12:%.*]] = bitcast <8 x half> [[TMP11]] to <16 x i8>
-// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half>
-// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half>
-// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x half>
-// CHECK: [[TMP16:%.*]] = bitcast <16 x i8> [[TMP12]] to <8 x half>
-// CHECK: [[VLD4Q_LANE_V:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half>
+// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16>
+// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16>
+// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x i16>
+// CHECK: [[TMP16:%.*]] = bitcast <16 x i8> [[TMP12]] to <8 x i16>
+// CHECK: [[VLD4Q_LANE_V:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>
 float16x8x4_t test_vld4q_lane_f16(float16_t const * a, float16x8x4_t b) {
   return vld4q_lane_f16(a, b, 7);
 }
@@ -6883,11 +6889,11 @@ int32x2x4_t test_vld4_lane_s32(int32_t c
 // CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], [4 x <4 x half>]* [[VAL5]], i32 0, i32 3
 // CHECK: [[TMP11:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX6]], align 8
 // CHECK: [[TMP12:%.*]] = bitcast <4 x half> [[TMP11]] to <8 x i8>
-// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half>
-// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half>
-// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x half>
-// CHECK: [[TMP16:%.*]] = bitcast <8 x i8> [[TMP12]] to <4 x half>
-// CHECK: [[VLD4_LANE_V:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half>
+// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16>
+// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16>
+// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16>
+// CHECK: [[TMP16:%.*]] = bitcast <8 x i8> [[TMP12]] to <4 x i16>
+// CHECK: [[VLD4_LANE_V:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
 float16x4x4_t test_vld4_lane_f16(float16_t const * a, float16x4x4_t b) {
   return vld4_lane_f16(a, b, 3);
 }
@@ -15778,8 +15784,8 @@ void test_vst1q_s64(int64_t * a, int64x2
 // CHECK-LABEL: @test_vst1q_f16(
 // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
 // CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
-// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
-// CHECK: call void @llvm.arm.neon.vst1.p0i8.v8f16(i8* [[TMP0]], <8 x half> [[TMP2]], i32 2)
+// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
+// CHECK: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* [[TMP0]], <8 x i16> [[TMP2]], i32 2)
 // CHECK: ret void
 void test_vst1q_f16(float16_t * a, float16x8_t b) {
   vst1q_f16(a, b);
@@ -15889,8 +15895,8 @@ void test_vst1_s64(int64_t * a, int64x1_
 // CHECK-LABEL: @test_vst1_f16(
 // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
 // CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
-// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
-// CHECK: call void @llvm.arm.neon.vst1.p0i8.v4f16(i8* [[TMP0]], <4 x half> [[TMP2]], i32 2)
+// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
+// CHECK: call void @llvm.arm.neon.vst1.p0i8.v4i16(i8* [[TMP0]], <4 x i16> [[TMP2]], i32 2)
 // CHECK: ret void
 void test_vst1_f16(float16_t * a, float16x4_t b) {
   vst1_f16(a, b);
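The store side mirrors the loads: the f16 vector is bitcast to i16 lanes before the vst1, as the hunks above and below show. A minimal sketch of the same round trip in source form:

  #include <arm_neon.h>

  // Store a float16x4_t through its u16 bit view, matching the
  // <4 x half> to <4 x i16> bitcast restored by the revert.
  void store_f16_via_u16(uint16_t *p, float16x4_t v) {
    vst1_u16(p, vreinterpret_u16_f16(v));
  }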
@@ -16012,10 +16018,10 @@ void test_vst1q_lane_s64(int64_t * a, in
 // CHECK-LABEL: @test_vst1q_lane_f16(
 // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
 // CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
-// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
-// CHECK: [[TMP3:%.*]] = extractelement <8 x half> [[TMP2]], i32 7
-// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to half*
-// CHECK: store half [[TMP3]], half* [[TMP4]], align 2
+// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
+// CHECK: [[TMP3:%.*]] = extractelement <8 x i16> [[TMP2]], i32 7
+// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
+// CHECK: store i16 [[TMP3]], i16* [[TMP4]], align 2
 // CHECK: ret void
 void test_vst1q_lane_f16(float16_t * a, float16x8_t b) {
   vst1q_lane_f16(a, b, 7);
@@ -16144,10 +16150,10 @@ void test_vst1_lane_s64(int64_t * a, int
 // CHECK-LABEL: @test_vst1_lane_f16(
 // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
 // CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
-// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
-// CHECK: [[TMP3:%.*]] = extractelement <4 x half> [[TMP2]], i32 3
-// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to half*
-// CHECK: store half [[TMP3]], half* [[TMP4]], align 2
+// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
+// CHECK: [[TMP3:%.*]] = extractelement <4 x i16> [[TMP2]], i32 3
+// CHECK: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i16*
+// CHECK: store i16 [[TMP3]], i16* [[TMP4]], align 2
 // CHECK: ret void
 void test_vst1_lane_f16(float16_t * a, float16x4_t b) {
   vst1_lane_f16(a, b, 3);
@@ -16349,9 +16355,9 @@ void test_vst2q_s32(int32_t * a, int32x4
 // CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i32 0, i32 1
 // CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
 // CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
-// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
-// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
-// CHECK: call void @llvm.arm.neon.vst2.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP8]], <8 x half> [[TMP9]], i32 2)
+// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
+// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
+// CHECK: call void @llvm.arm.neon.vst2.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP8]], <8 x i16> [[TMP9]], i32 2)
 // CHECK: ret void
 void test_vst2q_f16(float16_t * a, float16x8x2_t b) {
   vst2q_f16(a, b);
@@ -16646,9 +16652,9 @@ void test_vst2_s64(int64_t * a, int64x1x
 // CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], [2 x <4 x half>]* [[VAL1]], i32 0, i32 1
 // CHECK: [[TMP6:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX2]], align 8
 // CHECK: [[TMP7:%.*]] = bitcast <4 x half> [[TMP6]] to <8 x i8>
-// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
-// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
-// CHECK: call void @llvm.arm.neon.vst2.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP8]], <4 x half> [[TMP9]], i32 2)
+// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
+// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
+// CHECK: call void @llvm.arm.neon.vst2.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP8]], <4 x i16> [[TMP9]], i32 2)
 // CHECK: ret void
 void test_vst2_f16(float16_t * a, float16x4x2_t b) {
   vst2_f16(a, b);
@@ -16849,9 +16855,9 @@ void test_vst2q_lane_s32(int32_t * a, in
 // CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], [2 x <8 x half>]* [[VAL1]], i32 0, i32 1
 // CHECK: [[TMP6:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX2]], align 16
 // CHECK: [[TMP7:%.*]] = bitcast <8 x half> [[TMP6]] to <16 x i8>
-// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
-// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
-// CHECK: call void @llvm.arm.neon.vst2lane.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP8]], <8 x half> [[TMP9]], i32 7, i32 2)
+// CHECK: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
+// CHECK: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
+// CHECK: call void @llvm.arm.neon.vst2lane.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP8]], <8 x i16> [[TMP9]], i32 7, i32 2)
 // CHECK: ret void
 void test_vst2q_lane_f16(float16_t * a, float16x8x2_t b) {
   vst2q_lane_f16(a, b, 7);
@@ -17073,9 +17079,9 @@ void test_vst2_lane_s32(int32_t * a, int
 // CHECK: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], [2 x <4 x half>]* [[VAL1]], i32 0, i32 1
 // CHECK: [[TMP6:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX2]], align 8
 // CHECK: [[TMP7:%.*]] = bitcast <4 x half> [[TMP6]] to <8 x i8>
-// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
-// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
-// CHECK: call void @llvm.arm.neon.vst2lane.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP8]], <4 x half> [[TMP9]], i32 3, i32 2)
+// CHECK: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
+// CHECK: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
+// CHECK: call void @llvm.arm.neon.vst2lane.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP8]], <4 x i16> [[TMP9]], i32 3, i32 2)
 // CHECK: ret void
 void test_vst2_lane_f16(float16_t * a, float16x4x2_t b) {
   vst2_lane_f16(a, b, 3);
@@ -17348,10 +17354,10 @@ void test_vst3q_s32(int32_t * a, int32x4
 // CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i32 0, i32 2
 // CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
 // CHECK: [[TMP9:%.*]] = bitcast <8 x half> [[TMP8]] to <16 x i8>
-// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
-// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
-// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x half>
-// CHECK: call void @llvm.arm.neon.vst3.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP10]], <8 x half> [[TMP11]], <8 x half> [[TMP12]], i32 2)
+// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
+// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
+// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x i16>
+// CHECK: call void @llvm.arm.neon.vst3.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]], <8 x i16> [[TMP12]], i32 2)
 // CHECK: ret void
 void test_vst3q_f16(float16_t * a, float16x8x3_t b) {
   vst3q_f16(a, b);
@@ -17699,10 +17705,10 @@ void test_vst3_s64(int64_t * a, int64x1x
 // CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], [3 x <4 x half>]* [[VAL3]], i32 0, i32 2
 // CHECK: [[TMP8:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX4]], align 8
 // CHECK: [[TMP9:%.*]] = bitcast <4 x half> [[TMP8]] to <8 x i8>
-// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
-// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
-// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x half>
-// CHECK: call void @llvm.arm.neon.vst3.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP10]], <4 x half> [[TMP11]], <4 x half> [[TMP12]], i32 2)
+// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
+// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
+// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x i16>
+// CHECK: call void @llvm.arm.neon.vst3.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]], <4 x i16> [[TMP12]], i32 2)
 // CHECK: ret void
 void test_vst3_f16(float16_t * a, float16x4x3_t b) {
   vst3_f16(a, b);
@@ -17940,10 +17946,10 @@ void test_vst3q_lane_s32(int32_t * a, in
 // CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], [3 x <8 x half>]* [[VAL3]], i32 0, i32 2
 // CHECK: [[TMP8:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX4]], align 16
 // CHECK: [[TMP9:%.*]] = bitcast <8 x half> [[TMP8]] to <16 x i8>
-// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
-// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
-// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x half>
-// CHECK: call void @llvm.arm.neon.vst3lane.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP10]], <8 x half> [[TMP11]], <8 x half> [[TMP12]], i32 7, i32 2)
+// CHECK: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
+// CHECK: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
+// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x i16>
+// CHECK: call void @llvm.arm.neon.vst3lane.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]], <8 x i16> [[TMP12]], i32 7, i32 2)
 // CHECK: ret void
 void test_vst3q_lane_f16(float16_t * a, float16x8x3_t b) {
   vst3q_lane_f16(a, b, 7);
@@ -18205,10 +18211,10 @@ void test_vst3_lane_s32(int32_t * a, int
 // CHECK: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], [3 x <4 x half>]* [[VAL3]], i32 0, i32 2
 // CHECK: [[TMP8:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX4]], align 8
 // CHECK: [[TMP9:%.*]] = bitcast <4 x half> [[TMP8]] to <8 x i8>
-// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
-// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
-// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x half>
-// CHECK: call void @llvm.arm.neon.vst3lane.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP10]], <4 x half> [[TMP11]], <4 x half> [[TMP12]], i32 3, i32 2)
+// CHECK: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
+// CHECK: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
+// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x i16>
+// CHECK: call void @llvm.arm.neon.vst3lane.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]], <4 x i16> [[TMP12]], i32 3, i32 2)
 // CHECK: ret void
 void test_vst3_lane_f16(float16_t * a, float16x4x3_t b) {
   vst3_lane_f16(a, b, 3);
@@ -18524,11 +18530,11 @@ void test_vst4q_s32(int32_t * a, int32x4
 // CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i32 0, i32 3
 // CHECK: [[TMP10:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
 // CHECK: [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
-// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
-// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
-// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x half>
-// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
-// CHECK: call void @llvm.arm.neon.vst4.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP12]], <8 x half> [[TMP13]], <8 x half> [[TMP14]], <8 x half> [[TMP15]], i32 2)
+// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
+// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
+// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x i16>
+// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK: call void @llvm.arm.neon.vst4.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], <8 x i16> [[TMP15]], i32 2)
 // CHECK: ret void
 void test_vst4q_f16(float16_t * a, float16x8x4_t b) {
   vst4q_f16(a, b);
@@ -18929,11 +18935,11 @@ void test_vst4_s64(int64_t * a, int64x1x
 // CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], [4 x <4 x half>]* [[VAL5]], i32 0, i32 3
 // CHECK: [[TMP10:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX6]], align 8
 // CHECK: [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
-// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
-// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
-// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x half>
-// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
-// CHECK: call void @llvm.arm.neon.vst4.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP12]], <4 x half> [[TMP13]], <4 x half> [[TMP14]], <4 x half> [[TMP15]], i32 2)
+// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
+// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
+// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x i16>
+// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK: call void @llvm.arm.neon.vst4.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], <4 x i16> [[TMP15]], i32 2)
 // CHECK: ret void
 void test_vst4_f16(float16_t * a, float16x4x4_t b) {
   vst4_f16(a, b);
@@ -19208,11 +19214,11 @@ void test_vst4q_lane_s32(int32_t * a, in
 // CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], [4 x <8 x half>]* [[VAL5]], i32 0, i32 3
 // CHECK: [[TMP10:%.*]] = load <8 x half>, <8 x half>* [[ARRAYIDX6]], align 16
 // CHECK: [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
-// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half>
-// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half>
-// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x half>
-// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
-// CHECK: call void @llvm.arm.neon.vst4lane.p0i8.v8f16(i8* [[TMP3]], <8 x half> [[TMP12]], <8 x half> [[TMP13]], <8 x half> [[TMP14]], <8 x half> [[TMP15]], i32 7, i32 2)
+// CHECK: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x i16>
+// CHECK: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x i16>
+// CHECK: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP9]] to <8 x i16>
+// CHECK: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK: call void @llvm.arm.neon.vst4lane.p0i8.v8i16(i8* [[TMP3]], <8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], <8 x i16> [[TMP15]], i32 7, i32 2)
 // CHECK: ret void
 void test_vst4q_lane_f16(float16_t * a, float16x8x4_t b) {
   vst4q_lane_f16(a, b, 7);
@@ -19514,11 +19520,11 @@ void test_vst4_lane_s32(int32_t * a, int
 // CHECK: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], [4 x <4 x half>]* [[VAL5]], i32 0, i32 3
 // CHECK: [[TMP10:%.*]] = load <4 x half>, <4 x half>* [[ARRAYIDX6]], align 8
 // CHECK: [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
-// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half>
-// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half>
-// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x half>
-// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
-// CHECK: call void @llvm.arm.neon.vst4lane.p0i8.v4f16(i8* [[TMP3]], <4 x half> [[TMP12]], <4 x half> [[TMP13]], <4 x half> [[TMP14]], <4 x half> [[TMP15]], i32 3, i32 2)
+// CHECK: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16>
+// CHECK: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16>
+// CHECK: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP9]] to <4 x i16>
+// CHECK: [[TMP15:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK: call void @llvm.arm.neon.vst4lane.p0i8.v4i16(i8* [[TMP3]], <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], <4 x i16> [[TMP15]], i32 3, i32 2)
 // CHECK: ret void
 void test_vst4_lane_f16(float16_t * a, float16x4x4_t b) {
   vst4_lane_f16(a, b, 3);