Zeson created this revision.
Zeson added reviewers: nemanjai, sfertile, jtony, hfinkel, syzaara, lei, kbarton.
Zeson added a subscriber: cfe-commits.
This patch fixes several places in altivec.h where signed and unsigned LLVM builtins were confused. When a comparison builtin such as vec_all_ge or vec_any_gt is called with a bool vector and a signed vector, it should map to the signed compare instruction (vcmpgtsb/vcmpgtsh/vcmpgtsw/vcmpgtsd) rather than the unsigned one. The patch also makes vec_vsrb, vec_vsrh, and vec_vsrw perform a logical (unsigned) right shift, uses xvcmpeqsp instead of xvcmpeqdp for vec_all_ne on float vectors under VSX, moves the __VSX__ guard so the little-endian vec_xst_be overloads are only defined when VSX is available, and moves the corresponding vec_xl_be/vec_xst_be tests from the Altivec test to the VSX test.

Reference: ALTIVECPIM.pdf <http://www.nxp.com/assets/documents/data/en/reference-manuals/ALTIVECPIM.pdf>

https://reviews.llvm.org/D27251

Files:
  lib/Headers/altivec.h
  test/CodeGen/builtins-ppc-altivec.c
  test/CodeGen/builtins-ppc-p8vector.c
  test/CodeGen/builtins-ppc-vsx.c
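For context, here is a minimal C sketch (not part of the patch; the function names are made up for illustration) of the overloads this change affects. With a bool-vector/signed-vector operand pair, vec_all_ge is expected to lower to the signed predicate @llvm.ppc.altivec.vcmpgtsb.p instead of the unsigned one, and vec_vsrb on signed elements is expected to emit a logical shift (lshr), matching the updated CHECK lines in the tests below.

/* Illustrative only -- compile with e.g. -maltivec on a PowerPC target. */
#include <altivec.h>

int all_ge_bool_signed(vector bool char a, vector signed char b) {
  /* With this patch: maps to __builtin_altivec_vcmpgtsb_p (signed compare)
     rather than __builtin_altivec_vcmpgtub_p. */
  return vec_all_ge(a, b);
}

vector signed char srb_signed(vector signed char a, vector unsigned char b) {
  /* With this patch: the shift is performed on unsigned elements, so the
     generated IR contains lshr instead of ashr. */
  return vec_vsrb(a, b);
}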
Index: test/CodeGen/builtins-ppc-vsx.c =================================================================== --- test/CodeGen/builtins-ppc-vsx.c +++ test/CodeGen/builtins-ppc-vsx.c @@ -1692,3 +1692,86 @@ // CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, {{%[0-9]+}} // CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, {{%[0-9]+}} } + +signed char param_sc; +unsigned char param_uc; +short param_s; +unsigned short param_us; +int param_i; +unsigned int param_ui; +float param_f; +signed long long param_sll; + +/* ----------------------------- vec_xl_be ---------------------------------- */ +void test2() { + // CHECK-LABEL: define void @test2 + // CHECK-LE-LABEL: define void @test2 + vec_xl_be(param_sll, ¶m_sc); + // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16 + // CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}}) + // CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8> + + vec_xl_be(param_sll, ¶m_uc); + // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16 + // CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}}) + // CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8> + + vec_xl_be(param_sll, ¶m_s); + // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16 + // CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}}) + // CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> + + vec_xl_be(param_sll, ¶m_us); + // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16 + // CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}}) + // CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> + + vec_xl_be(param_sll, ¶m_i); + // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16 + // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}}) + + vec_xl_be(param_sll, ¶m_ui); + // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16 + // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}}) + + vec_xl_be(param_sll, ¶m_f); + // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 16 + // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}}) +} + +/* ----------------------------- vec_xst_be --------------------------------- */ +void test3() { + // CHECK-LABEL: define void @test3 + // CHECK-LE-LABEL: define void @test3 + vec_xst_be(vsc, param_sll, ¶m_sc); + // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16 + // CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8> + // CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}}) + + vec_xst_be(vuc, param_sll, ¶m_uc); + // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16 + // CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8> + // CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x 
double> %{{[0-9]+}}, i8* %{{[0-9]+}}) + + vec_xst_be(vss, param_sll, ¶m_s); + // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16 + // CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> + // CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}}) + + vec_xst_be(vus, param_sll, ¶m_us); + // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16 + // CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> + // CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}}) + + vec_xst_be(vsi, param_sll, ¶m_i); + // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16 + // CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}}) + + vec_xst_be(vui, param_sll, ¶m_ui); + // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16 + // CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}}) + + vec_xst_be(vf, param_sll, ¶m_f); + // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16 + // CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}}) +} Index: test/CodeGen/builtins-ppc-p8vector.c =================================================================== --- test/CodeGen/builtins-ppc-p8vector.c +++ test/CodeGen/builtins-ppc-p8vector.c @@ -508,6 +508,10 @@ dummy(); // CHECK: @dummy + res_i = vec_all_ne(vfa, vfa); +// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p +// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p + res_i = vec_all_ne(vda, vda); // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p // CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p @@ -625,8 +629,8 @@ // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous res_i = vec_all_ge(vbll, vsll); -// CHECK: @llvm.ppc.altivec.vcmpgtud.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p +// CHECK: @llvm.ppc.altivec.vcmpgtsd.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_ge' is ambiguous res_i = vec_all_ge(vbll, vull); @@ -665,8 +669,8 @@ // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous res_i = vec_all_gt(vbll, vsll); -// CHECK: @llvm.ppc.altivec.vcmpgtud.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p +// CHECK: @llvm.ppc.altivec.vcmpgtsd.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_gt' is ambiguous res_i = vec_all_gt(vbll, vull); @@ -705,8 +709,8 @@ // CHECK-PPC: error: call to 'vec_all_le' is ambiguous res_i = vec_all_le(vbll, vsll); -// CHECK: @llvm.ppc.altivec.vcmpgtud.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p +// CHECK: @llvm.ppc.altivec.vcmpgtsd.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_le' is ambiguous res_i = vec_all_le(vbll, vull); @@ -745,8 +749,8 @@ // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous res_i = vec_all_lt(vbll, vsll); -// CHECK: @llvm.ppc.altivec.vcmpgtud.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p +// CHECK: @llvm.ppc.altivec.vcmpgtsd.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_all_lt' is ambiguous res_i = vec_all_lt(vbll, vull); @@ -789,8 +793,8 @@ // CHECK-PPC: error: call to 'vec_any_ge' is ambiguous res_i = vec_any_ge(vbll, vsll); -// CHECK: @llvm.ppc.altivec.vcmpgtud.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p +// CHECK: @llvm.ppc.altivec.vcmpgtsd.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 
'vec_any_ge' is ambiguous res_i = vec_any_ge(vbll, vull); @@ -829,8 +833,8 @@ // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous res_i = vec_any_gt(vbll, vsll); -// CHECK: @llvm.ppc.altivec.vcmpgtud.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p +// CHECK: @llvm.ppc.altivec.vcmpgtsd.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_gt' is ambiguous res_i = vec_any_gt(vbll, vull); @@ -869,8 +873,8 @@ // CHECK-PPC: error: call to 'vec_any_le' is ambiguous res_i = vec_any_le(vbll, vsll); -// CHECK: @llvm.ppc.altivec.vcmpgtud.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p +// CHECK: @llvm.ppc.altivec.vcmpgtsd.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_le' is ambiguous res_i = vec_any_le(vbll, vull); @@ -909,8 +913,8 @@ // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous res_i = vec_any_lt(vbll, vsll); -// CHECK: @llvm.ppc.altivec.vcmpgtud.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p +// CHECK: @llvm.ppc.altivec.vcmpgtsd.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd.p // CHECK-PPC: error: call to 'vec_any_lt' is ambiguous res_i = vec_any_lt(vbll, vull); Index: test/CodeGen/builtins-ppc-altivec.c =================================================================== --- test/CodeGen/builtins-ppc-altivec.c +++ test/CodeGen/builtins-ppc-altivec.c @@ -4268,28 +4268,28 @@ // CHECK-LE: lshr <4 x i32> res_vsc = vec_vsrb(vsc, vuc); -// CHECK: shr <16 x i8> -// CHECK-LE: shr <16 x i8> +// CHECK: lshr <16 x i8> +// CHECK-LE: lshr <16 x i8> res_vuc = vec_vsrb(vuc, vuc); -// CHECK: shr <16 x i8> -// CHECK-LE: shr <16 x i8> +// CHECK: lshr <16 x i8> +// CHECK-LE: lshr <16 x i8> res_vs = vec_vsrh(vs, vus); -// CHECK: shr <8 x i16> -// CHECK-LE: shr <8 x i16> +// CHECK: lshr <8 x i16> +// CHECK-LE: lshr <8 x i16> res_vus = vec_vsrh(vus, vus); -// CHECK: shr <8 x i16> -// CHECK-LE: shr <8 x i16> +// CHECK: lshr <8 x i16> +// CHECK-LE: lshr <8 x i16> res_vi = vec_vsrw(vi, vui); -// CHECK: shr <4 x i32> -// CHECK-LE: shr <4 x i32> +// CHECK: lshr <4 x i32> +// CHECK-LE: lshr <4 x i32> res_vui = vec_vsrw(vui, vui); -// CHECK: shr <4 x i32> -// CHECK-LE: shr <4 x i32> +// CHECK: lshr <4 x i32> +// CHECK-LE: lshr <4 x i32> /* vec_sra */ res_vsc = vec_sra(vsc, vuc); @@ -7960,8 +7960,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p res_i = vec_all_ge(vbc, vsc); -// CHECK: @llvm.ppc.altivec.vcmpgtub.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p +// CHECK: @llvm.ppc.altivec.vcmpgtsb.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p res_i = vec_all_ge(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p @@ -7988,8 +7988,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_all_ge(vbs, vs); -// CHECK: @llvm.ppc.altivec.vcmpgtuh.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p +// CHECK: @llvm.ppc.altivec.vcmpgtsh.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p res_i = vec_all_ge(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p @@ -8016,8 +8016,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p res_i = vec_all_ge(vbi, vi); -// CHECK: @llvm.ppc.altivec.vcmpgtuw.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p +// CHECK: @llvm.ppc.altivec.vcmpgtsw.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_all_ge(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p @@ -8049,8 +8049,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p res_i = vec_all_gt(vbc, vsc); -// CHECK: @llvm.ppc.altivec.vcmpgtub.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p +// CHECK: @llvm.ppc.altivec.vcmpgtsb.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p res_i = vec_all_gt(vbc, vuc); // CHECK: 
@llvm.ppc.altivec.vcmpgtub.p @@ -8077,8 +8077,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_all_gt(vbs, vs); -// CHECK: @llvm.ppc.altivec.vcmpgtuh.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p +// CHECK: @llvm.ppc.altivec.vcmpgtsh.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p res_i = vec_all_gt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p @@ -8105,8 +8105,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p res_i = vec_all_gt(vbi, vi); -// CHECK: @llvm.ppc.altivec.vcmpgtuw.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p +// CHECK: @llvm.ppc.altivec.vcmpgtsw.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_all_gt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p @@ -8143,8 +8143,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p res_i = vec_all_le(vbc, vsc); -// CHECK: @llvm.ppc.altivec.vcmpgtub.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p +// CHECK: @llvm.ppc.altivec.vcmpgtsb.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p res_i = vec_all_le(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p @@ -8171,8 +8171,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_all_le(vbs, vs); -// CHECK: @llvm.ppc.altivec.vcmpgtuh.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p +// CHECK: @llvm.ppc.altivec.vcmpgtsh.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p res_i = vec_all_le(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p @@ -8199,8 +8199,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p res_i = vec_all_le(vbi, vi); -// CHECK: @llvm.ppc.altivec.vcmpgtuw.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p +// CHECK: @llvm.ppc.altivec.vcmpgtsw.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_all_le(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p @@ -8232,8 +8232,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p res_i = vec_all_lt(vbc, vsc); -// CHECK: @llvm.ppc.altivec.vcmpgtub.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p +// CHECK: @llvm.ppc.altivec.vcmpgtsb.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p res_i = vec_all_lt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p @@ -8260,8 +8260,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_all_lt(vbs, vs); -// CHECK: @llvm.ppc.altivec.vcmpgtuh.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p +// CHECK: @llvm.ppc.altivec.vcmpgtsh.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p res_i = vec_all_lt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p @@ -8288,8 +8288,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p res_i = vec_all_lt(vbi, vi); -// CHECK: @llvm.ppc.altivec.vcmpgtuw.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p +// CHECK: @llvm.ppc.altivec.vcmpgtsw.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_all_lt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p @@ -8537,8 +8537,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p res_i = vec_any_ge(vbc, vsc); -// CHECK: @llvm.ppc.altivec.vcmpgtub.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p +// CHECK: @llvm.ppc.altivec.vcmpgtsb.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p res_i = vec_any_ge(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p @@ -8565,8 +8565,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_any_ge(vbs, vs); -// CHECK: @llvm.ppc.altivec.vcmpgtuh.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p +// CHECK: @llvm.ppc.altivec.vcmpgtsh.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p res_i = vec_any_ge(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p @@ -8593,8 +8593,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p res_i = vec_any_ge(vbi, vi); -// CHECK: @llvm.ppc.altivec.vcmpgtuw.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p +// CHECK: @llvm.ppc.altivec.vcmpgtsw.p +// CHECK-LE: 
@llvm.ppc.altivec.vcmpgtsw.p res_i = vec_any_ge(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p @@ -8626,8 +8626,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p res_i = vec_any_gt(vbc, vsc); -// CHECK: @llvm.ppc.altivec.vcmpgtub.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p +// CHECK: @llvm.ppc.altivec.vcmpgtsb.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p res_i = vec_any_gt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p @@ -8654,8 +8654,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_any_gt(vbs, vs); -// CHECK: @llvm.ppc.altivec.vcmpgtuh.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p +// CHECK: @llvm.ppc.altivec.vcmpgtsh.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p res_i = vec_any_gt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p @@ -8682,8 +8682,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p res_i = vec_any_gt(vbi, vi); -// CHECK: @llvm.ppc.altivec.vcmpgtuw.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p +// CHECK: @llvm.ppc.altivec.vcmpgtsw.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_any_gt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p @@ -8715,8 +8715,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p res_i = vec_any_le(vbc, vsc); -// CHECK: @llvm.ppc.altivec.vcmpgtub.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p +// CHECK: @llvm.ppc.altivec.vcmpgtsb.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p res_i = vec_any_le(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p @@ -8743,8 +8743,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_any_le(vbs, vs); -// CHECK: @llvm.ppc.altivec.vcmpgtuh.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p +// CHECK: @llvm.ppc.altivec.vcmpgtsh.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p res_i = vec_any_le(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p @@ -8771,8 +8771,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p res_i = vec_any_le(vbi, vi); -// CHECK: @llvm.ppc.altivec.vcmpgtuw.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p +// CHECK: @llvm.ppc.altivec.vcmpgtsw.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_any_le(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p @@ -8804,8 +8804,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p res_i = vec_any_lt(vbc, vsc); -// CHECK: @llvm.ppc.altivec.vcmpgtub.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtub.p +// CHECK: @llvm.ppc.altivec.vcmpgtsb.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb.p res_i = vec_any_lt(vbc, vuc); // CHECK: @llvm.ppc.altivec.vcmpgtub.p @@ -8832,8 +8832,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p res_i = vec_any_lt(vbs, vs); -// CHECK: @llvm.ppc.altivec.vcmpgtuh.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh.p +// CHECK: @llvm.ppc.altivec.vcmpgtsh.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh.p res_i = vec_any_lt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p @@ -8860,8 +8860,8 @@ // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p res_i = vec_any_lt(vbi, vi); -// CHECK: @llvm.ppc.altivec.vcmpgtuw.p -// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw.p +// CHECK: @llvm.ppc.altivec.vcmpgtsw.p +// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw.p res_i = vec_any_lt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p @@ -9386,77 +9386,3 @@ // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16 // CHECK-LE: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16 } - -/* ----------------------------- vec_xl_be ---------------------------------- */ -void test11() { - // CHECK-LABEL: define void @test11 - // CHECK-LE-LABEL: define void @test11 - res_vsc = vec_xl_be(param_sll, ¶m_sc); - // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16 - // CHECK-LE: call <2 x double> 
@llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}}) - // CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8> - - res_vuc = vec_xl_be(param_sll, ¶m_uc); - // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16 - // CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}}) - // CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8> - - res_vs = vec_xl_be(param_sll, ¶m_s); - // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16 - // CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}}) - // CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> - - res_vus = vec_xl_be(param_sll, ¶m_us); - // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16 - // CHECK-LE: call <2 x double> @llvm.ppc.vsx.lxvd2x.be(i8* %{{[0-9]+}}) - // CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> - - res_vi = vec_xl_be(param_sll, ¶m_i); - // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16 - // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}}) - - res_vui = vec_xl_be(param_sll, ¶m_ui); - // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16 - // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}}) - - res_vf = vec_xl_be(param_sll, ¶m_f); - // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 16 - // CHECK-LE: call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(i8* %{{[0-9]+}}) -} - -/* ----------------------------- vec_xst_be --------------------------------- */ -void test12() { - // CHECK-LABEL: define void @test12 - // CHECK-LE-LABEL: define void @test12 - vec_xst_be(vsc, param_sll, ¶m_sc); - // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16 - // CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8> - // CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}}) - - vec_xst_be(vuc, param_sll, ¶m_uc); - // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16 - // CHECK-LE: shufflevector <16 x i8> %{{[0-9]+}}, <16 x i8> %{{[0-9]+}}, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8> - // CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}}) - - vec_xst_be(vs, param_sll, ¶m_s); - // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16 - // CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> - // CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}}) - - vec_xst_be(vus, param_sll, ¶m_us); - // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16 - // CHECK-LE: shufflevector <8 x i16> %{{[0-9]+}}, <8 x i16> %{{[0-9]+}}, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> - // CHECK-LE: call void @llvm.ppc.vsx.stxvd2x.be(<2 x double> %{{[0-9]+}}, i8* %{{[0-9]+}}) - - vec_xst_be(vi, param_sll, ¶m_i); - // CHECK: store <4 x i32> 
%{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16 - // CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}}) - - vec_xst_be(vui, param_sll, ¶m_ui); - // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16 - // CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}}) - - vec_xst_be(vf, param_sll, ¶m_f); - // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16 - // CHECK-LE: call void @llvm.ppc.vsx.stxvw4x.be(<4 x i32> %{{[0-9]+}}, i8* %{{[0-9]+}}) -} Index: lib/Headers/altivec.h =================================================================== --- lib/Headers/altivec.h +++ lib/Headers/altivec.h @@ -9544,7 +9544,7 @@ static __inline__ vector signed char __ATTRS_o_ai vec_vsrb(vector signed char __a, vector unsigned char __b) { - return __a >> (vector signed char)__b; + return (vector signed char)((vector unsigned char)__a >> __b); } static __inline__ vector unsigned char __ATTRS_o_ai @@ -9558,7 +9558,7 @@ static __inline__ vector short __ATTRS_o_ai vec_vsrh(vector short __a, vector unsigned short __b) { - return __a >> (vector short)__b; + return (vector short)((vector unsigned short)__a >> __b); } static __inline__ vector unsigned short __ATTRS_o_ai @@ -9572,7 +9572,7 @@ static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a, vector unsigned int __b) { - return __a >> (vector int)__b; + return (vector int)((vector unsigned int)__a >> __b); } static __inline__ vector unsigned int __ATTRS_o_ai @@ -13925,8 +13925,8 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a, vector signed char __b) { - return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, - (vector unsigned char)__a); + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, + (vector signed char)__a); } static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a, @@ -13963,8 +13963,8 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a, vector short __b) { - return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b, - (vector unsigned short)__a); + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector signed short)__b, + (vector signed short)__a); } static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a, @@ -14000,8 +14000,8 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a, vector int __b) { - return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, - (vector unsigned int)__a); + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector signed int)__b, + (vector signed int)__a); } static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a, @@ -14039,8 +14039,8 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a, vector signed long long __b) { - return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b, - (vector unsigned long long)__a); + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b, + (vector signed long long)__a); } static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a, @@ -14096,8 +14096,8 @@ static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a, vector signed char __b) { - return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, - (vector unsigned char)__b); + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__a, + (vector signed char)__b); } static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a, @@ -14134,8 +14134,8 @@ static __inline__ 
int __ATTRS_o_ai vec_all_gt(vector bool short __a, vector short __b) { - return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a, - (vector unsigned short)__b); + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector signed short)__a, + (vector signed short)__b); } static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a, @@ -14171,8 +14171,8 @@ static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a, vector int __b) { - return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, - (vector unsigned int)__b); + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector signed int)__a, + (vector signed int)__b); } static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a, @@ -14210,8 +14210,8 @@ static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a, vector signed long long __b) { - return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a, - (vector unsigned long long)__b); + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__a, + (vector signed long long)__b); } static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a, @@ -14274,8 +14274,8 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a, vector signed char __b) { - return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, - (vector unsigned char)__b); + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__a, + (vector signed char)__b); } static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a, @@ -14312,8 +14312,8 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a, vector short __b) { - return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a, - (vector unsigned short)__b); + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector signed short)__a, + (vector signed short)__b); } static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a, @@ -14349,8 +14349,8 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a, vector int __b) { - return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, - (vector unsigned int)__b); + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector signed int)__a, + (vector signed int)__b); } static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a, @@ -14389,8 +14389,8 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a, vector signed long long __b) { - return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a, - (vector unsigned long long)__b); + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); } static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a, @@ -14446,8 +14446,8 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a, vector signed char __b) { - return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, - (vector unsigned char)__a); + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, + (vector signed char)__a); } static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a, @@ -14484,8 +14484,8 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a, vector short __b) { - return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b, - (vector unsigned short)__a); + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector signed short)__b, + (vector signed short)__a); } static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a, @@ 
-14521,8 +14521,8 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a, vector int __b) { - return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, - (vector unsigned int)__a); + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector signed int)__b, + (vector signed int)__a); } static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a, @@ -14561,8 +14561,8 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a, vector signed long long __b) { - return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b, - (vector unsigned long long)__a); + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b, + (vector signed long long)__a); } static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a, @@ -14785,7 +14785,7 @@ static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a, vector float __b) { #ifdef __VSX__ - return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b); + return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b); #else return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b); #endif @@ -15069,8 +15069,8 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a, vector signed char __b) { - return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b, - (vector unsigned char)__a); + return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b, + (vector signed char)__a); } static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a, @@ -15108,8 +15108,8 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a, vector short __b) { - return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b, - (vector unsigned short)__a); + return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector signed short)__b, + (vector signed short)__a); } static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a, @@ -15146,8 +15146,8 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a, vector int __b) { - return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b, - (vector unsigned int)__a); + return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector signed int)__b, + (vector signed int)__a); } static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a, @@ -15187,9 +15187,9 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a, vector signed long long __b) { - return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, - (vector unsigned long long)__b, - (vector unsigned long long)__a); + return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, + (vector signed long long)__b, + (vector signed long long)__a); } static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a, @@ -15248,8 +15248,8 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a, vector signed char __b) { - return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a, - (vector unsigned char)__b); + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__a, + (vector signed char)__b); } static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a, @@ -15287,8 +15287,8 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a, vector short __b) { - return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a, - (vector unsigned short)__b); + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector signed short)__a, + (vector signed short)__b); } static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a, @@ 
-15325,8 +15325,8 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a, vector int __b) { - return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a, - (vector unsigned int)__b); + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector signed int)__a, + (vector signed int)__b); } static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a, @@ -15366,9 +15366,9 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a, vector signed long long __b) { - return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, - (vector unsigned long long)__a, - (vector unsigned long long)__b); + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, + (vector signed long long)__a, + (vector signed long long)__b); } static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a, @@ -15427,8 +15427,8 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a, vector signed char __b) { - return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a, - (vector unsigned char)__b); + return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__a, + (vector signed char)__b); } static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a, @@ -15466,8 +15466,8 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a, vector short __b) { - return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a, - (vector unsigned short)__b); + return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector signed short)__a, + (vector signed short)__b); } static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a, @@ -15504,8 +15504,8 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a, vector int __b) { - return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a, - (vector unsigned int)__b); + return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector signed int)__a, + (vector signed int)__b); } static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a, @@ -15545,9 +15545,9 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a, vector signed long long __b) { - return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, - (vector unsigned long long)__a, - (vector unsigned long long)__b); + return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, + (vector signed long long)__a, + (vector signed long long)__b); } static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a, @@ -15606,8 +15606,8 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a, vector signed char __b) { - return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b, - (vector unsigned char)__a); + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b, + (vector signed char)__a); } static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a, @@ -15645,8 +15645,8 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a, vector short __b) { - return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b, - (vector unsigned short)__a); + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector signed short)__b, + (vector signed short)__a); } static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a, @@ -15683,8 +15683,8 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a, vector int __b) { - return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b, - (vector unsigned int)__a); + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector signed int)__b, 
+ (vector signed int)__a); } static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a, @@ -15724,9 +15724,9 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a, vector signed long long __b) { - return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, - (vector unsigned long long)__b, - (vector unsigned long long)__a); + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, + (vector signed long long)__b, + (vector signed long long)__a); } static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a, @@ -16566,6 +16566,7 @@ /* vec_xst_be */ #ifdef __LITTLE_ENDIAN__ +#ifdef __VSX__ static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed char __vec, signed long long __offset, signed char *__ptr) { @@ -16618,7 +16619,6 @@ __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr); } -#ifdef __VSX__ static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed long long __vec, signed long long __offset, signed long long *__ptr) {